Mirror of https://github.com/jojo61/vdr-plugin-softhdcuvid.git (synced 2025-03-01 10:39:28 +00:00)
Compare commits
61 Commits
4b1ffc5b2e
47b461ab46
8629946041
a222f6a1d5
b51589aaa9
c3af54aae0
1d66d9389d
7e387fa3f1
b2fd6ba240
cf9ef0c3b0
f3e5a14fdf
aa0c2f80e4
215f251572
a425ec94e0
91961bdffe
f741dff042
7a31761c89
d5ca73c22f
6704b2ca5a
e0bbaceec0
45043b9ffc
a56b3737c7
e59eeba0d2
838dfab45b
ddd44e6f62
1390139cbd
f72653c3c1
4b9cd22405
38bda0c834
45c86f12dd
e2e9ae94d7
4837f7fa35
7f054f8320
ad7acde1f4
e5c48a4bb7
fb67617d63
8aa807eec6
0621ed064d
9219f06c5a
4e96489e35
65017da5ac
7b41b9b45a
58c39d51f4
d78e905411
7b10c2d0a3
9714824a5a
3e9b909685
464f7de014
da33b90f94
90194d4b6c
45a83eaa3f
37f87e2511
bd9184db01
101bffd01f
43085a3608
3de7a17105
c229e77151
af370721d4
79fa8efc6a
d5dec38d62
74847c9bed
Makefile (20 changed lines)

@@ -24,18 +24,17 @@ DRM ?= 0

# use libplacebo -
# available for all decoders but for DRM you need LIBPLACEBO_GL
LIBPLACEBO ?= 1
LIBPLACEBO ?= 0
LIBPLACEBO_GL ?= 0

# use YADIF deint - only available with cuvid
#YADIF = 1
YADIF = 1

# use gamma correction
#GAMMA ?= 0

CONFIG := -DDEBUG # remove '#' to enable debug output

#--------------------- no more config needed past this point--------------------------------

# sanitize selections --------
@@ -159,29 +158,29 @@ endif

ifeq ($(LIBPLACEBO_GL),1)
CONFIG += -DPLACEBO_GL -DPLACEBO
LIBS += -lepoxy
LIBS += -lplacebo
_CFLAGS += $(shell pkg-config --cflags libplacebo)
LIBS += $(shell pkg-config --libs epoxy libplacebo)
else
LIBS += -lEGL
LIBS += $(shell pkg-config --libs egl)
endif

ifeq ($(LIBPLACEBO),1)
CONFIG += -DPLACEBO
LIBS += -lEGL
LIBS += -lplacebo
_CFLAGS += $(shell pkg-config --cflags libplacebo)
LIBS += $(shell pkg-config --libs egl libplacebo)
endif

ifeq ($(DRM),1)
PLUGIN = softhddrm
CONFIG += -DUSE_DRM -DVAAPI
_CFLAGS += $(shell pkg-config --cflags libdrm)
LIBS += -lgbm -ldrm -lEGL
LIBS += $(shell pkg-config --libs egl gbm libdrm)
endif

ifeq ($(CUVID),1)
#CONFIG += -DUSE_PIP # PIP support
CONFIG += -DCUVID # enable CUVID decoder
LIBS += -lEGL -lGL
LIBS += $(shell pkg-config --libs egl gl)
ifeq ($(YADIF),1)
CONFIG += -DYADIF # Yadif only with CUVID
endif
@@ -274,6 +273,7 @@ LIBS += -lcuda -lnvcuvid
endif

LIBS += -lGLEW -lGLU -ldl -lglut
#LIBS += -ldl $(shell pkg-config --libs glew glu glut)

### Includes and Defines (add further entries here):

@@ -37,7 +37,6 @@ A software and GPU emulated UHD output device plugin for VDR.
o PIP (Picture-in-Picture) (only for CUVID)

To compile you must have the 'requires' installed.

This is a fork of johns original softhddevice work and I reworked it to support HEVC with CUDA and opengl output.
@@ -83,7 +82,6 @@ You have to adapt the Makefile. There are 3 possible Version that you can build:
runs without X Server. There are several commandline options to select the resolution and refresh rate.
I recommend to use libplacebo and set LIBPLACEBO_GL=1 in the Makefile.

Libplacebo API Version >= 113 is needed.

Install:
@@ -122,6 +120,9 @@ Beginners Guide for libplacebo:

All other settings can be in their default state.

Note for NUC11/12 Users:
Provide paramete -w alsa-no-test to get Audio working.

Beginning with libplacebo API 58 user shaders from mpv are supported. Use -S parameter to set the shader.
The plugins searches the shaders in $ConfigDir/plugins/shaders for the shaders. One example shader is
provided in the shader subdirectory. Copy it to e.g.: /etc/vdr/plugins/shaders and then start
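The Makefile switches and README notes above translate into a short build sequence. A minimal sketch, assuming the X-less DRM variant with libplacebo's OpenGL backend; the install step and the shader file extension are assumptions, not taken from this diff:

    # Build the softhddrm variant: DRM=1 selects PLUGIN = softhddrm plus -DUSE_DRM -DVAAPI,
    # LIBPLACEBO_GL=1 adds -DPLACEBO_GL -DPLACEBO and links epoxy + libplacebo.
    make DRM=1 LIBPLACEBO_GL=1
    make install        # install target assumed from the usual VDR plugin Makefile

    # User shaders (libplacebo API >= 58) are searched in $ConfigDir/plugins/shaders;
    # copy the example shader from the shader/ subdirectory there (file name illustrative).
    cp shader/*.glsl /etc/vdr/plugins/shaders/
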
audio.c (285 changed lines)
@@ -93,6 +93,7 @@ static const AudioModule NoopModule; ///< forward definition of noop module
|
||||
// Variables
|
||||
//----------------------------------------------------------------------------
|
||||
|
||||
char AudioAlsaNotest; ///< disable Audio capbility test
|
||||
char AudioAlsaDriverBroken; ///< disable broken driver message
|
||||
char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix
|
||||
char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix
|
||||
@@ -927,6 +928,7 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough) {
|
||||
const char *device;
|
||||
snd_pcm_t *handle;
|
||||
int err;
|
||||
char tmp[80];
|
||||
|
||||
// &&|| hell
|
||||
if (!(passthrough && ((device = AudioPassthroughDevice) || (device = getenv("ALSA_PASSTHROUGH_DEVICE")))) &&
|
||||
@@ -936,30 +938,42 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough) {
|
||||
if (!AudioDoingInit) { // reduce blabla during init
|
||||
Info(_("audio/alsa: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device);
|
||||
}
|
||||
//printf("audio/alsa: using %sdevice '%s'\n", passthrough ? "pass-through " : "", device);
|
||||
|
||||
//
|
||||
// for AC3 pass-through try to set the non-audio bit, use AES0=6
|
||||
//
|
||||
if (passthrough && AudioAppendAES) {
|
||||
#if 0
|
||||
// FIXME: not yet finished
|
||||
char *buf;
|
||||
const char *s;
|
||||
int n;
|
||||
|
||||
n = strlen(device);
|
||||
buf = alloca(n + sizeof(":AES0=6") + 1);
|
||||
strcpy(buf, device);
|
||||
if (!(s = strchr(buf, ':'))) {
|
||||
// no alsa parameters
|
||||
strcpy(buf + n, ":AES=6");
|
||||
}
|
||||
Debug(3, "audio/alsa: try '%s'\n", buf);
|
||||
#endif
|
||||
}
|
||||
// open none blocking; if device is already used, we don't want wait
|
||||
if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
|
||||
Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err));
|
||||
return NULL;
|
||||
if (!(strchr(device, ':'))) {
|
||||
sprintf(tmp,
|
||||
//"AES0=%d,AES1=%d,AES2=0,AES3=%d",
|
||||
"%s:AES0=%d,AES1=%d,AES2=0",
|
||||
device,
|
||||
IEC958_AES0_NONAUDIO | IEC958_AES0_PRO_EMPHASIS_NONE,
|
||||
IEC958_AES1_CON_ORIGINAL | IEC958_AES1_CON_PCM_CODER);
|
||||
//map_iec958_srate(ao->samplerate));
|
||||
}
|
||||
else {
|
||||
sprintf(tmp,
|
||||
//"AES0=%d,AES1=%d,AES2=0,AES3=%d",
|
||||
"%s,AES0=%d,AES1=%d,AES2=0",
|
||||
device,
|
||||
IEC958_AES0_NONAUDIO | IEC958_AES0_PRO_EMPHASIS_NONE,
|
||||
IEC958_AES1_CON_ORIGINAL | IEC958_AES1_CON_PCM_CODER);
|
||||
//map_iec958_srate(ao->samplerate));
|
||||
}
|
||||
|
||||
printf( "opening device '%s' => '%s'\n", device, tmp);
|
||||
if ((err = snd_pcm_open(&handle, tmp, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0 ) {
|
||||
Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
// open none blocking; if device is already used, we don't want wait
|
||||
if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
|
||||
Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if ((err = snd_pcm_nonblock(handle, 0)) < 0) {
|
||||
@@ -2173,126 +2187,145 @@ found:
|
||||
AudioDoingInit = 1;
|
||||
AudioRingInit();
|
||||
AudioUsedModule->Init();
|
||||
//
|
||||
// Check which channels/rates/formats are supported
|
||||
// FIXME: we force 44.1Khz and 48Khz must be supported equal
|
||||
// FIXME: should use bitmap of channels supported in RatesInHw
|
||||
// FIXME: use loop over sample-rates
|
||||
freq = 44100;
|
||||
AudioRatesInHw[Audio44100] = 0;
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
int tchan;
|
||||
int tfreq;
|
||||
|
||||
tchan = chan;
|
||||
tfreq = freq;
|
||||
if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) {
|
||||
AudioChannelsInHw[chan] = 0;
|
||||
} else {
|
||||
AudioChannelsInHw[chan] = chan;
|
||||
AudioRatesInHw[Audio44100] |= (1 << chan);
|
||||
}
|
||||
}
|
||||
freq = 48000;
|
||||
AudioRatesInHw[Audio48000] = 0;
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
int tchan;
|
||||
int tfreq;
|
||||
if (AudioAlsaNotest) {
|
||||
for (u = 0; u < AudioRatesMax; ++u) {
|
||||
|
||||
AudioChannelMatrix[u][1]=AudioChannelMatrix[u][2]=2;
|
||||
AudioChannelMatrix[u][3]=AudioChannelMatrix[u][4]=4;
|
||||
AudioChannelMatrix[u][5]=AudioChannelMatrix[u][6]=6;
|
||||
AudioChannelMatrix[u][7]=AudioChannelMatrix[u][8]=8;
|
||||
printf("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n", AudioRatesTable[u],
|
||||
AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4],
|
||||
AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]);
|
||||
|
||||
if (!AudioChannelsInHw[chan]) {
|
||||
continue;
|
||||
}
|
||||
tchan = chan;
|
||||
tfreq = freq;
|
||||
if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) {
|
||||
// AudioChannelsInHw[chan] = 0;
|
||||
} else {
|
||||
AudioChannelsInHw[chan] = chan;
|
||||
AudioRatesInHw[Audio48000] |= (1 << chan);
|
||||
}
|
||||
}
|
||||
freq = 192000;
|
||||
AudioRatesInHw[Audio192000] = 0;
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
int tchan;
|
||||
int tfreq;
|
||||
|
||||
if (!AudioChannelsInHw[chan]) {
|
||||
continue;
|
||||
}
|
||||
tchan = chan;
|
||||
tfreq = freq;
|
||||
if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) {
|
||||
// AudioChannelsInHw[chan] = 0;
|
||||
} else {
|
||||
AudioChannelsInHw[chan] = chan;
|
||||
AudioRatesInHw[Audio192000] |= (1 << chan);
|
||||
}
|
||||
AudioChannelsInHw[1]=AudioChannelsInHw[3]=AudioChannelsInHw[4]=AudioChannelsInHw[5]=AudioChannelsInHw[6]=AudioChannelsInHw[7]=AudioChannelsInHw[8]=0;
|
||||
AudioChannelsInHw[2]=2;
|
||||
}
|
||||
// build channel support and conversion table
|
||||
for (u = 0; u < AudioRatesMax; ++u) {
|
||||
else {
|
||||
//
|
||||
// Check which channels/rates/formats are supported
|
||||
// FIXME: we force 44.1Khz and 48Khz must be supported equal
|
||||
// FIXME: should use bitmap of channels supported in RatesInHw
|
||||
// FIXME: use loop over sample-rates
|
||||
freq = 44100;
|
||||
AudioRatesInHw[Audio44100] = 0;
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
AudioChannelMatrix[u][chan] = 0;
|
||||
if (!AudioRatesInHw[u]) { // rate unsupported
|
||||
int tchan;
|
||||
int tfreq;
|
||||
|
||||
tchan = chan;
|
||||
tfreq = freq;
|
||||
if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) {
|
||||
AudioChannelsInHw[chan] = 0;
|
||||
} else {
|
||||
AudioChannelsInHw[chan] = chan;
|
||||
AudioRatesInHw[Audio44100] |= (1 << chan);
|
||||
}
|
||||
}
|
||||
freq = 48000;
|
||||
AudioRatesInHw[Audio48000] = 0;
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
int tchan;
|
||||
int tfreq;
|
||||
|
||||
if (!AudioChannelsInHw[chan]) {
|
||||
continue;
|
||||
}
|
||||
if (AudioChannelsInHw[chan]) {
|
||||
AudioChannelMatrix[u][chan] = chan;
|
||||
tchan = chan;
|
||||
tfreq = freq;
|
||||
if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) {
|
||||
// AudioChannelsInHw[chan] = 0;
|
||||
} else {
|
||||
switch (chan) {
|
||||
case 1:
|
||||
if (AudioChannelsInHw[2]) {
|
||||
AudioChannelMatrix[u][chan] = 2;
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
case 3:
|
||||
if (AudioChannelsInHw[4]) {
|
||||
AudioChannelMatrix[u][chan] = 4;
|
||||
AudioChannelsInHw[chan] = chan;
|
||||
AudioRatesInHw[Audio48000] |= (1 << chan);
|
||||
}
|
||||
}
|
||||
freq = 192000;
|
||||
AudioRatesInHw[Audio192000] = 0;
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
int tchan;
|
||||
int tfreq;
|
||||
|
||||
if (!AudioChannelsInHw[chan]) {
|
||||
continue;
|
||||
}
|
||||
tchan = chan;
|
||||
tfreq = freq;
|
||||
if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) {
|
||||
// AudioChannelsInHw[chan] = 0;
|
||||
} else {
|
||||
AudioChannelsInHw[chan] = chan;
|
||||
AudioRatesInHw[Audio192000] |= (1 << chan);
|
||||
}
|
||||
}
|
||||
// build channel support and conversion table
|
||||
for (u = 0; u < AudioRatesMax; ++u) {
|
||||
for (chan = 1; chan < 9; ++chan) {
|
||||
AudioChannelMatrix[u][chan] = 0;
|
||||
if (!AudioRatesInHw[u]) { // rate unsupported
|
||||
continue;
|
||||
}
|
||||
if (AudioChannelsInHw[chan]) {
|
||||
AudioChannelMatrix[u][chan] = chan;
|
||||
} else {
|
||||
switch (chan) {
|
||||
case 1:
|
||||
if (AudioChannelsInHw[2]) {
|
||||
AudioChannelMatrix[u][chan] = 2;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 4:
|
||||
if (AudioChannelsInHw[5]) {
|
||||
AudioChannelMatrix[u][chan] = 5;
|
||||
case 2:
|
||||
case 3:
|
||||
if (AudioChannelsInHw[4]) {
|
||||
AudioChannelMatrix[u][chan] = 4;
|
||||
break;
|
||||
}
|
||||
case 4:
|
||||
if (AudioChannelsInHw[5]) {
|
||||
AudioChannelMatrix[u][chan] = 5;
|
||||
break;
|
||||
}
|
||||
case 5:
|
||||
if (AudioChannelsInHw[6]) {
|
||||
AudioChannelMatrix[u][chan] = 6;
|
||||
break;
|
||||
}
|
||||
case 6:
|
||||
if (AudioChannelsInHw[7]) {
|
||||
AudioChannelMatrix[u][chan] = 7;
|
||||
break;
|
||||
}
|
||||
case 7:
|
||||
if (AudioChannelsInHw[8]) {
|
||||
AudioChannelMatrix[u][chan] = 8;
|
||||
break;
|
||||
}
|
||||
case 8:
|
||||
if (AudioChannelsInHw[6]) {
|
||||
AudioChannelMatrix[u][chan] = 6;
|
||||
break;
|
||||
}
|
||||
if (AudioChannelsInHw[2]) {
|
||||
AudioChannelMatrix[u][chan] = 2;
|
||||
break;
|
||||
}
|
||||
if (AudioChannelsInHw[1]) {
|
||||
AudioChannelMatrix[u][chan] = 1;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 5:
|
||||
if (AudioChannelsInHw[6]) {
|
||||
AudioChannelMatrix[u][chan] = 6;
|
||||
break;
|
||||
}
|
||||
case 6:
|
||||
if (AudioChannelsInHw[7]) {
|
||||
AudioChannelMatrix[u][chan] = 7;
|
||||
break;
|
||||
}
|
||||
case 7:
|
||||
if (AudioChannelsInHw[8]) {
|
||||
AudioChannelMatrix[u][chan] = 8;
|
||||
break;
|
||||
}
|
||||
case 8:
|
||||
if (AudioChannelsInHw[6]) {
|
||||
AudioChannelMatrix[u][chan] = 6;
|
||||
break;
|
||||
}
|
||||
if (AudioChannelsInHw[2]) {
|
||||
AudioChannelMatrix[u][chan] = 2;
|
||||
break;
|
||||
}
|
||||
if (AudioChannelsInHw[1]) {
|
||||
AudioChannelMatrix[u][chan] = 1;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for (u = 0; u < AudioRatesMax; ++u) {
|
||||
Info(_("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"), AudioRatesTable[u],
|
||||
AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4],
|
||||
AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]);
|
||||
for (u = 0; u < AudioRatesMax; ++u) {
|
||||
Debug(3,"audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n", AudioRatesTable[u],
|
||||
AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4],
|
||||
AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]);
|
||||
}
|
||||
}
|
||||
#ifdef USE_AUDIO_THREAD
|
||||
if (AudioUsedModule->Thread) { // supports threads
|
||||
|
audio.h (1 changed line)

@@ -60,6 +60,7 @@ extern void AudioExit(void); ///< cleanup and exit audio module
// Variables
//----------------------------------------------------------------------------

extern char AudioAlsaNotest; ///< disable Alsa capability test
extern char AudioAlsaDriverBroken; ///< disable broken driver message
extern char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix
extern char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix
codec.c (129 changed lines)
@@ -234,20 +234,6 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#ifdef RASPI
|
||||
switch (codec_id) {
|
||||
case AV_CODEC_ID_MPEG2VIDEO:
|
||||
name = "mpeg2_v4l2m2m";
|
||||
break;
|
||||
case AV_CODEC_ID_H264:
|
||||
name = "h264_v4l2m2m";
|
||||
// name = "h264_mmal";
|
||||
break;
|
||||
case AV_CODEC_ID_HEVC:
|
||||
name = "hevc_v4l2m2m";
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
if (name && (video_codec = avcodec_find_decoder_by_name(name))) {
|
||||
Debug(3, "codec: decoder found\n");
|
||||
@@ -264,31 +250,27 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
Fatal(_("codec: can't allocate video codec context\n"));
|
||||
}
|
||||
|
||||
#ifndef RASPI
|
||||
if (!HwDeviceContext) {
|
||||
Fatal("codec: no hw device context to be used");
|
||||
}
|
||||
decoder->VideoCtx->hw_device_ctx = av_buffer_ref(HwDeviceContext);
|
||||
#else
|
||||
decoder->VideoCtx->pix_fmt = AV_PIX_FMT_DRM_PRIME; /* request a DRM frame */
|
||||
// decoder->VideoCtx->pix_fmt = AV_PIX_FMT_MMAL; /* request a DRM frame
|
||||
//*/
|
||||
#endif
|
||||
|
||||
// FIXME: for software decoder use all cpus, otherwise 1
|
||||
decoder->VideoCtx->thread_count = 1;
|
||||
|
||||
decoder->VideoCtx->pkt_timebase.num = 1;
|
||||
decoder->VideoCtx->pkt_timebase.den = 90000;
|
||||
decoder->VideoCtx->framerate.num = 50;
|
||||
decoder->VideoCtx->framerate.den = 1;
|
||||
//decoder->VideoCtx->framerate.num = 50;
|
||||
//decoder->VideoCtx->framerate.den = 1;
|
||||
|
||||
pthread_mutex_lock(&CodecLockMutex);
|
||||
// open codec
|
||||
#ifdef YADIF
|
||||
deint = 2;
|
||||
#endif
|
||||
#if defined VAAPI && !defined RASPI
|
||||
|
||||
#if defined VAAPI
|
||||
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(59,8,100)
|
||||
// decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1
|
||||
if (video_codec->capabilities & (AV_CODEC_CAP_AUTO_THREADS)) {
|
||||
Debug(3, "codec: auto threads enabled");
|
||||
@@ -299,6 +281,7 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
Debug(3, "codec: supports truncated packets");
|
||||
// decoder->VideoCtx->flags |= CODEC_FLAG_TRUNCATED;
|
||||
}
|
||||
#endif
|
||||
// FIXME: own memory management for video frames.
|
||||
if (video_codec->capabilities & AV_CODEC_CAP_DR1) {
|
||||
Debug(3, "codec: can use own buffer management");
|
||||
@@ -315,22 +298,10 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
}
|
||||
// if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0)
|
||||
// Fatal(_("VAAPI Refcounts invalid\n"));
|
||||
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(59,8,100)
|
||||
decoder->VideoCtx->thread_safe_callbacks = 0;
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef RASPI
|
||||
decoder->VideoCtx->codec_id = codec_id;
|
||||
decoder->VideoCtx->flags |= AV_CODEC_FLAG_BITEXACT;
|
||||
if (video_codec->capabilities & AV_CODEC_CAP_FRAME_THREADS || AV_CODEC_CAP_SLICE_THREADS) {
|
||||
Debug(3, "codec: supports frame threads");
|
||||
decoder->VideoCtx->thread_count = 4;
|
||||
// decoder->VideoCtx->thread_type |= FF_THREAD_FRAME;
|
||||
}
|
||||
if (video_codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
|
||||
Debug(3, "codec: supports slice threads");
|
||||
decoder->VideoCtx->thread_type |= FF_THREAD_SLICE;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CUVID
|
||||
@@ -340,12 +311,12 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
pthread_mutex_unlock(&CodecLockMutex);
|
||||
Fatal(_("codec: can't set option deint to video codec!\n"));
|
||||
}
|
||||
#if 1
|
||||
if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 9, 0) < 0) {
|
||||
|
||||
if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 10, 0) < 0) {
|
||||
pthread_mutex_unlock(&CodecLockMutex);
|
||||
Fatal(_("codec: can't set option surfces to video codec!\n"));
|
||||
}
|
||||
#endif
|
||||
|
||||
if (av_opt_set(decoder->VideoCtx->priv_data, "drop_second_field", "false", 0) < 0) {
|
||||
pthread_mutex_unlock(&CodecLockMutex);
|
||||
Fatal(_("codec: can't set option drop 2.field to video codec!\n"));
|
||||
@@ -355,12 +326,10 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
pthread_mutex_unlock(&CodecLockMutex);
|
||||
Fatal(_("codec: can't set option deint to video codec!\n"));
|
||||
}
|
||||
#if 1
|
||||
if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 13, 0) < 0) {
|
||||
pthread_mutex_unlock(&CodecLockMutex);
|
||||
Fatal(_("codec: can't set option surfces to video codec!\n"));
|
||||
}
|
||||
#endif
|
||||
if (av_opt_set(decoder->VideoCtx->priv_data, "drop_second_field", "false", 0) < 0) {
|
||||
pthread_mutex_unlock(&CodecLockMutex);
|
||||
Fatal(_("codec: can't set option drop 2.field to video codec!\n"));
|
||||
@@ -401,7 +370,7 @@ void CodecVideoOpen(VideoDecoder *decoder, int codec_id) {
|
||||
|
||||
// reset buggy ffmpeg/libav flag
|
||||
decoder->GetFormatDone = 0;
|
||||
#if defined(YADIF) || defined(RASPI)
|
||||
#if defined(YADIF)
|
||||
decoder->filter = 0;
|
||||
#endif
|
||||
}
|
||||
@@ -521,8 +490,7 @@ void CodecVideoDecode(VideoDecoder *decoder, const AVPacket *avpkt) {
|
||||
decoder->filter = 2;
|
||||
}
|
||||
}
|
||||
if (frame->interlaced_frame && decoder->filter == 2 &&
|
||||
(frame->height != 720)) { // broken ZDF sends Interlaced flag
|
||||
if (decoder->filter == 2) {
|
||||
push_filters(video_ctx, decoder->HwDecoder, frame);
|
||||
continue;
|
||||
}
|
||||
@@ -554,7 +522,6 @@ next_part:
|
||||
pkt = avpkt; // use copy
|
||||
got_frame = 0;
|
||||
|
||||
// printf("decode packet %d\n",(GetusTicks()-first_time)/1000000);
|
||||
ret1 = avcodec_send_packet(video_ctx, pkt);
|
||||
|
||||
// first_time = GetusTicks();
|
||||
@@ -574,12 +541,12 @@ next_part:
|
||||
frame = av_frame_alloc();
|
||||
ret = avcodec_receive_frame(video_ctx, frame); // get new frame
|
||||
if (ret >= 0) { // one is avail.
|
||||
first_time = frame->pts;
|
||||
got_frame = 1;
|
||||
} else {
|
||||
got_frame = 0;
|
||||
}
|
||||
// printf("got %s packet from
|
||||
// decoder\n",got_frame?"1":"no");
|
||||
//printf("got %s packet from decoder\n",got_frame?"1":"no");
|
||||
if (got_frame) { // frame completed
|
||||
// printf("video frame pts %#012" PRIx64 "
|
||||
//%dms\n",frame->pts,(int)(apts - frame->pts) / 90);
|
||||
@@ -587,15 +554,14 @@ next_part:
|
||||
if (decoder->filter) {
|
||||
if (decoder->filter == 1) {
|
||||
if (init_filters(video_ctx, decoder->HwDecoder, frame) < 0) {
|
||||
Fatal(_("video: Init of YADIF Filter failed\n"));
|
||||
Debug(3,"video: Init of YADIF Filter failed\n");
|
||||
decoder->filter = 0;
|
||||
} else {
|
||||
Debug(3, "Init YADIF ok\n");
|
||||
decoder->filter = 2;
|
||||
}
|
||||
}
|
||||
if (frame->interlaced_frame && decoder->filter == 2 &&
|
||||
(frame->height != 720)) { // broken ZDF sends Interlaced flag
|
||||
if (decoder->filter == 2) {
|
||||
ret = push_filters(video_ctx, decoder->HwDecoder, frame);
|
||||
// av_frame_unref(frame);
|
||||
continue;
|
||||
@@ -751,7 +717,15 @@ void CodecAudioOpen(AudioDecoder *audio_decoder, int codec_id) {
|
||||
}
|
||||
|
||||
if (CodecDownmix) {
|
||||
audio_decoder->AudioCtx->request_channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,61,100)
|
||||
audio_decoder->AudioCtx->request_channels = 2;
|
||||
#endif
|
||||
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(59,24,100)
|
||||
audio_decoder->AudioCtx->request_channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
#else
|
||||
AVChannelLayout dmlayout = AV_CHANNEL_LAYOUT_STEREO;
|
||||
av_opt_set_chlayout(audio_decoder->AudioCtx->priv_data, "downmix", &dmlayout, 0);
|
||||
#endif
|
||||
}
|
||||
pthread_mutex_lock(&CodecLockMutex);
|
||||
// open codec
|
||||
@@ -905,7 +879,7 @@ static int CodecAudioUpdateHelper(AudioDecoder *audio_decoder, int *passthrough)
|
||||
int err;
|
||||
|
||||
audio_ctx = audio_decoder->AudioCtx;
|
||||
Debug(3, "codec/audio: format change %s %dHz *%d channels%s%s%s%s%s\n",
|
||||
Debug(3, "codec/audio: Chanlayout %lx format change %s %dHz *%d channels%s%s%s%s%s\n",audio_ctx->channel_layout,
|
||||
av_get_sample_fmt_name(audio_ctx->sample_fmt), audio_ctx->sample_rate, audio_ctx->channels,
|
||||
CodecPassthrough & CodecPCM ? " PCM" : "", CodecPassthrough & CodecMPA ? " MPA" : "",
|
||||
CodecPassthrough & CodecAC3 ? " AC-3" : "", CodecPassthrough & CodecEAC3 ? " E-AC-3" : "",
|
||||
@@ -923,13 +897,17 @@ static int CodecAudioUpdateHelper(AudioDecoder *audio_decoder, int *passthrough)
|
||||
(CodecPassthrough & CodecEAC3 && audio_ctx->codec_id == AV_CODEC_ID_EAC3)) {
|
||||
if (audio_ctx->codec_id == AV_CODEC_ID_EAC3) {
|
||||
// E-AC-3 over HDMI some receivers need HBR
|
||||
audio_decoder->HwSampleRate *= 4;
|
||||
//audio_decoder->HwSampleRate *= 4;
|
||||
}
|
||||
audio_decoder->HwChannels = 2;
|
||||
audio_decoder->SpdifIndex = 0; // reset buffer
|
||||
audio_decoder->SpdifCount = 0;
|
||||
*passthrough = 1;
|
||||
}
|
||||
|
||||
if (audio_decoder->HwChannels > 2 && CodecDownmix) {
|
||||
audio_decoder->HwChannels = 2;
|
||||
}
|
||||
// channels/sample-rate not support?
|
||||
if ((err = AudioSetup(&audio_decoder->HwSampleRate, &audio_decoder->HwChannels, *passthrough))) {
|
||||
|
||||
@@ -1195,14 +1173,43 @@ static void CodecAudioUpdateFormat(AudioDecoder *audio_decoder) {
|
||||
}
|
||||
#endif
|
||||
|
||||
audio_decoder->Resample = swr_alloc_set_opts(audio_decoder->Resample, audio_ctx->channel_layout, AV_SAMPLE_FMT_S16,
|
||||
audio_decoder->HwSampleRate, audio_ctx->channel_layout,
|
||||
audio_ctx->sample_fmt, audio_ctx->sample_rate, 0, NULL);
|
||||
if (audio_decoder->Resample) {
|
||||
swr_init(audio_decoder->Resample);
|
||||
#if LIBSWRESAMPLE_VERSION_INT < AV_VERSION_INT(4,5,100)
|
||||
if (audio_decoder->Channels > 2 && CodecDownmix) {
|
||||
audio_decoder->Resample = swr_alloc_set_opts(audio_decoder->Resample,
|
||||
AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, audio_decoder->HwSampleRate,
|
||||
audio_ctx->channel_layout, audio_ctx->sample_fmt,audio_ctx->sample_rate,
|
||||
0, NULL);
|
||||
} else {
|
||||
Error(_("codec/audio: can't setup resample\n"));
|
||||
audio_decoder->Resample = swr_alloc_set_opts(audio_decoder->Resample, audio_ctx->channel_layout,
|
||||
AV_SAMPLE_FMT_S16, audio_decoder->HwSampleRate,
|
||||
audio_ctx->channel_layout, audio_ctx->sample_fmt,
|
||||
audio_ctx->sample_rate, 0, NULL);
|
||||
}
|
||||
#else
|
||||
if (audio_decoder->Channels > 2 && CodecDownmix) { // Codec does not Support Downmix
|
||||
//printf("last ressort downmix Layout in %lx Lyout out: %llx \n",audio_ctx->channel_layout,AV_CH_LAYOUT_STEREO);
|
||||
audio_decoder->Resample = swr_alloc();
|
||||
av_opt_set_channel_layout(audio_decoder->Resample, "in_channel_layout",audio_ctx->channel_layout, 0);
|
||||
av_opt_set_channel_layout(audio_decoder->Resample, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
|
||||
av_opt_set_int(audio_decoder->Resample, "in_sample_rate", audio_ctx->sample_rate, 0);
|
||||
av_opt_set_int(audio_decoder->Resample, "out_sample_rate", audio_ctx->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(audio_decoder->Resample, "in_sample_fmt", audio_ctx->sample_fmt, 0);
|
||||
av_opt_set_sample_fmt(audio_decoder->Resample, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
}
|
||||
else {
|
||||
swr_alloc_set_opts2(&audio_decoder->Resample, &audio_ctx->ch_layout,
|
||||
AV_SAMPLE_FMT_S16, audio_decoder->HwSampleRate,
|
||||
&audio_ctx->ch_layout, audio_ctx->sample_fmt,
|
||||
audio_ctx->sample_rate, 0, NULL);
|
||||
}
|
||||
#endif
|
||||
if (audio_decoder->Resample) {
|
||||
swr_init(audio_decoder->Resample);
|
||||
} else {
|
||||
Error(_("codec/audio: can't setup resample\n"));
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1254,13 +1261,15 @@ void CodecAudioDecode(AudioDecoder *audio_decoder, const AVPacket *avpkt) {
|
||||
if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (audio_decoder->Resample) {
|
||||
uint8_t outbuf[8192 * 2 * 8];
|
||||
uint8_t outbuf[8192 * 2 * 8];
|
||||
uint8_t *out[1];
|
||||
|
||||
out[0] = outbuf;
|
||||
ret = swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels),
|
||||
(const uint8_t **)frame->extended_data, frame->nb_samples);
|
||||
|
||||
if (ret > 0) {
|
||||
if (!(audio_decoder->Passthrough & CodecPCM)) {
|
||||
CodecReorderAudioFrame((int16_t *)outbuf, ret * 2 * audio_decoder->HwChannels,
|
||||
|
drm.c (29 changed lines)

@@ -168,6 +168,7 @@ void set_video_mode(int width, int height) {
drmModeConnector *connector;
drmModeModeInfo *mode;
int ii;
printf("Set video mode %d &%d\n",width,height);
if (height != 1080 && height != 2160)
return;
connector = drmModeGetConnector(render->fd_drm, render->connector_id);
@@ -206,11 +207,7 @@ static int FindDevice(VideoRender *render) {
int i, ii = 0;
char connectorstr[10];
int found = 0;
#ifdef RASPI
render->fd_drm = open("/dev/dri/card1", O_RDWR);
#else
render->fd_drm = open("/dev/dri/card0", O_RDWR);
#endif
if (render->fd_drm < 0) {
fprintf(stderr, "FindDevice: cannot open /dev/dri/card0: %m\n");
return -errno;
@@ -278,10 +275,12 @@ static int FindDevice(VideoRender *render) {
connector->connector_type_id);
printf("Connector >%s< is %sconnected\n", connectorstr,
connector->connection == DRM_MODE_CONNECTED ? "" : "not ");
Debug(3,"Connector >%s< is %sconnected\n", connectorstr,
connector->connection == DRM_MODE_CONNECTED ? "" : "not ");
if (DRMConnector && strcmp(DRMConnector, connectorstr))
continue;

if (connector->connection == DRM_MODE_CONNECTED && connector->count_modes > 0) {
if (/*connector->connection == DRM_MODE_CONNECTED && */ connector->count_modes > 0) {
float aspect = (float)connector->mmWidth / (float)connector->mmHeight;
if ((aspect > 1.70) && (aspect < 1.85)) {
render->mmHeight = 90;
@@ -309,7 +308,7 @@ static int FindDevice(VideoRender *render) {
mode = &connector->modes[ii];

printf("Mode %d %dx%d Rate %d\n", ii, mode->hdisplay, mode->vdisplay, mode->vrefresh);

Debug(3,"Mode %d %dx%d Rate %d\n", ii, mode->hdisplay, mode->vdisplay, mode->vrefresh);
if (VideoWindowWidth && VideoWindowHeight) { // preset by command line
if (VideoWindowWidth == mode->hdisplay && VideoWindowHeight == mode->vdisplay &&
mode->vrefresh == DRMRefresh && !(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
@@ -328,11 +327,16 @@ static int FindDevice(VideoRender *render) {
found = 1;
i = resources->count_connectors; // uuuuhh
}
VideoWindowWidth = render->mode.hdisplay;
VideoWindowHeight = render->mode.vdisplay;
if (found)

if (found) {
VideoWindowWidth = render->mode.hdisplay;
VideoWindowHeight = render->mode.vdisplay;

printf("Use Mode %d %dx%d Rate %d\n", ii, render->mode.hdisplay, render->mode.vdisplay,
render->mode.vrefresh);
Debug(3,"Use Mode %d %dx%d Rate %d\n", ii, render->mode.hdisplay, render->mode.vdisplay,
render->mode.vrefresh);
}
drmModeFreeConnector(connector);
}
if (!found) {
@@ -372,11 +376,7 @@ static int FindDevice(VideoRender *render) {
for (k = 0; k < plane->count_formats; k++) {
if (encoder->possible_crtcs & plane->possible_crtcs) {
switch (plane->formats[k]) {
#ifdef RASPI
case DRM_FORMAT_ARGB8888:
#else
case DRM_FORMAT_XRGB2101010:
#endif
if (!render->video_plane) {
render->video_plane = plane->plane_id;
}
@@ -492,6 +492,7 @@ static void drm_swap_buffers() {
uint32_t fb;

eglSwapBuffers(eglDisplay, eglSurface);
usleep(1000);
struct gbm_bo *bo = gbm_surface_lock_front_buffer(gbm.surface);
#if 1
if (bo == NULL)
@@ -630,4 +631,4 @@ static void drm_clean_up() {
close(render->fd_drm);
eglDisplay = NULL;
free(render);
}
}
openglosd.cpp (105 changed lines)
@@ -20,12 +20,8 @@ void ConvertColor(const GLint &colARGB, glm::vec4 &col) {
|
||||
#ifdef CUVID
|
||||
const char *glversion = "#version 330 core ";
|
||||
#else
|
||||
#ifdef RASPI
|
||||
const char *glversion = "#version 300 es";
|
||||
#else
|
||||
const char *glversion = "#version 300 es ";
|
||||
#endif
|
||||
#endif
|
||||
|
||||
const char *rectVertexShader = "%s\n \
|
||||
\
|
||||
@@ -730,14 +726,15 @@ bool cOglCmdDeleteFb::Execute(void) {
|
||||
|
||||
//------------------ cOglCmdRenderFbToBufferFb --------------------
|
||||
cOglCmdRenderFbToBufferFb::cOglCmdRenderFbToBufferFb(cOglFb *fb, cOglFb *buffer, GLint x, GLint y, GLint transparency,
|
||||
GLint drawPortX, GLint drawPortY)
|
||||
GLint drawPortX, GLint drawPortY, bool alphablending)
|
||||
: cOglCmd(fb) {
|
||||
this->buffer = buffer;
|
||||
this->x = (GLfloat)x;
|
||||
this->y = (GLfloat)y;
|
||||
this->drawPortX = (GLfloat)drawPortX;
|
||||
this->drawPortY = (GLfloat)drawPortY;
|
||||
this->transparency = transparency;
|
||||
this->transparency = (alphablending ? transparency : ALPHA_OPAQUE);
|
||||
this->alphablending = alphablending;
|
||||
}
|
||||
|
||||
bool cOglCmdRenderFbToBufferFb::Execute(void) {
|
||||
@@ -777,11 +774,14 @@ bool cOglCmdRenderFbToBufferFb::Execute(void) {
|
||||
|
||||
if (!fb->BindTexture())
|
||||
return false;
|
||||
|
||||
if (!alphablending)
|
||||
VertexBuffers[vbTexture]->DisableBlending();
|
||||
VertexBuffers[vbTexture]->Bind();
|
||||
VertexBuffers[vbTexture]->SetVertexData(quadVertices);
|
||||
VertexBuffers[vbTexture]->DrawArrays();
|
||||
VertexBuffers[vbTexture]->Unbind();
|
||||
if (!alphablending)
|
||||
VertexBuffers[vbTexture]->EnableBlending();
|
||||
buffer->Unbind();
|
||||
|
||||
return true;
|
||||
@@ -837,6 +837,8 @@ cOglCmdDrawRectangle::cOglCmdDrawRectangle(cOglFb *fb, GLint x, GLint y, GLint w
|
||||
}
|
||||
|
||||
bool cOglCmdDrawRectangle::Execute(void) {
|
||||
if (width <= 0 || height <= 0)
|
||||
return false;
|
||||
GLfloat x1 = x;
|
||||
GLfloat y1 = y;
|
||||
GLfloat x2 = x + width;
|
||||
@@ -883,6 +885,8 @@ cOglCmdDrawEllipse::cOglCmdDrawEllipse(cOglFb *fb, GLint x, GLint y, GLint width
|
||||
}
|
||||
|
||||
bool cOglCmdDrawEllipse::Execute(void) {
|
||||
if (width <= 0 || height <= 0)
|
||||
return false;
|
||||
int numVertices = 0;
|
||||
GLfloat *vertices = NULL;
|
||||
|
||||
@@ -1090,6 +1094,8 @@ cOglCmdDrawSlope::cOglCmdDrawSlope(cOglFb *fb, GLint x, GLint y, GLint width, GL
|
||||
}
|
||||
|
||||
bool cOglCmdDrawSlope::Execute(void) {
|
||||
if (width <= 0 || height <= 0)
|
||||
return false;
|
||||
bool falling = type & 0x02;
|
||||
bool vertical = type & 0x04;
|
||||
|
||||
@@ -1177,7 +1183,7 @@ cOglCmdDrawText::~cOglCmdDrawText(void) { free(symbols); }
|
||||
bool cOglCmdDrawText::Execute(void) {
|
||||
cOglFont *f = cOglFont::Get(*fontName, fontSize);
|
||||
|
||||
if (!f)
|
||||
if (!f || !symbols[0])
|
||||
return false;
|
||||
|
||||
VertexBuffers[vbText]->ActivateShader();
|
||||
@@ -1256,10 +1262,13 @@ cOglCmdDrawImage::cOglCmdDrawImage(cOglFb *fb, tColor *argb, GLint width, GLint
|
||||
cOglCmdDrawImage::~cOglCmdDrawImage(void) { free(argb); }
|
||||
|
||||
bool cOglCmdDrawImage::Execute(void) {
|
||||
if (width <= 0 || height <= 0)
|
||||
return false;
|
||||
GLuint texture;
|
||||
|
||||
#ifdef USE_DRM
|
||||
// pthread_mutex_lock(&OSDMutex);
|
||||
//esyslog("upload Image\n");
|
||||
pthread_mutex_lock(&OSDMutex);
|
||||
GlxDrawopengl(); // here we need the Shared Context for upload
|
||||
GlxCheck();
|
||||
#endif
|
||||
@@ -1272,16 +1281,16 @@ bool cOglCmdDrawImage::Execute(void) {
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
glFlush();
|
||||
#ifdef USE_DRM
|
||||
#ifdef USE_DRM
|
||||
GlxInitopengl(); // Reset Context
|
||||
GlxCheck();
|
||||
// pthread_mutex_unlock(&OSDMutex);
|
||||
pthread_mutex_unlock(&OSDMutex);
|
||||
#endif
|
||||
|
||||
GLfloat x1 = x; // left
|
||||
GLfloat y1 = y; // top
|
||||
GLfloat x2 = x + width; // right
|
||||
GLfloat y2 = y + height; // bottom
|
||||
GLfloat x1 = x; // left
|
||||
GLfloat y1 = y; // top
|
||||
GLfloat x2 = x + width * scaleX; // right
|
||||
GLfloat y2 = y + height * scaleY; // bottom
|
||||
|
||||
GLfloat quadVertices[] = {
|
||||
x1, y2, 0.0, 1.0, // left bottom
|
||||
@@ -1315,17 +1324,21 @@ bool cOglCmdDrawImage::Execute(void) {
|
||||
}
|
||||
|
||||
//------------------ cOglCmdDrawTexture --------------------
|
||||
cOglCmdDrawTexture::cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y) : cOglCmd(fb) {
|
||||
cOglCmdDrawTexture::cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y, double scaleX, double scaleY) : cOglCmd(fb) {
|
||||
this->imageRef = imageRef;
|
||||
this->x = x;
|
||||
this->y = y;
|
||||
this->scaleX = scaleX;
|
||||
this->scaleY = scaleY;
|
||||
}
|
||||
|
||||
bool cOglCmdDrawTexture::Execute(void) {
|
||||
GLfloat x1 = x; // top
|
||||
GLfloat y1 = y; // left
|
||||
GLfloat x2 = x + imageRef->width; // right
|
||||
GLfloat y2 = y + imageRef->height; // bottom
|
||||
if (imageRef->width <= 0 || imageRef->height <= 0)
|
||||
return false;
|
||||
GLfloat x1 = x; // top
|
||||
GLfloat y1 = y; // left
|
||||
GLfloat x2 = x + imageRef->width * scaleX; // right
|
||||
GLfloat y2 = y + imageRef->height * scaleY; // bottom
|
||||
|
||||
GLfloat quadVertices[] = {
|
||||
// Pos // TexCoords
|
||||
@@ -1363,7 +1376,8 @@ cOglCmdStoreImage::~cOglCmdStoreImage(void) { free(data); }
|
||||
|
||||
bool cOglCmdStoreImage::Execute(void) {
|
||||
#ifdef USE_DRM
|
||||
// pthread_mutex_lock(&OSDMutex);
|
||||
return false;
|
||||
pthread_mutex_lock(&OSDMutex);
|
||||
GlxDrawopengl(); // here we need the Shared Context for upload
|
||||
GlxCheck();
|
||||
#endif
|
||||
@@ -1380,7 +1394,7 @@ bool cOglCmdStoreImage::Execute(void) {
|
||||
#ifdef USE_DRM
|
||||
GlxInitopengl(); // Reset Context
|
||||
GlxCheck();
|
||||
// pthread_mutex_lock(&OSDMutex);
|
||||
pthread_mutex_lock(&OSDMutex);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
@@ -1460,7 +1474,9 @@ int cOglThread::StoreImage(const cImage &image) {
|
||||
if (!maxCacheSize) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef USE_DRM
|
||||
return 0;
|
||||
#endif
|
||||
if (image.Width() > maxTextureSize || image.Height() > maxTextureSize) {
|
||||
esyslog("[softhddev] cannot store image of %dpx x %dpx "
|
||||
"(maximum size is %dpx x %dpx) - falling back to "
|
||||
@@ -1640,6 +1656,7 @@ void cOglThread::Action(void) {
|
||||
|
||||
bool cOglThread::InitOpenGL(void) {
|
||||
#ifdef USE_DRM
|
||||
esyslog("InitOpenGL\n");
|
||||
GlxInitopengl();
|
||||
#else
|
||||
const char *displayName = X11DisplayName;
|
||||
@@ -1651,7 +1668,7 @@ bool cOglThread::InitOpenGL(void) {
|
||||
}
|
||||
}
|
||||
|
||||
dsyslog("[softhddev]OpenGL using display %s", displayName);
|
||||
esyslog("[softhddev]OpenGL using display %s", displayName);
|
||||
|
||||
int argc = 3;
|
||||
char *buffer[3];
|
||||
@@ -1815,6 +1832,10 @@ void cOglPixmap::Fill(tColor Color) {
|
||||
}
|
||||
|
||||
void cOglPixmap::DrawImage(const cPoint &Point, const cImage &Image) {
|
||||
DrawScaledImage(Point, Image);
|
||||
}
|
||||
|
||||
void cOglPixmap::DrawScaledImage(const cPoint &Point, const cImage &Image, double FactorX, double FactorY, bool AntiAlias) {
|
||||
if (!oglThread->Active())
|
||||
return;
|
||||
tColor *argb = MALLOC(tColor, Image.Width() * Image.Height());
|
||||
@@ -1823,19 +1844,25 @@ void cOglPixmap::DrawImage(const cPoint &Point, const cImage &Image) {
|
||||
return;
|
||||
memcpy(argb, Image.Data(), sizeof(tColor) * Image.Width() * Image.Height());
|
||||
|
||||
oglThread->DoCmd(new cOglCmdDrawImage(fb, argb, Image.Width(), Image.Height(), Point.X(), Point.Y()));
|
||||
oglThread->DoCmd(new cOglCmdDrawImage(fb, argb, Image.Width(), Image.Height(), Point.X(), Point.Y(), true, FactorX, FactorY));
|
||||
SetDirty();
|
||||
MarkDrawPortDirty(cRect(Point, cSize(Image.Width(), Image.Height())).Intersected(DrawPort().Size()));
|
||||
MarkDrawPortDirty(cRect(Point, cSize(Image.Width() * FactorX, Image.Height() * FactorY)).Intersected(DrawPort().Size()));
|
||||
}
|
||||
|
||||
void cOglPixmap::DrawImage(const cPoint &Point, int ImageHandle) {
|
||||
DrawScaledImage(Point, ImageHandle);
|
||||
}
|
||||
|
||||
void cOglPixmap::DrawScaledImage(const cPoint &Point, int ImageHandle, double FactorX, double FactorY, bool AntiAlias) {
|
||||
if (!oglThread->Active())
|
||||
return;
|
||||
|
||||
if (ImageHandle < 0 && oglThread->GetImageRef(ImageHandle)) {
|
||||
sOglImage *img = oglThread->GetImageRef(ImageHandle);
|
||||
|
||||
oglThread->DoCmd(new cOglCmdDrawTexture(fb, img, Point.X(), Point.Y()));
|
||||
oglThread->DoCmd(new cOglCmdDrawTexture(fb, img, Point.X(), Point.Y(), FactorX, FactorY));
|
||||
SetDirty();
|
||||
MarkDrawPortDirty(cRect(Point, cSize(img->width * FactorX, img->height * FactorY)).Intersected(DrawPort().Size()));
|
||||
}
|
||||
/*
|
||||
Fallback to VDR implementation, needs to separate cSoftOsdProvider from
|
||||
@@ -1843,8 +1870,6 @@ void cOglPixmap::DrawImage(const cPoint &Point, int ImageHandle) {
|
||||
DrawImage(Point, *cSoftOsdProvider::GetImageData(ImageHandle));
|
||||
}
|
||||
*/
|
||||
SetDirty();
|
||||
MarkDrawPortDirty(DrawPort());
|
||||
}
|
||||
|
||||
void cOglPixmap::DrawPixel(const cPoint &Point, tColor Color) {
|
||||
@@ -2085,6 +2110,14 @@ cPixmap *cOglOsd::CreatePixmap(int Layer, const cRect &ViewPort, const cRect &Dr
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
extern "C" {void VideoSetOsdSize(int, int ) ;}
|
||||
|
||||
void SetOsdPosition(int Left, int Top, int Width, int Height) {
|
||||
printf("Set OSD Position %d %d\n",Width,Height);
|
||||
VideoSetOsdSize( Width, Height) ;
|
||||
}
|
||||
|
||||
void cOglOsd::DestroyPixmap(cPixmap *Pixmap) {
|
||||
if (!oglThread->Active())
|
||||
return;
|
||||
@@ -2130,10 +2163,16 @@ void cOglOsd::Flush(void) {
|
||||
for (int i = 0; i < oglPixmaps.Size(); i++) {
|
||||
if (oglPixmaps[i]) {
|
||||
if (oglPixmaps[i]->Layer() == layer) {
|
||||
oglThread->DoCmd(new cOglCmdRenderFbToBufferFb(
|
||||
oglPixmaps[i]->Fb(), bFb, oglPixmaps[i]->ViewPort().X(),
|
||||
(!isSubtitleOsd) ? oglPixmaps[i]->ViewPort().Y() : 0, oglPixmaps[i]->Alpha(),
|
||||
oglPixmaps[i]->DrawPort().X(), oglPixmaps[i]->DrawPort().Y()));
|
||||
bool alphablending = layer == 0 ? false : true; // Decide wether to render (with alpha) or copy a pixmap
|
||||
oglThread->DoCmd(new cOglCmdRenderFbToBufferFb( oglPixmaps[i]->Fb(),
|
||||
bFb,
|
||||
oglPixmaps[i]->ViewPort().X(),
|
||||
(!isSubtitleOsd) ? oglPixmaps[i]->ViewPort().Y() : 0,
|
||||
oglPixmaps[i]->Alpha(),
|
||||
oglPixmaps[i]->DrawPort().X(),
|
||||
oglPixmaps[i]->DrawPort().Y(),
|
||||
alphablending
|
||||
));
|
||||
oglPixmaps[i]->SetDirty(false);
|
||||
}
|
||||
}
|
||||
@@ -290,10 +290,11 @@ class cOglCmdRenderFbToBufferFb : public cOglCmd {
GLfloat x, y;
GLfloat drawPortX, drawPortY;
GLint transparency;
GLint alphablending;

public:
cOglCmdRenderFbToBufferFb(cOglFb *fb, cOglFb *buffer, GLint x, GLint y, GLint transparency, GLint drawPortX,
GLint drawPortY);
GLint drawPortY, bool alphablending);
virtual ~cOglCmdRenderFbToBufferFb(void){};
virtual const char *Description(void) { return "Render Framebuffer to Buffer"; }
virtual bool Execute(void);
@@ -402,9 +403,10 @@ class cOglCmdDrawTexture : public cOglCmd {
private:
sOglImage *imageRef;
GLint x, y;
GLfloat scaleX, scaleY;

public:
cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y);
cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y, double scaleX = 1.0f, double scaleY = 1.0f);
virtual ~cOglCmdDrawTexture(void){};
virtual const char *Description(void) { return "Draw Texture"; }
virtual bool Execute(void);
@@ -500,6 +502,8 @@ class cOglPixmap : public cPixmap {
virtual void Fill(tColor Color);
virtual void DrawImage(const cPoint &Point, const cImage &Image);
virtual void DrawImage(const cPoint &Point, int ImageHandle);
virtual void DrawScaledImage(const cPoint &Point, const cImage &Image, double FactorX = 1.0f, double FactorY = 1.0f, bool AntiAlias = false);
virtual void DrawScaledImage(const cPoint &Point, int ImageHandle, double FactorX = 1.0f, double FactorY = 1.0f, bool AntiAlias = false);
virtual void DrawPixel(const cPoint &Point, tColor Color);
virtual void DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0,
bool Overlay = false);
@@ -528,6 +532,7 @@ class cOglOsd : public cOsd {
public:
cOglOsd(int Left, int Top, uint Level, std::shared_ptr<cOglThread> oglThread);
virtual ~cOglOsd();
static void SetOsdPosition(int Left, int Top, int Width, int Height);
virtual eOsdError SetAreas(const tArea *Areas, int NumAreas);
virtual cPixmap *CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null);
virtual void DestroyPixmap(cPixmap *Pixmap);

@@ -4,12 +4,8 @@
#ifdef CUVID
const char *gl_version = "#version 330";
#else
#ifdef RASPI
const char *gl_version = "#version 300 es";
#else
const char *gl_version = "#version 300 es ";
#endif
#endif

/* Color conversion matrix: RGB = m * YUV + c
 * m is in row-major matrix, with m[row][col], e.g.:
softhdcuvid.cpp (113 changed lines)
@@ -61,7 +61,7 @@ extern void ToggleLUT();
|
||||
/// vdr-plugin version number.
|
||||
/// Makefile extracts the version number for generating the file name
|
||||
/// for the distribution archive.
|
||||
static const char *const VERSION = "3.6"
|
||||
static const char *const VERSION = "3.17"
|
||||
#ifdef GIT_REV
|
||||
"-GIT" GIT_REV
|
||||
#endif
|
||||
@@ -642,36 +642,107 @@ void cSoftOsd::Flush(void) {
|
||||
|
||||
#ifdef USE_OPENGLOSD
|
||||
|
||||
//Dummy Pixmap for skins
|
||||
// Dummy Pixmap for skins
|
||||
class cDummyPixmap : public cPixmap {
|
||||
public:
|
||||
cDummyPixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null) : cPixmap(Layer, ViewPort, DrawPort) {}
|
||||
public:
|
||||
cDummyPixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null)
|
||||
: cPixmap(Layer, ViewPort, DrawPort) {}
|
||||
virtual ~cDummyPixmap(void) {}
|
||||
virtual void Clear(void) {}
|
||||
virtual void Fill(tColor Color) { (void)Color; }
|
||||
virtual void DrawImage(const cPoint &Point, const cImage &Image) { (void)Point; (void)Image; }
|
||||
virtual void DrawImage(const cPoint &Point, int ImageHandle) { (void)Point; (void)ImageHandle; }
|
||||
virtual void DrawPixel(const cPoint &Point, tColor Color) { (void)Point; (void)Color; }
|
||||
virtual void DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0, bool Overlay = false) {
|
||||
(void) Point; (void)Bitmap; (void)ColorFg; (void)ColorBg; (void)Overlay; }
|
||||
virtual void DrawText(const cPoint &Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font, int Width = 0, int Height = 0, int Alignment = taDefault) {
|
||||
(void)Point; (void)s; (void)ColorFg; (void) ColorBg; (void) Font; (void)Width; (void)Height; (void)Alignment; }
|
||||
virtual void DrawRectangle(const cRect &Rect, tColor Color) { (void)Rect; (void)Color; }
|
||||
virtual void DrawEllipse(const cRect &Rect, tColor Color, int Quadrants = 0) { (void)Rect; (void)Color; (void)Quadrants; }
|
||||
virtual void DrawSlope(const cRect &Rect, tColor Color, int Type) { (void)Rect; (void)Color; (void)Type; }
|
||||
virtual void Render(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest) { (void)Pixmap; (void)Source; (void)Dest; }
|
||||
virtual void Copy(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest) { (void)Pixmap; (void)Source; (void)Dest; }
|
||||
virtual void Scroll(const cPoint &Dest, const cRect &Source = cRect::Null) { (void)Dest; (void)Source; }
|
||||
virtual void Pan(const cPoint &Dest, const cRect &Source = cRect::Null) { (void)Dest; (void)Source; }
|
||||
virtual void DrawImage(const cPoint &Point, const cImage &Image) {
|
||||
(void)Point;
|
||||
(void)Image;
|
||||
}
|
||||
virtual void DrawImage(const cPoint &Point, int ImageHandle) {
|
||||
(void)Point;
|
||||
(void)ImageHandle;
|
||||
}
|
||||
virtual void DrawScaledImage(const cPoint &Point, const cImage &Image, double FactorX, double FactorY, bool AntiAlias) {
|
||||
(void)Point;
|
||||
(void)Image;
|
||||
(void)FactorX;
|
||||
(void)FactorY;
|
||||
(void)AntiAlias;
|
||||
}
|
||||
virtual void DrawScaledImage(const cPoint &Point, int ImageHandle, double FactorX, double FactorY, bool AntiAlias) {
|
||||
(void)Point;
|
||||
(void)ImageHandle;
|
||||
(void)FactorX;
|
||||
(void)FactorY;
|
||||
(void)AntiAlias;
|
||||
}
|
||||
virtual void DrawPixel(const cPoint &Point, tColor Color) {
|
||||
(void)Point;
|
||||
(void)Color;
|
||||
}
|
||||
virtual void DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0,
|
||||
bool Overlay = false) {
|
||||
(void)Point;
|
||||
(void)Bitmap;
|
||||
(void)ColorFg;
|
||||
(void)ColorBg;
|
||||
(void)Overlay;
|
||||
}
|
||||
virtual void DrawText(const cPoint &Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font,
|
||||
int Width = 0, int Height = 0, int Alignment = taDefault) {
|
||||
(void)Point;
|
||||
(void)s;
|
||||
(void)ColorFg;
|
||||
(void)ColorBg;
|
||||
(void)Font;
|
||||
(void)Width;
|
||||
(void)Height;
|
||||
(void)Alignment;
|
||||
}
|
||||
virtual void DrawRectangle(const cRect &Rect, tColor Color) {
|
||||
(void)Rect;
|
||||
(void)Color;
|
||||
}
|
||||
virtual void DrawEllipse(const cRect &Rect, tColor Color, int Quadrants = 0) {
|
||||
(void)Rect;
|
||||
(void)Color;
|
||||
(void)Quadrants;
|
||||
}
|
||||
virtual void DrawSlope(const cRect &Rect, tColor Color, int Type) {
|
||||
(void)Rect;
|
||||
(void)Color;
|
||||
(void)Type;
|
||||
}
|
||||
virtual void Render(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest) {
|
||||
(void)Pixmap;
|
||||
(void)Source;
|
||||
(void)Dest;
|
||||
}
|
||||
virtual void Copy(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest) {
|
||||
(void)Pixmap;
|
||||
(void)Source;
|
||||
(void)Dest;
|
||||
}
|
||||
virtual void Scroll(const cPoint &Dest, const cRect &Source = cRect::Null) {
|
||||
(void)Dest;
|
||||
(void)Source;
|
||||
}
|
||||
virtual void Pan(const cPoint &Dest, const cRect &Source = cRect::Null) {
|
||||
(void)Dest;
|
||||
(void)Source;
|
||||
}
|
||||
};
|
||||
|
||||
// Dummy OSD for OpenGL OSD if no X Server is available
|
||||
class cDummyOsd : public cOsd {
|
||||
private:
|
||||
cDummyPixmap *p;
|
||||
|
||||
public:
|
||||
cDummyOsd(int Left, int Top, uint Level) : cOsd(Left, Top, Level) {}
|
||||
virtual ~cDummyOsd() {}
|
||||
static void SetOsdPosition(int Left, int Top, int Width, int Height) {
|
||||
(void) Left;
|
||||
(void) Top;
|
||||
(void) Width;
|
||||
(void) Height;
|
||||
}
|
||||
virtual cPixmap *CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null) {
|
||||
p = new cDummyPixmap(Layer, ViewPort, DrawPort);
|
||||
return p;
|
||||
@@ -1096,9 +1167,9 @@ void cMenuSetupSoft::Create(void) {
|
||||
|
||||
if (scalers == 0) {
|
||||
scalingtest[0] = (char *)"Off";
|
||||
for (scalers = 0; pl_named_filters[scalers].name != NULL; scalers++) {
|
||||
scaling[scalers] = (char *)pl_named_filters[scalers].name;
|
||||
scalingtest[scalers + 1] = (char *)pl_named_filters[scalers].name;
|
||||
for (scalers = 0; pl_filter_presets[scalers].name != NULL; scalers++) {
|
||||
scaling[scalers] = (char *)pl_filter_presets[scalers].name;
|
||||
scalingtest[scalers + 1] = (char *)pl_filter_presets[scalers].name;
|
||||
// printf("Scaler %s\n",pl_named_filters[scalers].name);
|
||||
}
|
||||
// scalers -= 2;
|
||||
|
softhddev.c (10 changed lines)

@@ -364,7 +364,7 @@ static int Ac3Check(const uint8_t *data, int size) {
if ((data[4] & 0xF0) == 0xF0) { // invalid fscod fscod2
return 0;
}
frame_size = ((data[2] & 0x03) << 8) + data[3] + 1;
frame_size = ((data[2] & 0x07) << 8) + data[3] + 1;
frame_size *= 2;
} else { // AC-3
int fscod;
@@ -2142,7 +2142,7 @@ int PlayVideo3(VideoStream *stream, const uint8_t *data, int size) {
}
// hard limit buffer full: needed for replay
if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 10) {
// Debug(3, "video: video buffer full\n");
//Debug(3, "video: video buffer full\n");
return 0;
}
#ifdef USE_SOFTLIMIT
@@ -2817,8 +2817,8 @@ const char *CommandLineHelp(void) {
"\tstill-h264-hw-decoder\tenable h264 hw decoder for still-pictures\n"
"\talsa-driver-broken\tdisable broken alsa driver message\n"
"\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n"
"\talsa-close-open-delay\tenable close open delay to fix no sound "
"bug\n"
"\talsa-no-test\tdisable Alsa Capability test on start for NUC11/12\n"
"\talsa-close-open-delay\tenable close open delay to fix no sound bug\n"
"\tignore-repeat-pict\tdisable repeat pict message\n"
"\tuse-possible-defect-frames prefer faster channel switch\n"
" -D\t\tstart in detached mode\n";
@@ -2908,6 +2908,8 @@ int ProcessArgs(int argc, char *const argv[]) {
AudioAlsaDriverBroken = 1;
} else if (!strcasecmp("alsa-no-close-open", optarg)) {
AudioAlsaNoCloseOpen = 1;
} else if (!strcasecmp("alsa-no-test", optarg)) {
AudioAlsaNotest = 1;
} else if (!strcasecmp("alsa-close-open-delay", optarg)) {
AudioAlsaCloseOpenDelay = 1;
} else if (!strcasecmp("ignore-repeat-pict", optarg)) {
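The new alsa-no-test branch above wires the -w workaround listed in the help text to the AudioAlsaNotest flag declared in audio.h. A hedged usage sketch; the plugin argument quoting is assumed from VDR's usual -P syntax, and the shader file name is a placeholder:

    # Start VDR with the DRM build, skip the ALSA capability test on startup
    # (the NUC11/12 workaround from the README) and load a user shader via -S.
    vdr -P 'softhddrm -w alsa-no-test -S example.glsl'
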
video.c (424 changed lines)
@@ -155,15 +155,12 @@ typedef enum {
|
||||
#endif
|
||||
|
||||
#ifdef VAAPI
|
||||
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,74,100)
|
||||
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 74, 100)
|
||||
#include <libavcodec/vaapi.h>
|
||||
#endif
|
||||
#include <libavutil/hwcontext_vaapi.h>
|
||||
#include <libdrm/drm_fourcc.h>
|
||||
#include <va/va_drmcommon.h>
|
||||
#ifdef RASPI
|
||||
#include <libavutil/hwcontext_drm.h>
|
||||
#endif
|
||||
#include <libavutil/hwcontext_vaapi.h>
|
||||
#define TO_AVHW_DEVICE_CTX(x) ((AVHWDeviceContext *)x->data)
|
||||
#define TO_AVHW_FRAMES_CTX(x) ((AVHWFramesContext *)x->data)
|
||||
#define TO_VAAPI_DEVICE_CTX(x) ((AVVAAPIDeviceContext *)TO_AVHW_DEVICE_CTX(x)->hwctx)
|
||||
@@ -341,7 +338,7 @@ typedef struct {
|
||||
|
||||
#define NUM_SHADERS 5 // Number of supported user shaders with placebo
|
||||
|
||||
#if defined VAAPI && !defined RASPI
|
||||
#if defined VAAPI
|
||||
#define PIXEL_FORMAT AV_PIX_FMT_VAAPI
|
||||
#define SWAP_BUFFER_SIZE 3
|
||||
#endif
|
||||
@@ -349,21 +346,13 @@ typedef struct {
|
||||
#define PIXEL_FORMAT AV_PIX_FMT_CUDA
|
||||
#define SWAP_BUFFER_SIZE 3
|
||||
#endif
|
||||
#if defined RASPI
|
||||
#define PIXEL_FORMAT AV_PIX_FMT_MMAL
|
||||
#define SWAP_BUFFER_SIZE 3
|
||||
#endif
|
||||
//----------------------------------------------------------------------------
|
||||
// Variables
|
||||
//----------------------------------------------------------------------------
|
||||
AVBufferRef *HwDeviceContext; ///< ffmpeg HW device context
|
||||
char VideoIgnoreRepeatPict; ///< disable repeat pict warning
|
||||
|
||||
#ifdef RASPI
|
||||
int Planes = 3;
|
||||
#else
|
||||
int Planes = 2;
|
||||
#endif
|
||||
|
||||
unsigned char *posd;
|
||||
|
||||
@@ -475,7 +464,7 @@ extern void AudioVideoReady(int64_t); ///< tell audio video is ready
|
||||
static pthread_t VideoThread; ///< video decode thread
|
||||
static pthread_cond_t VideoWakeupCond; ///< wakeup condition variable
|
||||
static pthread_mutex_t VideoMutex; ///< video condition mutex
|
||||
static pthread_mutex_t VideoLockMutex; ///< video lock mutex
|
||||
pthread_mutex_t VideoLockMutex; ///< video lock mutex
|
||||
pthread_mutex_t OSDMutex; ///< OSD update mutex
|
||||
#endif
|
||||
|
||||
@@ -628,6 +617,8 @@ char *eglErrorString(EGLint error) {
|
||||
static void VideoSetPts(int64_t *pts_p, int interlaced, const AVCodecContext *video_ctx, const AVFrame *frame) {
|
||||
int64_t pts;
|
||||
int duration;
|
||||
static int64_t lastpts;
|
||||
|
||||
|
||||
//
|
||||
// Get duration for this frame.
|
||||
@@ -675,11 +666,12 @@ static void VideoSetPts(int64_t *pts_p, int interlaced, const AVCodecContext *vi
|
||||
Debug(3, "++++++++++++++++++++++++++++++++++++starte audio\n");
|
||||
AudioVideoReady(pts);
|
||||
}
|
||||
if (*pts_p != pts) {
|
||||
if (*pts_p != pts && lastpts != pts) {
|
||||
Debug(4, "video: %#012" PRIx64 "->%#012" PRIx64 " delta=%4" PRId64 " pts\n", *pts_p, pts, pts - *pts_p);
|
||||
*pts_p = pts;
|
||||
}
|
||||
}
|
||||
lastpts = pts;
|
||||
}
|
||||
|
||||
int CuvidMessage(int level, const char *format, ...);
|
||||
@@ -833,6 +825,8 @@ static uint64_t test_time = 0;
|
||||
#define Lock_and_SharedContext \
|
||||
{ \
|
||||
VideoThreadLock(); \
|
||||
Debug(4,"Lock OSDMutex %s %d\n",__FILE__, __LINE__); \
|
||||
pthread_mutex_lock(&OSDMutex); \
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); \
|
||||
EglCheck(); \
|
||||
}
|
||||
@@ -840,10 +834,14 @@ static uint64_t test_time = 0;
|
||||
{ \
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); \
|
||||
EglCheck(); \
|
||||
Debug(4,"UnLock OSDMutex %s %d\n",__FILE__, __LINE__); \
|
||||
pthread_mutex_unlock(&OSDMutex); \
|
||||
VideoThreadUnlock(); \
|
||||
}
|
||||
#define SharedContext \
|
||||
{ \
|
||||
Debug(4,"Lock OSDMutex %s %d\n",__FILE__, __LINE__); \
|
||||
pthread_mutex_lock(&OSDMutex); \
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); \
|
||||
EglCheck(); \
|
||||
}
|
||||
@@ -851,6 +849,8 @@ static uint64_t test_time = 0;
|
||||
{ \
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); \
|
||||
EglCheck(); \
|
||||
Debug(4,"UnLock OSDMutex %s %d\n",__FILE__, __LINE__); \
|
||||
pthread_mutex_unlock(&OSDMutex); \
|
||||
}
|
||||
#else
|
||||
#ifdef PLACEBO
|
||||
@@ -1227,7 +1227,7 @@ static void EglExit(void) {
|
||||
// must destroy glx
|
||||
// if (glXGetCurrentContext() == glxContext) {
|
||||
// if currently used, set to none
|
||||
glXMakeCurrent(XlibDisplay, None, NULL);
|
||||
// glXMakeCurrent(XlibDisplay, None, NULL);
|
||||
// }
|
||||
if (OSDcontext) {
|
||||
glXDestroyContext(XlibDisplay, OSDcontext);
|
||||
@@ -1444,27 +1444,31 @@ struct file {
|
||||
};
|
||||
|
||||
typedef struct priv {
|
||||
#if PL_API_VER >= 229
|
||||
const struct pl_gpu_t *gpu;
|
||||
const struct pl_vulkan_t *vk;
|
||||
const struct pl_vk_inst_t *vk_inst;
|
||||
#else
|
||||
const struct pl_gpu *gpu;
|
||||
const struct pl_vulkan *vk;
|
||||
const struct pl_vk_inst *vk_inst;
|
||||
struct pl_context *ctx;
|
||||
#if PL_API_VER >= 113
|
||||
struct pl_custom_lut *lut;
|
||||
#endif
|
||||
struct pl_context *ctx;
|
||||
struct pl_custom_lut *lut;
|
||||
struct pl_renderer *renderer;
|
||||
struct pl_renderer *renderertest;
|
||||
const struct pl_swapchain *swapchain;
|
||||
struct pl_context_params context;
|
||||
// struct pl_frame r_target;
|
||||
// struct pl_render_params r_params;
|
||||
// struct pl_tex final_fbo;
|
||||
struct pl_log_params context;
|
||||
#ifndef PLACEBO_GL
|
||||
VkSurfaceKHR pSurface;
|
||||
#endif
|
||||
// VkSemaphore sig_in;
|
||||
int has_dma_buf;
|
||||
#ifdef PLACEBO_GL
|
||||
#if PL_API_VER >= 229
|
||||
struct pl_opengl_t *gl;
|
||||
#else
|
||||
struct pl_opengl *gl;
|
||||
#endif
|
||||
#endif
|
||||
const struct pl_hook *hook[NUM_SHADERS];
|
||||
int num_shaders;
|
||||
@@ -1473,6 +1477,9 @@ typedef struct priv {
|
||||
|
||||
static priv *p;
|
||||
static struct pl_overlay osdoverlay;
|
||||
#if PL_API_VER >= 229
|
||||
static struct pl_overlay_part part;
|
||||
#endif
|
||||
|
||||
static int semid;
|
||||
struct itimerval itimer;
|
||||
@@ -1866,7 +1873,6 @@ static bool create_context_cb(EGLDisplay display, int es_version, EGLContext *ou
|
||||
EGL_NONE};
|
||||
EGLint num_configs = 0;
|
||||
|
||||
#ifndef RASPI
|
||||
attribs = attributes10;
|
||||
*bpp = 10;
|
||||
if (!eglChooseConfig(display, attributes10, NULL, 0,
|
||||
@@ -1879,9 +1885,7 @@ static bool create_context_cb(EGLDisplay display, int es_version, EGLContext *ou
|
||||
&num_configs)) { // try 8 Bit
|
||||
num_configs = 0;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
if (num_configs == 0) {
|
||||
} else if (num_configs == 0) {
|
||||
EglCheck();
|
||||
Debug(3, " 10 Bit egl Failed\n");
|
||||
attribs = attributes8;
|
||||
@@ -2014,22 +2018,20 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream *stream) {
|
||||
Fatal("codec: can't allocate HW video codec context err %04x", i);
|
||||
}
|
||||
#endif
|
||||
#if defined(VAAPI) && !defined(RASPI)
|
||||
#if defined(VAAPI)
|
||||
// if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
|
||||
// ":0.0" , NULL, 0)) != 0 ) {
|
||||
if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128", NULL, 0)) != 0) {
|
||||
Fatal("codec: can't allocate HW video codec context err %04x", i);
|
||||
}
|
||||
#endif
|
||||
#ifndef RASPI
|
||||
HwDeviceContext = av_buffer_ref(hw_device_ctx);
|
||||
#endif
|
||||
|
||||
if (!(decoder = calloc(1, sizeof(*decoder)))) {
|
||||
Error(_("video/cuvid: out of memory\n"));
|
||||
return NULL;
|
||||
}
|
||||
#if defined(VAAPI) && !defined(RASPI)
|
||||
#if defined(VAAPI)
|
||||
VaDisplay = TO_VAAPI_DEVICE_CTX(HwDeviceContext)->display;
|
||||
decoder->VaDisplay = VaDisplay;
|
||||
#endif
|
||||
@@ -2353,9 +2355,6 @@ void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsig
|
||||
img->repr.alpha = PL_ALPHA_UNKNOWN;
|
||||
img->color.primaries = pl_color_primaries_guess(size_x, size_y); // Gammut overwritten later
|
||||
img->color.transfer = PL_COLOR_TRC_BT_1886; // overwritten later
|
||||
img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; // needs config ???
|
||||
img->color.sig_peak = 0.0f; // needs config ????
|
||||
img->color.sig_avg = 0.0f;
|
||||
img->num_overlays = 0;
|
||||
}
|
||||
NoContext;
|
||||
@@ -2381,13 +2380,19 @@ void generateVAAPIImage(CuvidDecoder *decoder, int index, const AVFrame *frame,
|
||||
return;
|
||||
}
|
||||
// vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]);
|
||||
|
||||
Lock_and_SharedContext;
|
||||
|
||||
for (n = 0; n < 2; n++) { // Set DMA_BUF from VAAPI decoder to Textures
|
||||
int id = desc.layers[n].object_index[0];
|
||||
int fd = desc.objects[id].fd;
|
||||
uint32_t size = desc.objects[id].size;
|
||||
uint32_t offset = desc.layers[n].offset[0];
|
||||
#if PL_API_VER < 229
|
||||
struct pl_fmt *fmt;
|
||||
#else
|
||||
struct pl_fmt_t *fmt;
|
||||
#endif
|
||||
|
||||
if (fd == -1) {
|
||||
printf("Fehler beim Import von Surface %d\n", index);
|
||||
@@ -2450,7 +2455,9 @@ void generateVAAPIImage(CuvidDecoder *decoder, int index, const AVFrame *frame,
|
||||
|
||||
decoder->pl_frames[index].planes[n].texture = pl_tex_create(p->gpu, &tex_params);
|
||||
}
|
||||
|
||||
Unlock_and_NoContext;
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -2495,21 +2502,12 @@ void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsig
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
#ifdef RASPI
|
||||
if (PixFmt == AV_PIX_FMT_NV12)
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, n == 0 ? size_x : size_x / 2, n == 0 ? size_y : size_y / 2, 0,
|
||||
GL_RED, GL_UNSIGNED_BYTE, NULL);
|
||||
else
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, n == 0 ? size_x : size_x / 2, n == 0 ? size_y : size_y / 2, 0,
|
||||
GL_RED, GL_UNSIGNED_SHORT, NULL);
|
||||
#else
|
||||
if (PixFmt == AV_PIX_FMT_NV12)
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, n == 0 ? GL_R8 : GL_RG8, n == 0 ? size_x : size_x / 2,
|
||||
n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_BYTE, NULL);
|
||||
else
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, n == 0 ? GL_R16 : GL_RG16, n == 0 ? size_x : size_x / 2,
|
||||
n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_SHORT, NULL);
|
||||
#endif
|
||||
SDK_CHECK_ERROR_GL();
|
||||
// register this texture with CUDA
|
||||
#ifdef CUVID
|
||||
@@ -2522,6 +2520,7 @@ void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsig
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
GlxCheck();
|
||||
#ifdef VAAPI
|
||||
@@ -2542,11 +2541,23 @@ void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsig
|
||||
attribs[num_attribs] = EGL_NONE; \
|
||||
} while (0)
|
||||
|
||||
#define ADD_PLANE_ATTRIBS(plane) \
|
||||
do { \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE##plane##_FD_EXT, desc.objects[desc.layers[n].object_index[plane]].fd); \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE##plane##_OFFSET_EXT, desc.layers[n].offset[plane]); \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE##plane##_PITCH_EXT, desc.layers[n].pitch[plane]); \
|
||||
|
||||
#define ADD_DMABUF_PLANE_ATTRIBS(plane, fd, offset, stride) \
|
||||
do { \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \
|
||||
fd); \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \
|
||||
offset); \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \
|
||||
stride); \
|
||||
} while (0)
|
||||
|
||||
#define ADD_DMABUF_PLANE_MODIFIERS(plane, mod) \
|
||||
do { \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _MODIFIER_LO_EXT, \
|
||||
(uint32_t) ((mod) & 0xFFFFFFFFlu)); \
|
||||
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _MODIFIER_HI_EXT, \
|
||||
(uint32_t) (((mod) >> 32u) & 0xFFFFFFFFlu)); \
|
||||
} while (0)
|
||||
|
||||
void generateVAAPIImage(CuvidDecoder *decoder, VASurfaceID index, const AVFrame *frame, int image_width,
|
||||
@@ -2555,7 +2566,7 @@ void generateVAAPIImage(CuvidDecoder *decoder, VASurfaceID index, const AVFrame
|
||||
|
||||
uint64_t first_time;
|
||||
|
||||
#if defined(VAAPI) && !defined(RASPI)
|
||||
#if defined(VAAPI)
|
||||
VADRMPRIMESurfaceDescriptor desc;
|
||||
|
||||
vaSyncSurface(decoder->VaDisplay, (VASurfaceID)(uintptr_t)frame->data[3]);
|
||||
@@ -2570,26 +2581,26 @@ void generateVAAPIImage(CuvidDecoder *decoder, VASurfaceID index, const AVFrame
|
||||
// vaSyncSurface(decoder->VaDisplay, (VASurfaceID) (uintptr_t)
|
||||
// frame->data[3]);
|
||||
#endif
|
||||
#ifdef RASPI
|
||||
AVDRMFrameDescriptor desc;
|
||||
|
||||
memcpy(&desc, frame->data[0], sizeof(desc));
|
||||
|
||||
#endif
|
||||
|
||||
pthread_mutex_lock(&OSDMutex);
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext);
|
||||
EglCheck();
|
||||
|
||||
for (int n = 0; n < Planes; n++) {
|
||||
int attribs[20] = {EGL_NONE};
|
||||
uint num_attribs = 0;
|
||||
int fd;
|
||||
int id = desc.layers[n].object_index[0];
|
||||
int fd = desc.objects[id].fd;
|
||||
|
||||
#if defined(VAAPI) && !defined(RASPI)
|
||||
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, desc.layers[n].drm_format);
|
||||
#if defined(VAAPI)
|
||||
//Debug(3,"Plane %d w %d h %d layers %d planes %d pitch %d format %04x\n",n,image_width,image_height,desc.num_layers,desc.layers[n].num_planes,desc.layers[n].pitch[0],desc.layers[n].drm_format);
|
||||
|
||||
|
||||
ADD_ATTRIB(EGL_WIDTH, n == 0 ? image_width : image_width / 2);
|
||||
ADD_ATTRIB(EGL_HEIGHT, n == 0 ? image_height : image_height / 2);
|
||||
ADD_PLANE_ATTRIBS(0);
|
||||
ADD_DMABUF_PLANE_MODIFIERS(0, desc.objects[id].drm_format_modifier);
|
||||
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, desc.layers[n].drm_format);
|
||||
ADD_DMABUF_PLANE_ATTRIBS(0, fd, desc.layers[n].offset[0],desc.layers[n].pitch[0]);
|
||||
|
||||
#endif
|
||||
|
||||
decoder->images[index * Planes + n] =
|
||||
@@ -2600,12 +2611,22 @@ void generateVAAPIImage(CuvidDecoder *decoder, VASurfaceID index, const AVFrame
|
||||
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[index * Planes + n]);
|
||||
EGLImageTargetTexture2DOES(GL_TEXTURE_2D, decoder->images[index * Planes + n]);
|
||||
decoder->fds[index * Planes + n] = desc.objects[n].fd;
|
||||
if (n==0) {
|
||||
decoder->fds[index * Planes + n] = fd;
|
||||
|
||||
}
|
||||
else if (fd == decoder->fds[index * Planes]) {
|
||||
decoder->fds[index * Planes + n] = 0;
|
||||
}
|
||||
else {
|
||||
decoder->fds[index * Planes + n] = fd;
|
||||
}
|
||||
}
|
||||
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
|
||||
EglCheck();
|
||||
pthread_mutex_unlock(&OSDMutex);
|
||||
return;
|
||||
|
||||
esh_failed:
|
||||
@@ -2614,6 +2635,7 @@ esh_failed:
|
||||
close(desc.objects[n].fd);
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
|
||||
EglCheck();
|
||||
pthread_mutex_unlock(&OSDMutex);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
@@ -2669,8 +2691,6 @@ int push_filters(AVCodecContext *dec_ctx, CuvidDecoder *decoder, AVFrame *frame)
|
||||
while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, filt_frame)) >= 0) {
|
||||
filt_frame->pts /= 2;
|
||||
decoder->Interlaced = 0;
|
||||
// printf("vaapideint video:new %#012" PRIx64 " old %#012" PRIx64
|
||||
// "\n",filt_frame->pts,frame->pts);
|
||||
CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame);
|
||||
filt_frame = av_frame_alloc(); // get new frame
|
||||
}
|
||||
@@ -2946,19 +2966,6 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder *decoder, AVCodecContext
|
||||
#ifdef PLACEBO
|
||||
VideoThreadUnlock();
|
||||
// dont show first frame
|
||||
#endif
|
||||
#ifdef YADIF
|
||||
if (VideoDeinterlace[decoder->Resolution] == VideoDeinterlaceYadif) {
|
||||
deint = 0;
|
||||
ist->filter = 1; // init yadif_cuda
|
||||
} else {
|
||||
deint = 2;
|
||||
ist->filter = 0;
|
||||
}
|
||||
CuvidMessage(2, "deint = %s\n", deint == 0 ? "Yadif" : "Cuda");
|
||||
if (av_opt_set_int(video_ctx->priv_data, "deint", deint, 0) < 0) { // adaptive
|
||||
Fatal(_("codec: can't set option deint to video codec!\n"));
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
decoder->SyncCounter = 0;
|
||||
@@ -2972,6 +2979,20 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder *decoder, AVCodecContext
|
||||
CuvidUpdateOutput(decoder); // update aspect/scaling
|
||||
}
|
||||
|
||||
#if defined YADIF && defined CUVID
|
||||
if (VideoDeinterlace[decoder->Resolution] == VideoDeinterlaceYadif) {
|
||||
deint = 0;
|
||||
ist->filter = 1; // init yadif_cuda
|
||||
} else {
|
||||
deint = 2;
|
||||
ist->filter = 0;
|
||||
}
|
||||
CuvidMessage(2, "deint = %s\n", deint == 0 ? "Yadif" : "Cuda");
|
||||
if (av_opt_set_int(video_ctx->priv_data, "deint", deint, 0) < 0) { // adaptive
|
||||
Fatal(_("codec: can't set option deint to video codec!\n"));
|
||||
}
|
||||
#endif
|
||||
|
||||
CuvidMessage(2, "GetFormat Init ok %dx%d\n", video_ctx->width, video_ctx->height);
|
||||
decoder->InputAspect = video_ctx->sample_aspect_ratio;
|
||||
#ifdef CUVID
|
||||
@@ -3060,18 +3081,10 @@ int get_RGB(CuvidDecoder *decoder) {
|
||||
glUniform1i(texLoc, 0);
|
||||
texLoc = glGetUniformLocation(gl_prog, "texture1");
|
||||
glUniform1i(texLoc, 1);
|
||||
#ifdef RASPI
|
||||
texLoc = glGetUniformLocation(gl_prog, "texture2");
|
||||
glUniform1i(texLoc, 2);
|
||||
#endif
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * Planes + 0]);
|
||||
glActiveTexture(GL_TEXTURE1);
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * Planes + 1]);
|
||||
#ifdef RASPI
|
||||
glActiveTexture(GL_TEXTURE2);
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * Planes + 2]);
|
||||
#endif
|
||||
glBindFramebuffer(GL_FRAMEBUFFER, fb);
|
||||
|
||||
render_pass_quad(1, 0.0, 0.0);
|
||||
@@ -3124,7 +3137,7 @@ int get_RGB(CuvidDecoder *decoder) {
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
}
|
||||
glFlush();
|
||||
//Debug(3, "Read pixels %d %d\n", width, height);
|
||||
// Debug(3, "Read pixels %d %d\n", width, height);
|
||||
|
||||
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
|
||||
glPixelStorei(GL_PACK_ALIGNMENT, 1);
|
||||
@@ -3183,13 +3196,11 @@ int get_RGB(CuvidDecoder *decoder) {
|
||||
target.repr.bits.bit_shift = 0;
|
||||
target.color.primaries = PL_COLOR_PRIM_BT_709;
|
||||
target.color.transfer = PL_COLOR_TRC_BT_1886;
|
||||
target.color.light = PL_COLOR_LIGHT_DISPLAY;
|
||||
target.color.sig_peak = 0;
|
||||
target.color.sig_avg = 0;
|
||||
|
||||
if (ovl) {
|
||||
target.overlays = ovl;
|
||||
target.num_overlays = 1;
|
||||
#if PL_API_VER < 229
|
||||
#ifdef PLACEBO_GL
|
||||
x0 = ovl->rect.x0;
|
||||
y1 = ovl->rect.y0;
|
||||
@@ -3209,6 +3220,28 @@ int get_RGB(CuvidDecoder *decoder) {
|
||||
ovl->rect.x1 = (float)x1 * faktorx;
|
||||
ovl->rect.y1 = (float)y1 * faktory;
|
||||
#endif
|
||||
#else
|
||||
#ifdef PLACEBO_GL
|
||||
x0 = part.dst.x0;
|
||||
y1 = part.dst.y0;
|
||||
x1 = part.dst.x1;
|
||||
y0 = part.dst.y1;
|
||||
part.dst.x0 = (float)x0 * faktorx;
|
||||
part.dst.y0 = (float)y0 * faktory;
|
||||
part.dst.x1 = (float)x1 * faktorx;
|
||||
part.dst.y1 = (float)y1 * faktory;
|
||||
#else
|
||||
x0 = part.dst.x0;
|
||||
y0 = part.dst.y0;
|
||||
x1 = part.dst.x1;
|
||||
y1 = part.dst.y1;
|
||||
part.dst.x0 = (float)x0 * faktorx;
|
||||
part.dst.y0 = (float)y0 * faktory;
|
||||
part.dst.x1 = (float)x1 * faktorx;
|
||||
part.dst.y1 = (float)y1 * faktory;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
} else {
|
||||
target.overlays = 0;
|
||||
@@ -3222,6 +3255,7 @@ int get_RGB(CuvidDecoder *decoder) {
|
||||
pl_gpu_finish(p->gpu);
|
||||
|
||||
if (ovl) {
|
||||
#if PL_API_VER < 229
|
||||
#ifdef PLACEBO_GL
|
||||
ovl->rect.x0 = x0;
|
||||
ovl->rect.y0 = y1;
|
||||
@@ -3232,6 +3266,19 @@ int get_RGB(CuvidDecoder *decoder) {
|
||||
ovl->rect.y0 = y0;
|
||||
ovl->rect.x1 = x1;
|
||||
ovl->rect.y1 = y1;
|
||||
#endif
|
||||
#else
|
||||
#ifdef PLACEBO_GL
|
||||
part.dst.x0 = x0;
|
||||
part.dst.y0 = y1;
|
||||
part.dst.x1 = x1;
|
||||
part.dst.y1 = y0;
|
||||
#else
|
||||
part.dst.x0 = x0;
|
||||
part.dst.y0 = y0;
|
||||
part.dst.x1 = x1;
|
||||
part.dst.y1 = y1;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -3499,37 +3546,14 @@ static void CuvidRenderFrame(CuvidDecoder *decoder, const AVCodecContext *video_
|
||||
if (frame->color_trc == AVCOL_TRC_RESERVED0) // cuvid decoder failure with SD channels
|
||||
frame->color_trc = AVCOL_TRC_SMPTE170M;
|
||||
|
||||
// printf("Patched colorspace %d Primaries %d TRC
|
||||
// %d\n",frame->colorspace,frame->color_primaries,frame->color_trc);
|
||||
#ifdef RASPI
|
||||
//
|
||||
// Check image, format, size
|
||||
//
|
||||
if ( // decoder->PixFmt != video_ctx->pix_fmt
|
||||
video_ctx->width != decoder->InputWidth
|
||||
// || decoder->ColorSpace != color
|
||||
|| video_ctx->height != decoder->InputHeight) {
|
||||
Debug(3, "fmt %02d:%02d width %d:%d hight %d:%d\n", decoder->ColorSpace, frame->colorspace, video_ctx->width,
|
||||
decoder->InputWidth, video_ctx->height, decoder->InputHeight);
|
||||
decoder->PixFmt = AV_PIX_FMT_NV12;
|
||||
decoder->InputWidth = video_ctx->width;
|
||||
decoder->InputHeight = video_ctx->height;
|
||||
CuvidCleanup(decoder);
|
||||
decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1;
|
||||
CuvidSetupOutput(decoder);
|
||||
}
|
||||
#endif
|
||||
// printf("Patched colorspace %d Primaries %d TRC
|
||||
// %d\n",frame->colorspace,frame->color_primaries,frame->color_trc);
|
||||
//
|
||||
// Copy data from frame to image
|
||||
//
|
||||
#ifdef RASPI
|
||||
if (video_ctx->pix_fmt == 0) {
|
||||
#else
|
||||
if (video_ctx->pix_fmt == PIXEL_FORMAT) {
|
||||
#endif
|
||||
int w = decoder->InputWidth;
|
||||
int h = decoder->InputHeight;
|
||||
|
||||
decoder->ColorSpace = color; // save colorspace
|
||||
decoder->trc = frame->color_trc;
|
||||
decoder->color_primaries = frame->color_primaries;
|
||||
@@ -3557,8 +3581,13 @@ static void CuvidRenderFrame(CuvidDecoder *decoder, const AVCodecContext *video_
|
||||
// %p\n",surface,decoder->pl_frames[surface].planes[0].texture,decoder->pl_frames[surface].planes[1].texture);
|
||||
bool ok = pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params){
|
||||
.tex = decoder->pl_frames[surface].planes[0].texture,
|
||||
#if PL_API_VER < 292
|
||||
.stride_w = output->linesize[0],
|
||||
.stride_h = h,
|
||||
#else
|
||||
.row_pitch = output->linesize[0],
|
||||
.depth_pitch = h,
|
||||
#endif
|
||||
.ptr = output->data[0],
|
||||
.rc.x1 = w,
|
||||
.rc.y1 = h,
|
||||
@@ -3566,8 +3595,13 @@ static void CuvidRenderFrame(CuvidDecoder *decoder, const AVCodecContext *video_
|
||||
});
|
||||
ok &= pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params){
|
||||
.tex = decoder->pl_frames[surface].planes[1].texture,
|
||||
#if PL_API_VER < 292
|
||||
.stride_w = output->linesize[0] / 2,
|
||||
.stride_h = h / 2,
|
||||
#else
|
||||
.row_pitch = output->linesize[0] / 2,
|
||||
.depth_pitch = h,
|
||||
#endif
|
||||
.ptr = output->data[1],
|
||||
.rc.x1 = w / 2,
|
||||
.rc.y1 = h / 2,
|
||||
@@ -3750,7 +3784,6 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
struct pl_cone_params cone;
|
||||
struct pl_tex_vk *vkp;
|
||||
struct pl_plane *pl;
|
||||
const struct pl_fmt *fmt;
|
||||
struct pl_tex *tex0, *tex1;
|
||||
|
||||
struct pl_frame *img;
|
||||
@@ -3768,6 +3801,7 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
AVFrameSideData *sd, *sd1 = NULL, *sd2 = NULL;
|
||||
|
||||
#ifdef PLACEBO
|
||||
|
||||
if (level) {
|
||||
dst_rect.x0 = decoder->VideoX; // video window output (clip)
|
||||
dst_rect.y0 = decoder->VideoY;
|
||||
@@ -3821,19 +3855,11 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
glUniform1i(texLoc, 0);
|
||||
texLoc = glGetUniformLocation(gl_prog, "texture1");
|
||||
glUniform1i(texLoc, 1);
|
||||
#ifdef RASPI
|
||||
texLoc = glGetUniformLocation(gl_prog, "texture2");
|
||||
glUniform1i(texLoc, 2);
|
||||
#endif
|
||||
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * Planes + 0]);
|
||||
glActiveTexture(GL_TEXTURE1);
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * Planes + 1]);
|
||||
#ifdef RASPI
|
||||
glActiveTexture(GL_TEXTURE2);
|
||||
glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * Planes + 2]);
|
||||
#endif
|
||||
|
||||
render_pass_quad(0, xcropf, ycropf);
|
||||
|
||||
@@ -3848,6 +3874,12 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
memcpy(&render_params, &pl_render_default_params, sizeof(render_params));
|
||||
render_params.deband_params = &deband;
|
||||
|
||||
// provide LUT Table
|
||||
if (LUTon)
|
||||
render_params.lut = p->lut;
|
||||
else
|
||||
render_params.lut = NULL;
|
||||
|
||||
frame = decoder->frames[current];
|
||||
|
||||
// Fix Color Parameters
|
||||
@@ -3858,7 +3890,6 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
memcpy(&img->repr, &pl_color_repr_sdtv, sizeof(struct pl_color_repr));
|
||||
img->color.primaries = PL_COLOR_PRIM_BT_601_625;
|
||||
img->color.transfer = PL_COLOR_TRC_BT_1886;
|
||||
img->color.light = PL_COLOR_LIGHT_DISPLAY;
|
||||
pl->shift_x = 0.0f;
|
||||
break;
|
||||
case AVCOL_SPC_BT709:
|
||||
@@ -3872,9 +3903,10 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
memcpy(&img->repr, &pl_color_repr_uhdtv, sizeof(struct pl_color_repr));
|
||||
memcpy(&img->color, &pl_color_space_bt2020_hlg, sizeof(struct pl_color_space));
|
||||
deband.grain = 0.0f; // no grain in HDR
|
||||
img->color.sig_scale = 1.0f;
|
||||
pl->shift_x = -0.5f;
|
||||
|
||||
// Kein LUT bei HDR
|
||||
render_params.lut = NULL;
|
||||
#if 0
|
||||
if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE))) {
|
||||
img->profile = (struct pl_icc_profile){
|
||||
.data = sd->data,
|
||||
@@ -3903,13 +3935,14 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
// Make sure this value is more or less legal
|
||||
if (img->color.sig_peak < 1.0 || img->color.sig_peak > 50.0)
|
||||
img->color.sig_peak = 0.0;
|
||||
|
||||
#endif
|
||||
#if defined VAAPI || defined USE_DRM
|
||||
render_params.peak_detect_params = NULL;
|
||||
render_params.deband_params = NULL;
|
||||
render_params.dither_params = NULL;
|
||||
render_params.skip_anti_aliasing = true;
|
||||
#endif
|
||||
|
||||
break;
|
||||
|
||||
default: // fallback
|
||||
@@ -3943,7 +3976,6 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
if (decoder->ColorSpace == AVCOL_SPC_BT470BG) {
|
||||
target->color.primaries = PL_COLOR_PRIM_BT_601_625;
|
||||
target->color.transfer = PL_COLOR_TRC_BT_1886;
|
||||
target->color.light = PL_COLOR_LIGHT_DISPLAY;
|
||||
} else {
|
||||
memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space));
|
||||
}
|
||||
@@ -3955,7 +3987,6 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
} else if (decoder->ColorSpace == AVCOL_SPC_BT470BG) {
|
||||
target->color.primaries = PL_COLOR_PRIM_BT_601_625;
|
||||
target->color.transfer = PL_COLOR_TRC_BT_1886;
|
||||
target->color.light = PL_COLOR_LIGHT_DISPLAY;
|
||||
;
|
||||
} else {
|
||||
memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space));
|
||||
@@ -3983,8 +4014,6 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
}
|
||||
#endif
|
||||
|
||||
// printf("sys %d prim %d trc %d light
|
||||
// %d\n",img->repr.sys,img->color.primaries,img->color.transfer,img->color.light);
|
||||
// Source crop
|
||||
if (VideoScalerTest) { // right side defined scaler
|
||||
|
||||
@@ -4059,8 +4088,11 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
|
||||
// render_params.upscaler = &pl_filter_ewa_lanczos;
|
||||
|
||||
render_params.upscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter;
|
||||
render_params.downscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter;
|
||||
render_params.upscaler = pl_filter_presets[VideoScaling[decoder->Resolution]].filter;
|
||||
render_params.downscaler = pl_filter_presets[VideoScaling[decoder->Resolution]].filter;
|
||||
|
||||
if (level)
|
||||
render_params.skip_target_clearing = 1;
|
||||
|
||||
render_params.color_adjustment = &colors;
|
||||
|
||||
@@ -4108,13 +4140,9 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
render_params.num_hooks = p->num_shaders;
|
||||
}
|
||||
#endif
|
||||
#if PL_API_VER >= 113
|
||||
// provide LUT Table
|
||||
if (LUTon)
|
||||
render_params.lut = p->lut;
|
||||
else
|
||||
render_params.lut = NULL;
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
||||
if (decoder->newchannel && current == 0) {
|
||||
colors.brightness = -1.0f;
|
||||
@@ -4131,7 +4159,8 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
if (!pl_render_image(p->renderer, &decoder->pl_frames[current], target, &render_params)) {
|
||||
Debug(4, "Failed rendering frame!\n");
|
||||
}
|
||||
// pl_gpu_finish(p->gpu);
|
||||
|
||||
|
||||
// printf("Rendertime %ld -- \n,",GetusTicks() - tt);
|
||||
|
||||
if (VideoScalerTest) { // left side test scaler
|
||||
@@ -4154,11 +4183,12 @@ static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int lev
|
||||
target->crop.y1 = dst_video_rect.y1;
|
||||
#endif
|
||||
|
||||
render_params.upscaler = pl_named_filters[VideoScalerTest - 1].filter;
|
||||
render_params.downscaler = pl_named_filters[VideoScalerTest - 1].filter;
|
||||
render_params.upscaler = pl_filter_presets[VideoScalerTest - 1].filter;
|
||||
render_params.downscaler = pl_filter_presets[VideoScalerTest - 1].filter;
|
||||
|
||||
// render_params.lut = NULL;
|
||||
render_params.num_hooks = 0;
|
||||
render_params.skip_target_clearing = 1;
|
||||
|
||||
if (!p->renderertest)
|
||||
p->renderertest = pl_renderer_create(p->ctx, p->gpu);
|
||||
@@ -4185,11 +4215,16 @@ void make_osd_overlay(int x, int y, int width, int height) {
|
||||
|
||||
pl = &osdoverlay;
|
||||
|
||||
#if PL_API_VER < 229
|
||||
if (pl->plane.texture && (pl->plane.texture->params.w != width || pl->plane.texture->params.h != height)) {
|
||||
// pl_tex_clear(p->gpu, pl->plane.texture, (float[4]) { 0 });
|
||||
pl_tex_destroy(p->gpu, &pl->plane.texture);
|
||||
#else
|
||||
if (pl->tex && (pl->tex->params.w != width || pl->tex->params.h != height)) {
|
||||
pl_tex_destroy(p->gpu, &pl->tex);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if PL_API_VER < 229
|
||||
// make texture for OSD
|
||||
if (pl->plane.texture == NULL) {
|
||||
pl->plane.texture = pl_tex_create(
|
||||
@@ -4210,12 +4245,29 @@ void make_osd_overlay(int x, int y, int width, int height) {
|
||||
pl->plane.component_mapping[1] = PL_CHANNEL_G;
|
||||
pl->plane.component_mapping[2] = PL_CHANNEL_B;
|
||||
pl->plane.component_mapping[3] = PL_CHANNEL_A;
|
||||
#else
|
||||
// make texture for OSD
|
||||
if (pl->tex == NULL) {
|
||||
pl->tex = pl_tex_create(
|
||||
p->gpu, &(struct pl_tex_params) {
|
||||
.w = width, .h = height, .d = 0, .format = fmt, .sampleable = true, .host_writable = true,
|
||||
.blit_dst = true,
|
||||
});
|
||||
}
|
||||
// make overlay
|
||||
pl_tex_clear(p->gpu, pl->tex, (float[4]){0});
|
||||
part.src.x0 = 0.0f;
|
||||
part.src.y0 = 0.0f;
|
||||
part.src.x1 = width;
|
||||
part.src.y1 = height;
|
||||
#endif
|
||||
pl->mode = PL_OVERLAY_NORMAL;
|
||||
pl->repr.sys = PL_COLOR_SYSTEM_RGB;
|
||||
pl->repr.levels = PL_COLOR_LEVELS_PC;
|
||||
pl->repr.alpha = PL_ALPHA_INDEPENDENT;
|
||||
|
||||
memcpy(&osdoverlay.color, &pl_color_space_srgb, sizeof(struct pl_color_space));
|
||||
#if PL_API_VER < 229
|
||||
#ifdef PLACEBO_GL
|
||||
pl->rect.x0 = x;
|
||||
pl->rect.y1 = VideoWindowHeight - y; // Boden von oben
|
||||
@@ -4228,6 +4280,21 @@ void make_osd_overlay(int x, int y, int width, int height) {
|
||||
pl->rect.x1 = x + width;
|
||||
pl->rect.y1 = VideoWindowHeight - height - y + offset;
|
||||
#endif
|
||||
#else
|
||||
osdoverlay.parts = ∂
|
||||
osdoverlay.num_parts = 1;
|
||||
#ifdef PLACEBO_GL
|
||||
part.dst.x0 = x;
|
||||
part.dst.y1 = VideoWindowHeight - y; // Boden von oben
|
||||
part.dst.x1 = x + width;
|
||||
part.dst.y0 = VideoWindowHeight - height - y;
|
||||
#else
|
||||
part.dst.x0 = x;
|
||||
part.dst.y0 = VideoWindowHeight - y + offset; // Boden von oben
|
||||
part.dst.x1 = x + width;
|
||||
part.dst.y1 = VideoWindowHeight - height - y + offset;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
///
|
||||
@@ -4271,12 +4338,12 @@ static void CuvidDisplayFrame(void) {
|
||||
#else
|
||||
eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext);
|
||||
EglCheck();
|
||||
|
||||
#ifndef USE_DRM
|
||||
|
||||
#ifndef USE_DRM
|
||||
usleep(5000);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
glClear(GL_COLOR_BUFFER_BIT);
|
||||
|
||||
#else // PLACEBO
|
||||
@@ -4360,24 +4427,29 @@ static void CuvidDisplayFrame(void) {
|
||||
if ((VideoShowBlackPicture && !decoder->TrickSpeed) ||
|
||||
(VideoShowBlackPicture && decoder->Closing < -300)) {
|
||||
CuvidBlackSurface(decoder);
|
||||
CuvidMessage(4, "video/cuvid: black surface displayed\n");
|
||||
CuvidMessage(4, "video/cuvid: black surface displayed Filled %d\n",filled);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
valid_frame = 1;
|
||||
#ifdef PLACEBO
|
||||
//pthread_mutex_lock(&OSDMutex);
|
||||
if (OsdShown == 1) { // New OSD opened
|
||||
pthread_mutex_lock(&OSDMutex);
|
||||
|
||||
make_osd_overlay(OSDx, OSDy, OSDxsize, OSDysize);
|
||||
if (posd) {
|
||||
pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params){
|
||||
// upload OSD
|
||||
#if PL_API_VER >= 229
|
||||
.tex = osdoverlay.tex,
|
||||
#else
|
||||
.tex = osdoverlay.plane.texture,
|
||||
#endif
|
||||
.ptr = posd,
|
||||
});
|
||||
}
|
||||
OsdShown = 2;
|
||||
pthread_mutex_unlock(&OSDMutex);
|
||||
|
||||
}
|
||||
|
||||
if (OsdShown == 2) {
|
||||
@@ -4385,7 +4457,7 @@ static void CuvidDisplayFrame(void) {
|
||||
} else {
|
||||
CuvidMixVideo(decoder, i, &target, NULL);
|
||||
}
|
||||
|
||||
//pthread_mutex_unlock(&OSDMutex);
|
||||
#else
|
||||
CuvidMixVideo(decoder, i);
|
||||
#endif
|
||||
@@ -4402,6 +4474,9 @@ static void CuvidDisplayFrame(void) {
|
||||
decoder->grab = 0;
|
||||
}
|
||||
}
|
||||
#ifdef PLACEBO
|
||||
pl_gpu_finish(p->gpu);
|
||||
#endif
|
||||
|
||||
#ifndef PLACEBO
|
||||
// add osd to surface
|
||||
@@ -4467,10 +4542,15 @@ static void CuvidDisplayFrame(void) {
|
||||
#if defined PLACEBO // && !defined PLACEBO_GL
|
||||
// first_time = GetusTicks();
|
||||
if (!pl_swapchain_submit_frame(p->swapchain))
|
||||
Fatal(_("Failed to submit swapchain buffer\n"));
|
||||
pl_swapchain_swap_buffers(p->swapchain); // swap buffers
|
||||
NoContext;
|
||||
Fatal(_("Failed to submit swapchain buffer\n"));
|
||||
VideoThreadUnlock();
|
||||
pl_swapchain_swap_buffers(p->swapchain); // swap buffers
|
||||
#ifdef PLACEBO_GL
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
|
||||
EglCheck();
|
||||
#endif
|
||||
|
||||
|
||||
#else // not PLACEBO
|
||||
#ifdef CUVID
|
||||
glXGetVideoSyncSGI(&Count); // get current frame
|
||||
@@ -4813,7 +4893,7 @@ static void CuvidSyncRenderFrame(CuvidDecoder *decoder, const AVCodecContext *vi
|
||||
// if video output buffer is full, wait and display surface.
|
||||
// loop for interlace
|
||||
if (atomic_read(&decoder->SurfacesFilled) >= VIDEO_SURFACES_MAX) {
|
||||
Fatal("video/cuvid: this code part shouldn't be used\n");
|
||||
//Fatal("video/cuvid: this code part shouldn't be used\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -5517,7 +5597,7 @@ void InitPlacebo() {
|
||||
p->context.log_cb = &pl_log_intern;
|
||||
p->context.log_level = PL_LOG_WARN; // WARN
|
||||
|
||||
p->ctx = pl_context_create(PL_API_VER, &p->context);
|
||||
p->ctx = pl_log_create(PL_API_VER, &p->context);
|
||||
if (!p->ctx) {
|
||||
Fatal(_("Failed initializing libplacebo\n"));
|
||||
}
|
||||
@@ -5595,7 +5675,9 @@ void InitPlacebo() {
|
||||
.surface = p->pSurface,
|
||||
.present_mode = VK_PRESENT_MODE_FIFO_KHR,
|
||||
.swapchain_depth = SWAP_BUFFER_SIZE,
|
||||
#if PL_API_VER < 229
|
||||
.prefer_hdr = true,
|
||||
#endif
|
||||
});
|
||||
|
||||
#endif
|
||||
@@ -5677,8 +5759,13 @@ void exit_display() {
|
||||
return;
|
||||
}
|
||||
pl_gpu_finish(p->gpu);
|
||||
#if PL_API_VER >= 229
|
||||
if (osdoverlay.tex)
|
||||
pl_tex_destroy(p->gpu, &osdoverlay.tex);
|
||||
#else
|
||||
if (osdoverlay.plane.texture)
|
||||
pl_tex_destroy(p->gpu, &osdoverlay.plane.texture);
|
||||
#endif
|
||||
|
||||
// pl_renderer_destroy(&p->renderer);
|
||||
if (p->renderertest) {
|
||||
@@ -5696,7 +5783,7 @@ void exit_display() {
|
||||
pl_vk_inst_destroy(&p->vk_inst);
|
||||
#endif
|
||||
|
||||
pl_context_destroy(&p->ctx);
|
||||
pl_log_destroy(&p->ctx);
|
||||
#if PL_API_VER >= 113
|
||||
pl_lut_free(&p->lut);
|
||||
#endif
|
||||
@@ -6813,10 +6900,10 @@ void VideoSetAbove() {
|
||||
void VideoSetDeinterlace(int mode[VideoResolutionMax]) {
|
||||
#ifdef CUVID
|
||||
VideoDeinterlace[0] = mode[0]; // 576i
|
||||
VideoDeinterlace[1] = 1; // mode[1]; // 720p
|
||||
VideoDeinterlace[1] = 0; // mode[1]; // 720p
|
||||
VideoDeinterlace[2] = mode[2]; // fake 1080
|
||||
VideoDeinterlace[3] = mode[3]; // 1080
|
||||
VideoDeinterlace[4] = 1; // mode[4]; 2160p
|
||||
VideoDeinterlace[4] = 0; // mode[4]; 2160p
|
||||
#else
|
||||
VideoDeinterlace[0] = 1; // 576i
|
||||
VideoDeinterlace[1] = 0; // mode[1]; // 720p
|
||||
@@ -7022,14 +7109,13 @@ void VideoInit(const char *display_name) {
|
||||
if (!display_name && !(display_name = getenv("DISPLAY"))) {
|
||||
// if no environment variable, use :0.0 as default display name
|
||||
display_name = ":0.0";
|
||||
|
||||
}
|
||||
if (!getenv("DISPLAY")) {
|
||||
//force set DISPLAY environment variable, otherwise nvidia driver
|
||||
//has problems at libplace-swapchain-init
|
||||
Debug(3, "video: setting ENV DISPLAY=%s\n",display_name);
|
||||
setenv("DISPLAY",display_name,0);
|
||||
//Debug(3, "video: ENV:(%s)\n",getenv("DISPLAY"));
|
||||
// force set DISPLAY environment variable, otherwise nvidia driver
|
||||
// has problems at libplace-swapchain-init
|
||||
Debug(3, "video: setting ENV DISPLAY=%s\n", display_name);
|
||||
setenv("DISPLAY", display_name, 0);
|
||||
// Debug(3, "video: ENV:(%s)\n",getenv("DISPLAY"));
|
||||
}
|
||||
|
||||
if (!(XlibDisplay = XOpenDisplay(display_name))) {
|
||||
@@ -7212,16 +7298,16 @@ int GlxInitopengl() {
|
||||
if (!eglOSDContext) {
|
||||
EglCheck();
|
||||
Fatal(_("video/egl: can't create thread egl context\n"));
|
||||
return NULL;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglOSDContext);
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int GlxDrawopengl() {
|
||||
eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext);
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void GlxDestroy() {
|
||||
|
Reference in New Issue
Block a user