From 4d2735a971a87d388fc638820c8c2d3ddda725b0 Mon Sep 17 00:00:00 2001 From: jojo61 Date: Fri, 4 Oct 2019 10:37:57 +0200 Subject: [PATCH] Vaapi changed to egl --- Makefile | 48 +- audio.c | 1222 +++++++++++++++++++++--------------------- codec.c | 290 +++++----- openglosd.cpp | 82 ++- shaders.h | 196 +++++-- softhddev.c | 135 ++--- video.c | 1426 ++++++++++++++++++++++++++++--------------------- 7 files changed, 1895 insertions(+), 1504 deletions(-) diff --git a/Makefile b/Makefile index e4797b6..2460c5d 100644 --- a/Makefile +++ b/Makefile @@ -7,9 +7,9 @@ # This name will be used in the '-P...' option of VDR to load the plugin. # By default the main source file also carries this name. -PLUGIN = softhdcuvid ### Configuration (edit this for your needs) +# comment out if not needed # what kind of driver do we make - # if VAAPI is enabled the drivername is softhdvaapi @@ -17,18 +17,30 @@ PLUGIN = softhdcuvid #VAAPI=1 CUVID=1 +# use libplacebo - available for both drivers +#LIBPLACEBO=1 + +# use YADIF deint - only available with cuvid +#YADIF=1 + + + + + + + + -# support OPENGLOSD - only configurable with cuvid -OPENGLOSD=1 -# use Libplacebo - only configurable with cuvid -LIBPLACEBO=1 -# use YADIF deint - only configurable with cuvid -YADIF=0 #--------------------- no more config needed past this point-------------------------------- - # support alsa audio output module +PLUGIN = softhdcuvid + +# support OPENGLOSD always needed +OPENGLOSD=1 + +# support alsa audio output module ALSA ?= $(shell pkg-config --exists alsa && echo 1) # support OSS audio output module OSS ?= 1 @@ -48,7 +60,7 @@ SWRESAMPLE = 1 #AVRESAMPLE = 1 #endif -CONFIG := -DDEBUG #-DOSD_DEBUG # enable debug output+functions +CONFIG := #-DDEBUG #-DOSD_DEBUG # enable debug output+functions CONFIG += -DHAVE_GL # needed for mpv libs #CONFIG += -DSTILL_DEBUG=2 # still picture debug verbose level CONFIG += -DAV_INFO -DAV_INFO_TIME=3000 # info/debug a/v sync @@ -114,8 +126,8 @@ CONFIG += -DUSE_OSS endif ifeq ($(OPENGL),1) -_CFLAGS += $(shell pkg-config --cflags libva-glx) -LIBS += $(shell pkg-config --libs libva-glx) +#_CFLAGS += $(shell pkg-config --cflags libva-glx) +#LIBS += $(shell pkg-config --libs libva-glx) endif ifeq ($(OPENGLOSD),1) @@ -125,17 +137,16 @@ endif ifeq ($(OPENGL),1) CONFIG += -DUSE_GLX _CFLAGS += $(shell pkg-config --cflags gl glu glew) -LIBS += $(shell pkg-config --libs gl glu glew) -_CFLAGS += $(shell pkg-config --cflags glew) -LIBS += $(shell pkg-config --libs glew) +#LIBS += $(shell pkg-config --libs glu glew) _CFLAGS += $(shell pkg-config --cflags freetype2) LIBS += $(shell pkg-config --libs freetype2) endif ifeq ($(VAAPI),1) -CONFIG += -DVAAPI -DUSE_OPENGLOSD -LIBPLACEBO=1 +CONFIG += -DVAAPI +#LIBPLACEBO=1 PLUGIN = softhdvaapi +LIBS += -lEGL -lEGL_mesa endif ifeq ($(LIBPLACEBO),1) @@ -144,6 +155,7 @@ endif ifeq ($(CUVID),1) CONFIG += -DCUVID # enable CUVID decoder +LIBS += -lEGL -lGL ifeq ($(YADIF),1) CONFIG += -DYADIF # Yadif only with CUVID endif @@ -235,10 +247,10 @@ LIBS += -lplacebo -lglut endif ifeq ($(CUVID),1) -LIBS += -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid +LIBS += -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid endif -LIBS += -lGLEW -lGLX -ldl +LIBS += -lGLEW -lGLU -ldl ### Includes and Defines (add further entries here): INCLUDES += diff --git a/audio.c b/audio.c index bf56c21..f61484c 100644 --- a/audio.c +++ b/audio.c @@ -143,6 +143,7 @@ static volatile char AudioRunning; ///< thread running / stopped static volatile char AudioPaused; ///< 
audio paused static volatile char AudioVideoIsReady; ///< video ready start early static int AudioSkip; ///< skip audio to sync to video +int AudioDelay; /// delay audio to sync to video static const int AudioBytesProSample = 2; ///< number of bytes per sample @@ -662,26 +663,26 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) // search supported sample-rates for (u = 0; u < AudioRatesMax; ++u) { - if (AudioRatesTable[u] == sample_rate) { - goto found; - } - if (AudioRatesTable[u] > sample_rate) { - break; - } + if (AudioRatesTable[u] == sample_rate) { + goto found; + } + if (AudioRatesTable[u] > sample_rate) { + break; + } } Error(_("audio: %dHz sample-rate unsupported\n"), sample_rate); return -1; // unsupported sample-rate found: if (!AudioChannelMatrix[u][channels]) { - Error(_("audio: %d channels unsupported\n"), channels); - return -1; // unsupported nr. of channels + Error(_("audio: %d channels unsupported\n"), channels); + return -1; // unsupported nr. of channels } if (atomic_read(&AudioRingFilled) == AUDIO_RING_MAX) { // no free slot - // FIXME: can wait for ring buffer empty - Error(_("audio: out of ring buffers\n")); - return -1; + // FIXME: can wait for ring buffer empty + Error(_("audio: out of ring buffers\n")); + return -1; } AudioRingWrite = (AudioRingWrite + 1) % AUDIO_RING_MAX; @@ -702,9 +703,10 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) #ifdef USE_AUDIO_THREAD if (AudioThread) { - // tell thread, that there is something todo - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); + // tell thread, that there is something todo + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3,"Start on AudioRingAdd\n"); } #endif @@ -719,8 +721,8 @@ static void AudioRingInit(void) int i; for (i = 0; i < AUDIO_RING_MAX; ++i) { - // ~2s 8ch 16bit - AudioRing[i].RingBuffer = RingBufferNew(AudioRingBufferSize); + // ~2s 8ch 16bit + AudioRing[i].RingBuffer = RingBufferNew(AudioRingBufferSize); } atomic_set(&AudioRingFilled, 0); } @@ -733,12 +735,12 @@ static void AudioRingExit(void) int i; for (i = 0; i < AUDIO_RING_MAX; ++i) { - if (AudioRing[i].RingBuffer) { - RingBufferDel(AudioRing[i].RingBuffer); - AudioRing[i].RingBuffer = NULL; - } - AudioRing[i].HwSampleRate = 0; // checked for valid setup - AudioRing[i].InSampleRate = 0; + if (AudioRing[i].RingBuffer) { + RingBufferDel(AudioRing[i].RingBuffer); + AudioRing[i].RingBuffer = NULL; + } + AudioRing[i].HwSampleRate = 0; // checked for valid setup + AudioRing[i].InSampleRate = 0; } AudioRingRead = 0; AudioRingWrite = 0; @@ -781,121 +783,122 @@ static int AlsaPlayRingbuffer(void) first = 1; for (;;) { // loop for ring buffer wrap - int avail; - int n; - int err; - int frames; - const void *p; + int avail; + int n; + int err; + int frames; + const void *p; - // how many bytes can be written? - n = snd_pcm_avail_update(AlsaPCMHandle); - if (n < 0) { - if (n == -EAGAIN) { - continue; - } - Warning(_("audio/alsa: avail underrun error? 
'%s'\n"), - snd_strerror(n)); - err = snd_pcm_recover(AlsaPCMHandle, n, 0); - if (err >= 0) { - continue; - } - Error(_("audio/alsa: snd_pcm_avail_update(): %s\n"), - snd_strerror(n)); - return -1; - } - avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, n); - if (avail < 256) { // too much overhead - if (first) { - // happens with broken alsa drivers - if (AudioThread) { - if (!AudioAlsaDriverBroken) { - Error(_("audio/alsa: broken driver %d state '%s'\n"), - avail, - snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); - } - // try to recover - if (snd_pcm_state(AlsaPCMHandle) - == SND_PCM_STATE_PREPARED) { - if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) { - Error(_("audio/alsa: snd_pcm_start(): %s\n"), - snd_strerror(err)); + // how many bytes can be written? + n = snd_pcm_avail_update(AlsaPCMHandle); + if (n < 0) { + if (n == -EAGAIN) { + continue; } - } - usleep(5 * 1000); + Warning(_("audio/alsa: avail underrun error? '%s'\n"), + snd_strerror(n)); + err = snd_pcm_recover(AlsaPCMHandle, n, 0); + if (err >= 0) { + continue; + } + Error(_("audio/alsa: snd_pcm_avail_update(): %s\n"), + snd_strerror(n)); + return -1; + } + avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, n); + if (avail < 256) { // too much overhead + if (first) { + // happens with broken alsa drivers + if (AudioThread) { + if (!AudioAlsaDriverBroken) { + Error(_("audio/alsa: broken driver %d state '%s'\n"), + avail, + snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); + } + // try to recover + if (snd_pcm_state(AlsaPCMHandle) + == SND_PCM_STATE_PREPARED) { + if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) { + Error(_("audio/alsa: snd_pcm_start(): %s\n"), + snd_strerror(err)); + } + } + usleep(5 * 1000); + } + } + Debug(4, "audio/alsa: break state '%s'\n", + snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); + break; } - } - Debug(4, "audio/alsa: break state '%s'\n", - snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); - break; - } - n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); - if (!n) { // ring buffer empty - if (first) { // only error on first loop - Debug(4, "audio/alsa: empty buffers %d\n", avail); - // ring buffer empty - // AlsaLowWaterMark = 1; - return 1; - } - return 0; - } - if (n < avail) { // not enough bytes in ring buffer - avail = n; - } - if (!avail) { // full or buffer empty - break; - } - // muting pass-through AC-3, can produce disturbance - if (AudioMute || (AudioSoftVolume - && !AudioRing[AudioRingRead].Passthrough)) { - // FIXME: quick&dirty cast - AudioSoftAmplifier((int16_t *) p, avail); - // FIXME: if not all are written, we double amplify them - } - frames = snd_pcm_bytes_to_frames(AlsaPCMHandle, avail); + n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); + if (!n) { // ring buffer empty + if (first) { // only error on first loop + Debug(4, "audio/alsa: empty buffers %d\n", avail); + // ring buffer empty + // AlsaLowWaterMark = 1; + return 1; + } + return 0; + } + if (n < avail) { // not enough bytes in ring buffer + avail = n; + } + if (!avail) { // full or buffer empty + break; + } + // muting pass-through AC-3, can produce disturbance + if (AudioMute || (AudioSoftVolume + && !AudioRing[AudioRingRead].Passthrough)) { + // FIXME: quick&dirty cast + AudioSoftAmplifier((int16_t *) p, avail); + // FIXME: if not all are written, we double amplify them + } + frames = snd_pcm_bytes_to_frames(AlsaPCMHandle, avail); #ifdef DEBUG - if (avail != snd_pcm_frames_to_bytes(AlsaPCMHandle, frames)) { - Error(_("audio/alsa: bytes lost -> out of sync\n")); - } + if (avail != 
snd_pcm_frames_to_bytes(AlsaPCMHandle, frames)) { + Error(_("audio/alsa: bytes lost -> out of sync\n")); + } #endif - for (;;) { - if (AlsaUseMmap) { - err = snd_pcm_mmap_writei(AlsaPCMHandle, p, frames); - } else { - err = snd_pcm_writei(AlsaPCMHandle, p, frames); - } - //Debug(3, "audio/alsa: wrote %d/%d frames\n", err, frames); - if (err != frames) { - if (err < 0) { - if (err == -EAGAIN) { - continue; - } - /* - if (err == -EBADFD) { - goto again; - } - */ - Warning(_("audio/alsa: writei underrun error? '%s'\n"), - snd_strerror(err)); - err = snd_pcm_recover(AlsaPCMHandle, err, 0); - if (err >= 0) { - continue; - } - Error(_("audio/alsa: snd_pcm_writei failed: %s\n"), - snd_strerror(err)); - return -1; + for (;;) { + if (AlsaUseMmap) { + err = snd_pcm_mmap_writei(AlsaPCMHandle, p, frames); + } else { + err = snd_pcm_writei(AlsaPCMHandle, p, frames); + } + //Debug(3, "audio/alsa: wrote %d/%d frames\n", err, frames); + if (err != frames) { + if (err < 0) { + if (err == -EAGAIN) { + continue; + } + /* + if (err == -EBADFD) { + goto again; + } + */ + Warning(_("audio/alsa: writei underrun error? '%s'\n"), + snd_strerror(err)); + err = snd_pcm_recover(AlsaPCMHandle, err, 0); + if (err >= 0) { + continue; + } + Error(_("audio/alsa: snd_pcm_writei failed: %s\n"), + snd_strerror(err)); + return -1; + } + // this could happen, if underrun happened + Warning(_("audio/alsa: not all frames written\n")); + avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, err); + } + break; } - // this could happen, if underrun happened - Warning(_("audio/alsa: not all frames written\n")); - avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, err); - } - break; - } - RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, avail); - first = 0; - } + RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, avail); + first = 0; + + } return 0; } @@ -905,20 +908,20 @@ static int AlsaPlayRingbuffer(void) static void AlsaFlushBuffers(void) { if (AlsaPCMHandle) { - int err; - snd_pcm_state_t state; + int err; + snd_pcm_state_t state; - state = snd_pcm_state(AlsaPCMHandle); - Debug(3, "audio/alsa: flush state %s\n", snd_pcm_state_name(state)); - if (state != SND_PCM_STATE_OPEN) { - if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { - Error(_("audio: snd_pcm_drop(): %s\n"), snd_strerror(err)); - } - // ****ing alsa crash, when in open state here - if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { - Error(_("audio: snd_pcm_prepare(): %s\n"), snd_strerror(err)); - } - } + state = snd_pcm_state(AlsaPCMHandle); + Debug(3, "audio/alsa: flush state %s\n", snd_pcm_state_name(state)); + if (state != SND_PCM_STATE_OPEN) { + if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { + Error(_("audio: snd_pcm_drop(): %s\n"), snd_strerror(err)); + } + // ****ing alsa crash, when in open state here + if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { + Error(_("audio: snd_pcm_prepare(): %s\n"), snd_strerror(err)); + } + } } } @@ -942,46 +945,46 @@ static int AlsaThread(void) int err; if (!AlsaPCMHandle) { - usleep(24 * 1000); - return -1; + usleep(24 * 1000); + return -1; } for (;;) { - if (AudioPaused) { - return 1; - } - // wait for space in kernel buffers - if ((err = snd_pcm_wait(AlsaPCMHandle, 24)) < 0) { - Warning(_("audio/alsa: wait underrun error? 
'%s'\n"), - snd_strerror(err)); - err = snd_pcm_recover(AlsaPCMHandle, err, 0); - if (err >= 0) { - continue; - } - Error(_("audio/alsa: snd_pcm_wait(): %s\n"), snd_strerror(err)); - usleep(24 * 1000); - return -1; - } - break; + if (AudioPaused) { + return 1; + } + // wait for space in kernel buffers + if ((err = snd_pcm_wait(AlsaPCMHandle, 24)) < 0) { + Warning(_("audio/alsa: wait underrun error? '%s'\n"), + snd_strerror(err)); + err = snd_pcm_recover(AlsaPCMHandle, err, 0); + if (err >= 0) { + continue; + } + Error(_("audio/alsa: snd_pcm_wait(): %s\n"), snd_strerror(err)); + usleep(24 * 1000); + return -1; + } + break; } if (!err || AudioPaused) { // timeout or some commands - return 1; + return 1; } if ((err = AlsaPlayRingbuffer())) { // empty or error - snd_pcm_state_t state; + snd_pcm_state_t state; - if (err < 0) { // underrun error - return -1; - } + if (err < 0) { // underrun error + return -1; + } - state = snd_pcm_state(AlsaPCMHandle); - if (state != SND_PCM_STATE_RUNNING) { - Debug(3, "audio/alsa: stopping play '%s'\n", - snd_pcm_state_name(state)); - return 0; - } + state = snd_pcm_state(AlsaPCMHandle); + if (state != SND_PCM_STATE_RUNNING) { + Debug(3, "audio/alsa: stopping play '%s'\n", + snd_pcm_state_name(state)); + return 0; + } - usleep(24 * 1000); // let fill/empty the buffers + usleep(24 * 1000); // let fill/empty the buffers } return 1; } @@ -1004,44 +1007,42 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough) // &&|| hell if (!(passthrough && ((device = AudioPassthroughDevice) || (device = getenv("ALSA_PASSTHROUGH_DEVICE")))) - && !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) { - device = "default"; + && !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) { + device = "default"; } if (!AudioDoingInit) { // reduce blabla during init - Info(_("audio/alsa: using %sdevice '%s'\n"), - passthrough ? "pass-through " : "", device); + Info(_("audio/alsa: using %sdevice '%s'\n"), passthrough ? 
"pass-through " : "", device); } // // for AC3 pass-through try to set the non-audio bit, use AES0=6 // if (passthrough && AudioAppendAES) { #if 0 - // FIXME: not yet finished - char *buf; - const char *s; - int n; + // FIXME: not yet finished + char *buf; + const char *s; + int n; - n = strlen(device); - buf = alloca(n + sizeof(":AES0=6") + 1); - strcpy(buf, device); - if (!(s = strchr(buf, ':'))) { - // no alsa parameters - strcpy(buf + n, ":AES=6"); - } - Debug(3, "audio/alsa: try '%s'\n", buf); + n = strlen(device); + buf = alloca(n + sizeof(":AES0=6") + 1); + strcpy(buf, device); + if (!(s = strchr(buf, ':'))) { + // no alsa parameters + strcpy(buf + n, ":AES=6"); + } + Debug(3, "audio/alsa: try '%s'\n", buf); #endif } // open none blocking; if device is already used, we don't want wait if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) { - Error(_("audio/alsa: playback open '%s' error: %s\n"), device, - snd_strerror(err)); - return NULL; + Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err)); + return NULL; } if ((err = snd_pcm_nonblock(handle, 0)) < 0) { - Error(_("audio/alsa: can't set block mode: %s\n"), snd_strerror(err)); + Error(_("audio/alsa: can't set block mode: %s\n"), snd_strerror(err)); } return handle; } @@ -1058,14 +1059,13 @@ static void AlsaInitPCM(void) int err; if (!(handle = AlsaOpenPCM(0))) { - return; + return; } // FIXME: pass-through and pcm out can support different features snd_pcm_hw_params_alloca(&hw_params); // choose all parameters if ((err = snd_pcm_hw_params_any(handle, hw_params)) < 0) { - Error(_ - ("audio: snd_pcm_hw_params_any: no configurations available: %s\n"), + Error(_("audio: snd_pcm_hw_params_any: no configurations available: %s\n"), snd_strerror(err)); } AlsaCanPause = snd_pcm_hw_params_can_pause(hw_params); @@ -1088,9 +1088,9 @@ static void AlsaSetVolume(int volume) int v; if (AlsaMixer && AlsaMixerElem) { - v = (volume * AlsaRatio) / (1000 * 1000); - snd_mixer_selem_set_playback_volume(AlsaMixerElem, 0, v); - snd_mixer_selem_set_playback_volume(AlsaMixerElem, 1, v); + v = (volume * AlsaRatio) / (1000 * 1000); + snd_mixer_selem_set_playback_volume(AlsaMixerElem, 0, v); + snd_mixer_selem_set_playback_volume(AlsaMixerElem, 1, v); } } @@ -1107,14 +1107,14 @@ static void AlsaInitMixer(void) long alsa_mixer_elem_max; if (!(device = AudioMixerDevice)) { - if (!(device = getenv("ALSA_MIXER"))) { - device = "default"; - } + if (!(device = getenv("ALSA_MIXER"))) { + device = "default"; + } } if (!(channel = AudioMixerChannel)) { - if (!(channel = getenv("ALSA_MIXER_CHANNEL"))) { - channel = "PCM"; - } + if (!(channel = getenv("ALSA_MIXER_CHANNEL"))) { + channel = "PCM"; + } } Debug(3, "audio/alsa: mixer %s - %s open\n", device, channel); snd_mixer_open(&alsa_mixer, 0); @@ -1122,29 +1122,29 @@ static void AlsaInitMixer(void) && snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0 && snd_mixer_load(alsa_mixer) >= 0) { - const char *const alsa_mixer_elem_name = channel; + const char *const alsa_mixer_elem_name = channel; - alsa_mixer_elem = snd_mixer_first_elem(alsa_mixer); - while (alsa_mixer_elem) { - const char *name; + alsa_mixer_elem = snd_mixer_first_elem(alsa_mixer); + while (alsa_mixer_elem) { + const char *name; - name = snd_mixer_selem_get_name(alsa_mixer_elem); - if (!strcasecmp(name, alsa_mixer_elem_name)) { - snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem, - &alsa_mixer_elem_min, &alsa_mixer_elem_max); - AlsaRatio = 1000 * (alsa_mixer_elem_max - 
alsa_mixer_elem_min); - Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n", - alsa_mixer_elem_min, alsa_mixer_elem_max, AlsaRatio); - break; - } + name = snd_mixer_selem_get_name(alsa_mixer_elem); + if (!strcasecmp(name, alsa_mixer_elem_name)) { + snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem, + &alsa_mixer_elem_min, &alsa_mixer_elem_max); + AlsaRatio = 1000 * (alsa_mixer_elem_max - alsa_mixer_elem_min); + Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n", + alsa_mixer_elem_min, alsa_mixer_elem_max, AlsaRatio); + break; + } - alsa_mixer_elem = snd_mixer_elem_next(alsa_mixer_elem); - } + alsa_mixer_elem = snd_mixer_elem_next(alsa_mixer_elem); + } - AlsaMixer = alsa_mixer; - AlsaMixerElem = alsa_mixer_elem; + AlsaMixer = alsa_mixer; + AlsaMixerElem = alsa_mixer_elem; } else { - Error(_("audio/alsa: can't open mixer '%s'\n"), device); + Error(_("audio/alsa: can't open mixer '%s'\n"), device); } } @@ -1167,26 +1167,25 @@ static int64_t AlsaGetDelay(void) // setup error if (!AlsaPCMHandle || !AudioRing[AudioRingRead].HwSampleRate) { - return 0L; + return 0L; } // delay in frames in alsa + kernel buffers if ((err = snd_pcm_delay(AlsaPCMHandle, &delay)) < 0) { - //Debug(3, "audio/alsa: no hw delay\n"); - delay = 0L; + //Debug(3, "audio/alsa: no hw delay\n"); + delay = 0L; #ifdef DEBUG } else if (snd_pcm_state(AlsaPCMHandle) != SND_PCM_STATE_RUNNING) { - //Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay); + //Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay); #endif } //Debug(3, "audio/alsa: %ld frames hw delay\n", delay); // delay can be negative, when underrun occur if (delay < 0) { - delay = 0L; + delay = 0L; } - pts = - ((int64_t) delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate; + pts = ((int64_t) delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate; return pts; } @@ -1212,93 +1211,93 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) int delay; if (!AlsaPCMHandle) { // alsa not running yet - // FIXME: if open fails for fe. pass-through, we never recover - return -1; + // FIXME: if open fails for fe. pass-through, we never recover + return -1; } if (!AudioAlsaNoCloseOpen) { // close+open to fix HDMI no sound bug - snd_pcm_t *handle; + snd_pcm_t *handle; - handle = AlsaPCMHandle; - // no lock needed, thread exit in main loop only - //Debug(3, "audio: %s [\n", __FUNCTION__); - AlsaPCMHandle = NULL; // other threads should check handle - snd_pcm_close(handle); - if (AudioAlsaCloseOpenDelay) { - usleep(50 * 1000); // 50ms delay for alsa recovery - } - // FIXME: can use multiple retries - if (!(handle = AlsaOpenPCM(passthrough))) { - return -1; - } - AlsaPCMHandle = handle; - //Debug(3, "audio: %s ]\n", __FUNCTION__); + handle = AlsaPCMHandle; + // no lock needed, thread exit in main loop only + //Debug(3, "audio: %s [\n", __FUNCTION__); + AlsaPCMHandle = NULL; // other threads should check handle + snd_pcm_close(handle); + if (AudioAlsaCloseOpenDelay) { + usleep(50 * 1000); // 50ms delay for alsa recovery + } + // FIXME: can use multiple retries + if (!(handle = AlsaOpenPCM(passthrough))) { + return -1; + } + AlsaPCMHandle = handle; + //Debug(3, "audio: %s ]\n", __FUNCTION__); } for (;;) { - if ((err = - snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, - AlsaUseMmap ? 
SND_PCM_ACCESS_MMAP_INTERLEAVED : - SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, - 96 * 1000))) { - // try reduced buffer size (needed for sunxi) - // FIXME: alternativ make this configurable - if ((err = - snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, - AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : - SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, - 72 * 1000))) { + if ((err = + snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, + AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : + SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, + 96 * 1000))) { + // try reduced buffer size (needed for sunxi) + // FIXME: alternativ make this configurable + if ((err = + snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, + AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : + SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, + 72 * 1000))) { - /* - if ( err == -EBADFD ) { - snd_pcm_close(AlsaPCMHandle); - AlsaPCMHandle = NULL; - continue; - } - */ + /* + if ( err == -EBADFD ) { + snd_pcm_close(AlsaPCMHandle); + AlsaPCMHandle = NULL; + continue; + } + */ - if (!AudioDoingInit) { - Error(_("audio/alsa: set params error: %s\n"), - snd_strerror(err)); + if (!AudioDoingInit) { + Error(_("audio/alsa: set params error: %s\n"), + snd_strerror(err)); + } + // FIXME: must stop sound, AudioChannels ... invalid + return -1; + } } - // FIXME: must stop sound, AudioChannels ... invalid - return -1; - } - } - break; + break; } // this is disabled, no advantages! if (0) { // no underruns allowed, play silence - snd_pcm_sw_params_t *sw_params; - snd_pcm_uframes_t boundary; + snd_pcm_sw_params_t *sw_params; + snd_pcm_uframes_t boundary; - snd_pcm_sw_params_alloca(&sw_params); - err = snd_pcm_sw_params_current(AlsaPCMHandle, sw_params); - if (err < 0) { - Error(_("audio: snd_pcm_sw_params_current failed: %s\n"), - snd_strerror(err)); - } - if ((err = snd_pcm_sw_params_get_boundary(sw_params, &boundary)) < 0) { - Error(_("audio: snd_pcm_sw_params_get_boundary failed: %s\n"), - snd_strerror(err)); - } - Debug(4, "audio/alsa: boundary %lu frames\n", boundary); - if ((err = - snd_pcm_sw_params_set_stop_threshold(AlsaPCMHandle, sw_params, - boundary)) < 0) { - Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), - snd_strerror(err)); - } - if ((err = - snd_pcm_sw_params_set_silence_size(AlsaPCMHandle, sw_params, - boundary)) < 0) { - Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), - snd_strerror(err)); - } - if ((err = snd_pcm_sw_params(AlsaPCMHandle, sw_params)) < 0) { - Error(_("audio: snd_pcm_sw_params failed: %s\n"), - snd_strerror(err)); - } + snd_pcm_sw_params_alloca(&sw_params); + err = snd_pcm_sw_params_current(AlsaPCMHandle, sw_params); + if (err < 0) { + Error(_("audio: snd_pcm_sw_params_current failed: %s\n"), + snd_strerror(err)); + } + if ((err = snd_pcm_sw_params_get_boundary(sw_params, &boundary)) < 0) { + Error(_("audio: snd_pcm_sw_params_get_boundary failed: %s\n"), + snd_strerror(err)); + } + Debug(4, "audio/alsa: boundary %lu frames\n", boundary); + if ((err = + snd_pcm_sw_params_set_stop_threshold(AlsaPCMHandle, sw_params, + boundary)) < 0) { + Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), + snd_strerror(err)); + } + if ((err = + snd_pcm_sw_params_set_silence_size(AlsaPCMHandle, sw_params, + boundary)) < 0) { + Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), + snd_strerror(err)); + } + if ((err = snd_pcm_sw_params(AlsaPCMHandle, sw_params)) < 0) { + Error(_("audio: snd_pcm_sw_params failed: %s\n"), + 
snd_strerror(err)); + } } // update buffer @@ -1315,20 +1314,19 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) // buffer time/delay in ms delay = AudioBufferTime; if (VideoAudioDelay > 0) { - delay += VideoAudioDelay / 90; + delay += VideoAudioDelay / 90; } if (AudioStartThreshold < (*freq * *channels * AudioBytesProSample * delay) / 1000U) { - AudioStartThreshold = - (*freq * *channels * AudioBytesProSample * delay) / 1000U; + AudioStartThreshold = (*freq * *channels * AudioBytesProSample * delay) / 1000U; } // no bigger, than 1/3 the buffer if (AudioStartThreshold > AudioRingBufferSize / 3) { - AudioStartThreshold = AudioRingBufferSize / 3; + AudioStartThreshold = AudioRingBufferSize / 3; } if (!AudioDoingInit) { - Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000) - / (*freq * *channels * AudioBytesProSample)); + Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000) + / (*freq * *channels * AudioBytesProSample)); } return 0; @@ -1342,17 +1340,17 @@ static void AlsaPlay(void) int err; if (AlsaCanPause) { - if ((err = snd_pcm_pause(AlsaPCMHandle, 0))) { - Error(_("audio/alsa: snd_pcm_pause(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_pause(AlsaPCMHandle, 0))) { + Error(_("audio/alsa: snd_pcm_pause(): %s\n"), snd_strerror(err)); + } } else { - if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { - Error(_("audio/alsa: snd_pcm_prepare(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { + Error(_("audio/alsa: snd_pcm_prepare(): %s\n"), snd_strerror(err)); + } } #ifdef DEBUG if (snd_pcm_state(AlsaPCMHandle) == SND_PCM_STATE_PAUSED) { - Error(_("audio/alsa: still paused\n")); + Error(_("audio/alsa: still paused\n")); } #endif } @@ -1365,13 +1363,13 @@ static void AlsaPause(void) int err; if (AlsaCanPause) { - if ((err = snd_pcm_pause(AlsaPCMHandle, 1))) { - Error(_("snd_pcm_pause(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_pause(AlsaPCMHandle, 1))) { + Error(_("snd_pcm_pause(): %s\n"), snd_strerror(err)); + } } else { - if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { - Error(_("snd_pcm_drop(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { + Error(_("snd_pcm_drop(): %s\n"), snd_strerror(err)); + } } } @@ -1409,13 +1407,13 @@ static void AlsaInit(void) static void AlsaExit(void) { if (AlsaPCMHandle) { - snd_pcm_close(AlsaPCMHandle); - AlsaPCMHandle = NULL; + snd_pcm_close(AlsaPCMHandle); + AlsaPCMHandle = NULL; } if (AlsaMixer) { - snd_mixer_close(AlsaMixer); - AlsaMixer = NULL; - AlsaMixerElem = NULL; + snd_mixer_close(AlsaMixer); + AlsaMixer = NULL; + AlsaMixerElem = NULL; } } @@ -1740,13 +1738,13 @@ static int64_t OssGetDelay(void) return 0L; } if (!AudioRunning) { // audio not running - Error(_("audio/oss: should not happen\n")); + Error(_("audio/oss: should not happen\n")); return 0L; } // delay in bytes in kernel buffers delay = -1; if (ioctl(OssPcmFildes, SNDCTL_DSP_GETODELAY, &delay) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"), + Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"), strerror(errno)); return 0L; } @@ -2027,12 +2025,12 @@ static int AudioNextRing(void) sample_rate = AudioRing[AudioRingRead].HwSampleRate; channels = AudioRing[AudioRingRead].HwChannels; if (AudioUsedModule->Setup(&sample_rate, &channels, passthrough)) { - Error(_("audio: can't set channels %d sample-rate %dHz\n"), channels, - sample_rate); - // FIXME: handle error - AudioRing[AudioRingRead].HwSampleRate = 0; - 
AudioRing[AudioRingRead].InSampleRate = 0; - return -1; + Error(_("audio: can't set channels %d sample-rate %dHz\n"), channels, + sample_rate); + // FIXME: handle error + AudioRing[AudioRingRead].HwSampleRate = 0; + AudioRing[AudioRingRead].InSampleRate = 0; + return -1; } AudioSetVolume(AudioVolume); // update channel delta @@ -2048,7 +2046,7 @@ static int AudioNextRing(void) used = RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer); if (AudioStartThreshold * 4 < used || (AudioVideoIsReady && AudioStartThreshold < used)) { - return 0; + return 0; } return 1; } @@ -2063,114 +2061,117 @@ static void *AudioPlayHandlerThread(void *dummy) Debug(3, "audio: play thread started\n"); prctl(PR_SET_NAME,"cuvid audio",0,0,0); for (;;) { - // check if we should stop the thread - if (AudioThreadStop) { - Debug(3, "audio: play thread stopped\n"); - return PTHREAD_CANCELED; - } - - Debug(3, "audio: wait on start condition\n"); - pthread_mutex_lock(&AudioMutex); - AudioRunning = 0; - do { - pthread_cond_wait(&AudioStartCond, &AudioMutex); - // cond_wait can return, without signal! - } while (!AudioRunning); - pthread_mutex_unlock(&AudioMutex); - - Debug(3, "audio: ----> %dms start\n", (AudioUsedBytes() * 1000) - / (!AudioRing[AudioRingWrite].HwSampleRate + - !AudioRing[AudioRingWrite].HwChannels + - AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); - - do { - int filled; - int read; - int flush; - int err; - int i; - - // check if we should stop the thread - if (AudioThreadStop) { - Debug(3, "audio: play thread stopped\n"); - return PTHREAD_CANCELED; - } - // look if there is a flush command in the queue - flush = 0; - filled = atomic_read(&AudioRingFilled); - read = AudioRingRead; - i = filled; - while (i--) { - read = (read + 1) % AUDIO_RING_MAX; - if (AudioRing[read].FlushBuffers) { - AudioRing[read].FlushBuffers = 0; - AudioRingRead = read; - // handle all flush in queue - flush = filled - i; - } - } - - if (flush) { - Debug(3, "audio: flush %d ring buffer(s)\n", flush); - AudioUsedModule->FlushBuffers(); - atomic_sub(flush, &AudioRingFilled); - if (AudioNextRing()) { - Debug(3, "audio: break after flush\n"); - break; - } - Debug(3, "audio: continue after flush\n"); - } - // try to play some samples - err = 0; - if (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)) { - err = AudioUsedModule->Thread(); - } - // underrun, check if new ring buffer is available - if (!err) { - int passthrough; - int sample_rate; - int channels; - int old_passthrough; - int old_sample_rate; - int old_channels; - - // underrun, and no new ring buffer, goto sleep. - if (!atomic_read(&AudioRingFilled)) { - break; + // check if we should stop the thread + if (AudioThreadStop) { + Debug(3, "audio: play thread stopped\n"); + return PTHREAD_CANCELED; } - Debug(3, "audio: next ring buffer\n"); - old_passthrough = AudioRing[AudioRingRead].Passthrough; - old_sample_rate = AudioRing[AudioRingRead].HwSampleRate; - old_channels = AudioRing[AudioRingRead].HwChannels; + Debug(3, "audio: wait on start condition\n"); + pthread_mutex_lock(&AudioMutex); + AudioRunning = 0; + do { + pthread_cond_wait(&AudioStartCond, &AudioMutex); + // cond_wait can return, without signal! 
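Review note: the do { pthread_cond_wait(&AudioStartCond, &AudioMutex); } while (!AudioRunning) loop here (its closing brace follows just below) is the standard guard against spurious wakeups: the predicate has to be re-checked under the mutex after every wakeup. A minimal stand-alone sketch of the same pattern, with illustrative names (Mutex/Cond/Running are not the plugin's identifiers):

    #include <pthread.h>

    static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t Cond = PTHREAD_COND_INITIALIZER;
    static volatile int Running;

    /* consumer: sleep until a producer sets Running and signals */
    static void WaitForStart(void)
    {
        pthread_mutex_lock(&Mutex);
        while (!Running) {              /* re-check: wait may return without a signal */
            pthread_cond_wait(&Cond, &Mutex);
        }
        pthread_mutex_unlock(&Mutex);
    }

    /* producer: set the predicate first, then signal */
    static void SignalStart(void)
    {
        pthread_mutex_lock(&Mutex);
        Running = 1;
        pthread_cond_signal(&Cond);
        pthread_mutex_unlock(&Mutex);
    }

The sketch signals with the mutex held; AudioEnqueue in the patch deliberately signals without it ("no lock needed, can wakeup next time"), which the code tolerates because a missed wakeup is retried on the next enqueue.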
+ } while (!AudioRunning); + pthread_mutex_unlock(&AudioMutex); - atomic_dec(&AudioRingFilled); - AudioRingRead = (AudioRingRead + 1) % AUDIO_RING_MAX; + Debug(3, "audio: ----> %dms start\n", (AudioUsedBytes() * 1000) + / (!AudioRing[AudioRingWrite].HwSampleRate + + !AudioRing[AudioRingWrite].HwChannels + + AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); - passthrough = AudioRing[AudioRingRead].Passthrough; - sample_rate = AudioRing[AudioRingRead].HwSampleRate; - channels = AudioRing[AudioRingRead].HwChannels; - Debug(3, "audio: thread channels %d frequency %dHz %s\n", - channels, sample_rate, passthrough ? "pass-through" : ""); - // audio config changed? - if (old_passthrough != passthrough - || old_sample_rate != sample_rate - || old_channels != channels) { - // FIXME: wait for buffer drain - if (AudioNextRing()) { - break; - } - } else { - AudioResetCompressor(); - AudioResetNormalizer(); - } - } - // FIXME: check AudioPaused ...Thread() - if (AudioPaused) { - break; - } - } while (AudioRing[AudioRingRead].HwSampleRate); + do { + int filled; + int read; + int flush; + int err; + int i; + + // check if we should stop the thread + if (AudioThreadStop) { + Debug(3, "audio: play thread stopped\n"); + return PTHREAD_CANCELED; + } + // look if there is a flush command in the queue + flush = 0; + filled = atomic_read(&AudioRingFilled); + read = AudioRingRead; + i = filled; + while (i--) { + read = (read + 1) % AUDIO_RING_MAX; + if (AudioRing[read].FlushBuffers) { + AudioRing[read].FlushBuffers = 0; + AudioRingRead = read; + // handle all flush in queue + flush = filled - i; + } + } + + if (flush) { + Debug(3, "audio: flush %d ring buffer(s)\n", flush); + AudioUsedModule->FlushBuffers(); + atomic_sub(flush, &AudioRingFilled); + if (AudioNextRing()) { + Debug(3, "audio: HandlerThread break after flush\n"); + break; + } + Debug(3, "audio: continue after flush\n"); + } + // try to play some samples + err = 0; + if (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)) { + err = AudioUsedModule->Thread(); + } + // underrun, check if new ring buffer is available + if (!err) { + int passthrough; + int sample_rate; + int channels; + int old_passthrough; + int old_sample_rate; + int old_channels; + + // underrun, and no new ring buffer, goto sleep. + if (!atomic_read(&AudioRingFilled)) { + Debug(3,"audio: HandlerThread Underrun with no new data\n"); + break; + } + + Debug(3, "audio: next ring buffer\n"); + old_passthrough = AudioRing[AudioRingRead].Passthrough; + old_sample_rate = AudioRing[AudioRingRead].HwSampleRate; + old_channels = AudioRing[AudioRingRead].HwChannels; + + atomic_dec(&AudioRingFilled); + AudioRingRead = (AudioRingRead + 1) % AUDIO_RING_MAX; + + passthrough = AudioRing[AudioRingRead].Passthrough; + sample_rate = AudioRing[AudioRingRead].HwSampleRate; + channels = AudioRing[AudioRingRead].HwChannels; + Debug(3, "audio: thread channels %d frequency %dHz %s\n", + channels, sample_rate, passthrough ? "pass-through" : ""); + // audio config changed? 
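Review note: the Debug and PTS computations in this hunk convert ring-buffer byte counts into time via rate * channels * AudioBytesProSample (the !HwSampleRate + !HwChannels terms in the start message above only guard against a division by zero). A purely illustrative helper showing both the millisecond and the 90 kHz PTS form of that conversion:

    #include <stddef.h>
    #include <stdint.h>

    /* bytes of interleaved S16 audio -> milliseconds
    ** rate in samples/s, channels interleaved, bytes_per_sample = 2 for S16 */
    static int BytesToMs(size_t bytes, int rate, int channels, int bytes_per_sample)
    {
        return (int)((bytes * 1000) / ((size_t)rate * channels * bytes_per_sample));
    }

    /* the same byte count expressed in 90 kHz PTS ticks, as used for A/V sync */
    static int64_t BytesToPts(size_t bytes, int rate, int channels, int bytes_per_sample)
    {
        return ((int64_t)bytes * 90 * 1000) / (rate * channels * bytes_per_sample);
    }

Example: 48 kHz stereo S16 is 192000 bytes per second, so 96000 buffered bytes correspond to 500 ms, or 45000 PTS ticks.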
+ if (old_passthrough != passthrough + || old_sample_rate != sample_rate + || old_channels != channels) { + // FIXME: wait for buffer drain + if (AudioNextRing()) { + Debug(3,"audio: HandlerThread break on nextring"); + break; + } + } else { + AudioResetCompressor(); + AudioResetNormalizer(); + } + } + // FIXME: check AudioPaused ...Thread() + if (AudioPaused) { + Debug(3,"audio: HandlerThread break on paused"); + break; + } + } while (AudioRing[AudioRingRead].HwSampleRate); } return dummy; } @@ -2197,15 +2198,15 @@ static void AudioExitThread(void) Debug(3, "audio: %s\n", __FUNCTION__); if (AudioThread) { - AudioThreadStop = 1; - AudioRunning = 1; // wakeup thread, if needed - pthread_cond_signal(&AudioStartCond); - if (pthread_join(AudioThread, &retval) || retval != PTHREAD_CANCELED) { - Error(_("audio: can't cancel play thread\n")); - } - pthread_cond_destroy(&AudioStartCond); - pthread_mutex_destroy(&AudioMutex); - AudioThread = 0; + AudioThreadStop = 1; + AudioRunning = 1; // wakeup thread, if needed + pthread_cond_signal(&AudioStartCond); + if (pthread_join(AudioThread, &retval) || retval != PTHREAD_CANCELED) { + Error(_("audio: can't cancel play thread\n")); + } + pthread_cond_destroy(&AudioStartCond); + pthread_mutex_destroy(&AudioMutex); + AudioThread = 0; } } @@ -2250,102 +2251,104 @@ void AudioEnqueue(const void *samples, int count) #endif if (!AudioRing[AudioRingWrite].HwSampleRate) { - Debug(3, "audio: enqueue not ready\n"); - return; // no setup yet + Debug(3, "audio: enqueue not ready\n"); + return; // no setup yet } // save packet size if (!AudioRing[AudioRingWrite].PacketSize) { - AudioRing[AudioRingWrite].PacketSize = count; - Debug(3, "audio: a/v packet size %d bytes\n", count); + AudioRing[AudioRingWrite].PacketSize = count; + Debug(3, "audio: a/v packet size %d bytes\n", count); } // audio sample modification allowed and needed? 
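Review note: the sample-modification path of AudioEnqueue that follows (see the sketch below) sizes its temporary buffer in frames rather than bytes, because the channel count may differ between the incoming packet and the hardware layout. A stand-alone model of that sizing arithmetic; BYTES_PRO_SAMPLE mirrors AudioBytesProSample (2 for S16):

    #include <stddef.h>

    enum { BYTES_PRO_SAMPLE = 2 };      /* S16 samples, as in the plugin */

    /* size of 'count' input bytes after the channel count changes
    ** from in_channels to hw_channels; the frame count stays the same */
    static size_t ConvertedSize(size_t count, int in_channels, int hw_channels)
    {
        size_t frames = count / ((size_t)in_channels * BYTES_PRO_SAMPLE);

        return frames * hw_channels * BYTES_PRO_SAMPLE;
    }

For example, a 6144-byte 5.1 packet (6 channels, 512 frames) downmixed to stereo still holds 512 frames but only 2048 bytes; AudioEnqueue allocates that smaller size with alloca() and then runs compression and normalization on it in place.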
buffer = (void *)samples; if (!AudioRing[AudioRingWrite].Passthrough && (AudioCompression || AudioNormalize || AudioRing[AudioRingWrite].InChannels != - AudioRing[AudioRingWrite].HwChannels)) { - int frames; + AudioRing[AudioRingWrite].HwChannels)) { + int frames; - // resample into ring-buffer is too complex in the case of a roundabout - // just use a temporary buffer - frames = - count / (AudioRing[AudioRingWrite].InChannels * - AudioBytesProSample); - buffer = - alloca(frames * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample); + // resample into ring-buffer is too complex in the case of a roundabout + // just use a temporary buffer + frames = + count / (AudioRing[AudioRingWrite].InChannels * + AudioBytesProSample); + buffer = + alloca(frames * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample); #ifdef USE_AUDIO_MIXER - // Convert / resample input to hardware format - AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames, - buffer, AudioRing[AudioRingWrite].HwChannels); + // Convert / resample input to hardware format + AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames, + buffer, AudioRing[AudioRingWrite].HwChannels); #else #ifdef DEBUG - if (AudioRing[AudioRingWrite].InChannels != - AudioRing[AudioRingWrite].HwChannels) { - Debug(3, "audio: internal failure channels mismatch\n"); - return; - } + if (AudioRing[AudioRingWrite].InChannels != + AudioRing[AudioRingWrite].HwChannels) { + Debug(3, "audio: internal failure channels mismatch\n"); + return; + } #endif - memcpy(buffer, samples, count); + memcpy(buffer, samples, count); #endif - count = - frames * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample; + count = + frames * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample; - if (AudioCompression) { // in place operation - AudioCompressor(buffer, count); - } - if (AudioNormalize) { // in place operation - AudioNormalizer(buffer, count); - } + if (AudioCompression) { // in place operation + AudioCompressor(buffer, count); + } + if (AudioNormalize) { // in place operation + AudioNormalizer(buffer, count); + } } n = RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, buffer, count); if (n != (size_t) count) { - Error(_("audio: can't place %d samples in ring buffer\n"), count); - // too many bytes are lost - // FIXME: caller checks buffer full. - // FIXME: should skip more, longer skip, but less often? - // FIXME: round to channel + sample border + Error(_("audio: can't place %d samples in ring buffer\n"), count); + // too many bytes are lost + // FIXME: caller checks buffer full. + // FIXME: should skip more, longer skip, but less often? + // FIXME: round to channel + sample border } if (!AudioRunning) { // check, if we can start the thread - int skip; + int skip; - n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - skip = AudioSkip; - // FIXME: round to packet size + n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + skip = AudioSkip; + // FIXME: round to packet size - Debug(4, "audio: start? %4zdms skip %dms\n", (n * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), - (skip * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); + Debug(4, "audio: start? 
%4zdms skip %dms\n", (n * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), + (skip * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); - if (skip) { - if (n < (unsigned)skip) { - skip = n; - } - AudioSkip -= skip; - RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); - n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - } - // forced start or enough video + audio buffered - // for some exotic channels * 4 too small - if (AudioStartThreshold * 4 < n || (AudioVideoIsReady - && AudioStartThreshold < n)) { - // restart play-back - // no lock needed, can wakeup next time - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - } + if (skip) { + if (n < (unsigned)skip) { + skip = n; + } + AudioSkip -= skip; + RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); + n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + } + // forced start or enough video + audio buffered + // for some exotic channels * 4 too small +// if (AudioStartThreshold * 4 < n || (AudioVideoIsReady + if ((AudioVideoIsReady + && AudioStartThreshold < n)) { + // restart play-back + // no lock needed, can wakeup next time + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3,"Start on AudioEnque\n"); + } } // Update audio clock (stupid gcc developers thinks INT64_C is unsigned) if (AudioRing[AudioRingWrite].PTS != (int64_t) INT64_C(0x8000000000000000)) { - AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); + AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); } } @@ -2360,70 +2363,74 @@ void AudioVideoReady(int64_t pts) size_t used; if (pts == (int64_t) INT64_C(0x8000000000000000)) { - Debug(3, "audio: a/v start, no valid video\n"); + Debug(3, "audio: a/v start, no valid video\n"); return; } // no valid audio known if (!AudioRing[AudioRingWrite].HwSampleRate - || !AudioRing[AudioRingWrite].HwChannels - || AudioRing[AudioRingWrite].PTS == - (int64_t) INT64_C(0x8000000000000000)) { - Debug(3, "audio: a/v start, no valid audio\n"); - AudioVideoIsReady = 1; - return; + || !AudioRing[AudioRingWrite].HwChannels + || AudioRing[AudioRingWrite].PTS == + (int64_t) INT64_C(0x8000000000000000)) { + Debug(3, "audio: a/v start, no valid audio\n"); + AudioVideoIsReady = 1; + return; } // Audio.PTS = next written sample time stamp used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); audio_pts = - AudioRing[AudioRingWrite].PTS - - (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); + AudioRing[AudioRingWrite].PTS - + (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); - Debug(3, "audio: a/v sync buf(%d,%4zdms) %s|%s = %dms %s\n", - atomic_read(&AudioRingFilled), - (used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), - Timestamp2String(pts), Timestamp2String(audio_pts), - (int)(pts - audio_pts) / 90, AudioRunning ? 
"running" : "ready"); + Debug(3, "audio: a/v sync buf(%d,%4zdms) %s | %s = %dms %s\n", + atomic_read(&AudioRingFilled), + (used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), + Timestamp2String(pts), Timestamp2String(audio_pts), + (int)(pts - audio_pts) / 90, AudioRunning ? "running" : "ready"); - if (!AudioRunning) { - int skip; + if (!AudioRunning || 1) { + int skip; - // buffer ~15 video frames - // FIXME: HDTV can use smaller video buffer - skip = - pts - 15 * 20 * 90 - AudioBufferTime * 90 - audio_pts + - VideoAudioDelay; + // buffer ~15 video frames + // FIXME: HDTV can use smaller video buffer + skip = + pts - 15 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay; #ifdef DEBUG - fprintf(stderr, "%dms %dms %dms\n", (int)(pts - audio_pts) / 90, - VideoAudioDelay / 90, skip / 90); + fprintf(stderr, "%dms %dms %dms\n", (int)(pts - audio_pts) / 90, + VideoAudioDelay / 90, skip / 90); #endif - // guard against old PTS - if (skip > 0 && skip < 2000 * 90) { - skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate) - / (1000 * 90)) - * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; - // FIXME: round to packet size - if ((unsigned)skip > used) { - AudioSkip = skip - used; - skip = used; - } - Debug(3, "audio: sync advance %dms %d/%zd\n", - (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample), skip, used); - RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); + // guard against old PTS + if (skip > 0 && skip < 4000 * 90) { + skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate) / (1000 * 90)) + * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; + // FIXME: round to packet size + if ((unsigned)skip > used) { + AudioSkip = skip - used; + skip = used; + } + Debug(3, "audio: sync advance %dms %d/%zd\n", + (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * + AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample), skip, used); + RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); - used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - } - // FIXME: skip<0 we need bigger audio buffer + used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + } + else { + Debug(3,"No audio skip -> should skip %d\n",skip/90); + AudioRunning = 0; + usleep(abs(skip/90)*1000); + } + // FIXME: skip<0 we need bigger audio buffer - // enough video + audio buffered - if (AudioStartThreshold < used) { - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - } + // enough video + audio buffered + if (AudioStartThreshold < used) { + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3,"Start on AudioVideoReady\n"); + } } AudioVideoIsReady = 1; @@ -2478,19 +2485,19 @@ void AudioFlushBuffers(void) int i; if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { - // wait for space in ring buffer, should never happen - for (i = 0; i < 24 * 2; ++i) { - if (atomic_read(&AudioRingFilled) < AUDIO_RING_MAX) { - break; - } - Debug(3, "audio: flush out of ring buffers\n"); - usleep(1 * 1000); // avoid hot polling - } - if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { - // FIXME: We can set the flush flag in the last wrote ring buffer - Error(_("audio: flush out of ring buffers\n")); - return; - } + // wait for space in ring buffer, should never happen + for (i = 0; i < 24 * 2; ++i) { + if (atomic_read(&AudioRingFilled) < AUDIO_RING_MAX) { + break; + } + 
Debug(3, "audio: flush out of ring buffers\n"); + usleep(1 * 1000); // avoid hot polling + } + if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { + // FIXME: We can set the flush flag in the last wrote ring buffer + Error(_("audio: flush out of ring buffers\n")); + return; + } } old = AudioRingWrite; @@ -2512,15 +2519,16 @@ void AudioFlushBuffers(void) // FIXME: wait for flush complete needed? for (i = 0; i < 24 * 2; ++i) { - if (!AudioRunning) { // wakeup thread to flush buffers - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - } - // FIXME: waiting on zero isn't correct, but currently works - if (!atomic_read(&AudioRingFilled)) { - break; - } - usleep(1 * 1000); // avoid hot polling + if (!AudioRunning) { // wakeup thread to flush buffers + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3,"Start on Flush\n"); + } + // FIXME: waiting on zero isn't correct, but currently works + if (!atomic_read(&AudioRingFilled)) { + break; + } + usleep(1 * 1000); // avoid hot polling } Debug(3, "audio: audio flush %dms\n", i); } @@ -2676,8 +2684,8 @@ int AudioSetup(int *freq, int *channels, int passthrough) void AudioPlay(void) { if (!AudioPaused) { - Debug(3, "audio: not paused, check the code\n"); - return; + Debug(3, "audio: not paused, check the code\n"); + return; } Debug(3, "audio: resumed\n"); AudioPaused = 0; @@ -2690,8 +2698,8 @@ void AudioPlay(void) void AudioPause(void) { if (AudioPaused) { - Debug(3, "audio: already paused, check the code\n"); - return; + Debug(3, "audio: already paused, check the code\n"); + return; } Debug(3, "audio: paused\n"); AudioPaused = 1; @@ -3008,7 +3016,7 @@ void AudioInit(void) } #ifdef USE_AUDIO_THREAD if (AudioUsedModule->Thread) { // supports threads - AudioInitThread(); + AudioInitThread(); } #endif AudioDoingInit = 0; @@ -3025,7 +3033,7 @@ void AudioExit(void) #ifdef USE_AUDIO_THREAD if (AudioUsedModule->Thread) { // supports threads - AudioExitThread(); + AudioExitThread(); } #endif module = AudioUsedModule; @@ -3056,7 +3064,7 @@ void AudioTest(void) Debug(3, "audio/test: loop\n"); for (i = 0; i < 100; ++i) { while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { - AlsaEnqueue(buffer, sizeof(buffer)); + AlsaEnqueue(buffer, sizeof(buffer)); } usleep(20 * 1000); } @@ -3140,30 +3148,30 @@ int main(int argc, char *const argv[]) break; } if (optind < argc) { - PrintVersion(); - while (optind < argc) { - fprintf(stderr, "Unhandled argument '%s'\n", argv[optind++]); - } - return -1; + PrintVersion(); + while (optind < argc) { + fprintf(stderr, "Unhandled argument '%s'\n", argv[optind++]); + } + return -1; } // // main loop // AudioInit(); for (;;) { - unsigned u; - uint8_t buffer[16 * 1024]; // some random data + unsigned u; + uint8_t buffer[16 * 1024]; // some random data - for (u = 0; u < sizeof(buffer); u++) { - buffer[u] = random() & 0xffff; - } + for (u = 0; u < sizeof(buffer); u++) { + buffer[u] = random() & 0xffff; + } - Debug(3, "audio/test: loop\n"); - for (;;) { - while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { - AlsaEnqueue(buffer, sizeof(buffer)); - } - } + Debug(3, "audio/test: loop\n"); + for (;;) { + while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { + AlsaEnqueue(buffer, sizeof(buffer)); + } + } } AudioExit(); diff --git a/codec.c b/codec.c index 2a170d5..6e25809 100644 --- a/codec.c +++ b/codec.c @@ -315,7 +315,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) decoder->VideoCtx->pkt_timebase.den = 90000; decoder->VideoCtx->framerate.num = 50; 
decoder->VideoCtx->framerate.den = 1; - decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1 + pthread_mutex_lock(&CodecLockMutex); // open codec @@ -323,6 +323,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) deint = 2; #endif #ifdef VAAPI + decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1 if (video_codec->capabilities & (AV_CODEC_CAP_AUTO_THREADS)) { Debug(3,"codec: auto threads enabled"); decoder->VideoCtx->thread_count = 0; @@ -1220,23 +1221,23 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) delay = AudioGetDelay(); if (!delay) { - return; + return; } clock_gettime(CLOCK_MONOTONIC, &nowtime); if (!audio_decoder->LastDelay) { - audio_decoder->LastTime = nowtime; - audio_decoder->LastPTS = pts; - audio_decoder->LastDelay = delay; - audio_decoder->Drift = 0; - audio_decoder->DriftFrac = 0; - Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", - delay / 90); - return; + audio_decoder->LastTime = nowtime; + audio_decoder->LastPTS = pts; + audio_decoder->LastDelay = delay; + audio_decoder->Drift = 0; + audio_decoder->DriftFrac = 0; + Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", + delay / 90); + return; } // collect over some time pts_diff = pts - audio_decoder->LastPTS; if (pts_diff < 10 * 1000 * 90) { - return; + return; } tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec) @@ -1262,51 +1263,51 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) // underruns and av_resample have the same time :((( if (abs(drift) > 10 * 90) { // drift too big, pts changed? - Debug(3, "codec/audio: drift(%6d) %3dms reset\n", - audio_decoder->DriftCorr, drift / 90); - audio_decoder->LastDelay = 0; + Debug(3, "codec/audio: drift(%6d) %3dms reset\n", + audio_decoder->DriftCorr, drift / 90); + audio_decoder->LastDelay = 0; #ifdef DEBUG - corr = 0; // keep gcc happy + corr = 0; // keep gcc happy #endif } else { - drift += audio_decoder->Drift; - audio_decoder->Drift = drift; - corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); - // SPDIF/HDMI passthrough - if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) - && (!(CodecPassthrough & CodecEAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { - audio_decoder->DriftCorr = -corr; - } + drift += audio_decoder->Drift; + audio_decoder->Drift = drift; + corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); + // SPDIF/HDMI passthrough + if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) + || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) + && (!(CodecPassthrough & CodecEAC3) + || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { + audio_decoder->DriftCorr = -corr; + } - if (audio_decoder->DriftCorr < -20000) { // limit correction - audio_decoder->DriftCorr = -20000; - } else if (audio_decoder->DriftCorr > 20000) { - audio_decoder->DriftCorr = 20000; - } + if (audio_decoder->DriftCorr < -20000) { // limit correction + audio_decoder->DriftCorr = -20000; + } else if (audio_decoder->DriftCorr > 20000) { + audio_decoder->DriftCorr = 20000; + } } // FIXME: this works with libav 0.8, and only with >10ms with ffmpeg 0.10 if (audio_decoder->AvResample && audio_decoder->DriftCorr) { - int distance; + int distance; - // try workaround for buggy ffmpeg 0.10 - if (abs(audio_decoder->DriftCorr) < 2000) { - distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); - } else { - distance = (pts_diff * 
audio_decoder->HwSampleRate) / (90 * 1000); - } - av_resample_compensate(audio_decoder->AvResample, - audio_decoder->DriftCorr / 10, distance); + // try workaround for buggy ffmpeg 0.10 + if (abs(audio_decoder->DriftCorr) < 2000) { + distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); + } else { + distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000); + } + av_resample_compensate(audio_decoder->AvResample, + audio_decoder->DriftCorr / 10, distance); } if (1) { - static int c; + static int c; - if (!(c++ % 10)) { - Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", - audio_decoder->DriftCorr, drift * 1000 / 90, corr); - } + if (!(c++ % 10)) { + Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", + audio_decoder->DriftCorr, drift * 1000 / 90, corr); + } } } @@ -1324,64 +1325,64 @@ static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder) int err; if (audio_decoder->ReSample) { - audio_resample_close(audio_decoder->ReSample); - audio_decoder->ReSample = NULL; + audio_resample_close(audio_decoder->ReSample); + audio_decoder->ReSample = NULL; } if (audio_decoder->AvResample) { - av_resample_close(audio_decoder->AvResample); - audio_decoder->AvResample = NULL; - audio_decoder->RemainCount = 0; + av_resample_close(audio_decoder->AvResample); + audio_decoder->AvResample = NULL; + audio_decoder->RemainCount = 0; } audio_ctx = audio_decoder->AudioCtx; if ((err = CodecAudioUpdateHelper(audio_decoder, &passthrough))) { - Debug(3, "codec/audio: resample %dHz *%d -> %dHz *%d err %d\n", - audio_ctx->sample_rate, audio_ctx->channels, - audio_decoder->HwSampleRate, audio_decoder->HwChannels,err); + Debug(3, "codec/audio: resample %dHz *%d -> %dHz *%d err %d\n", + audio_ctx->sample_rate, audio_ctx->channels, + audio_decoder->HwSampleRate, audio_decoder->HwChannels,err); - if (err == 1) { - audio_decoder->ReSample = - av_audio_resample_init(audio_decoder->HwChannels, - audio_ctx->channels, audio_decoder->HwSampleRate, - audio_ctx->sample_rate, audio_ctx->sample_fmt, - audio_ctx->sample_fmt, 16, 10, 0, 0.8); - // libav-0.8_pre didn't support 6 -> 2 channels - if (!audio_decoder->ReSample) { - Error(_("codec/audio: resample setup error\n")); + if (err == 1) { + audio_decoder->ReSample = + av_audio_resample_init(audio_decoder->HwChannels, + audio_ctx->channels, audio_decoder->HwSampleRate, + audio_ctx->sample_rate, audio_ctx->sample_fmt, + audio_ctx->sample_fmt, 16, 10, 0, 0.8); + // libav-0.8_pre didn't support 6 -> 2 channels + if (!audio_decoder->ReSample) { + Error(_("codec/audio: resample setup error\n")); + audio_decoder->HwChannels = 0; + audio_decoder->HwSampleRate = 0; + } + return; + } + Debug(3, "codec/audio: audio setup error\n"); + // FIXME: handle errors audio_decoder->HwChannels = 0; audio_decoder->HwSampleRate = 0; - } - return; - } - Debug(3, "codec/audio: audio setup error\n"); - // FIXME: handle errors - audio_decoder->HwChannels = 0; - audio_decoder->HwSampleRate = 0; - return; + return; } if (passthrough) { // pass-through no conversion allowed - return; + return; } // prepare audio drift resample #ifdef USE_AUDIO_DRIFT_CORRECTION if (CodecAudioDrift & CORRECT_PCM) { - if (audio_decoder->AvResample) { - Error(_("codec/audio: overwrite resample\n")); - } - audio_decoder->AvResample = - av_resample_init(audio_decoder->HwSampleRate, - audio_decoder->HwSampleRate, 16, 10, 0, 0.8); - if (!audio_decoder->AvResample) { - Error(_("codec/audio: AvResample setup error\n")); - } else { - // reset drift to some default value - audio_decoder->DriftCorr /= 2; - 
audio_decoder->DriftFrac = 0; - av_resample_compensate(audio_decoder->AvResample, - audio_decoder->DriftCorr / 10, - 10 * audio_decoder->HwSampleRate); - } + if (audio_decoder->AvResample) { + Error(_("codec/audio: overwrite resample\n")); + } + audio_decoder->AvResample = + av_resample_init(audio_decoder->HwSampleRate, + audio_decoder->HwSampleRate, 16, 10, 0, 0.8); + if (!audio_decoder->AvResample) { + Error(_("codec/audio: AvResample setup error\n")); + } else { + // reset drift to some default value + audio_decoder->DriftCorr /= 2; + audio_decoder->DriftFrac = 0; + av_resample_compensate(audio_decoder->AvResample, + audio_decoder->DriftCorr / 10, + 10 * audio_decoder->HwSampleRate); + } } #endif } @@ -1718,22 +1719,22 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) #endif #ifdef USE_AVRESAMPLE if (audio_decoder->Resample && audio_decoder->DriftCorr) { - int distance; + int distance; - distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); - if (avresample_set_compensation(audio_decoder->Resample, - audio_decoder->DriftCorr / 10, distance)) { - Debug(3, "codec/audio: swr_set_compensation failed\n"); - } + distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); + if (avresample_set_compensation(audio_decoder->Resample, + audio_decoder->DriftCorr / 10, distance)) { + Debug(3, "codec/audio: swr_set_compensation failed\n"); + } } #endif if (1) { - static int c; + static int c; - if (!(c++ % 10)) { - Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", - audio_decoder->DriftCorr, drift * 1000 / 90, corr); - } + if (!(c++ % 10)) { + Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", + audio_decoder->DriftCorr, drift * 1000 / 90, corr); + } } #else AudioSetClock(pts); @@ -1825,55 +1826,56 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) AVCodecContext *audio_ctx = audio_decoder->AudioCtx; if (audio_ctx->codec_type == AVMEDIA_TYPE_AUDIO) { - int ret; - AVPacket pkt[1]; - AVFrame *frame = audio_decoder->Frame; + int ret; + AVPacket pkt[1]; + AVFrame *frame = audio_decoder->Frame; - av_frame_unref(frame); - *pkt = *avpkt; // use copy - ret = avcodec_send_packet(audio_ctx, pkt); - if (ret < 0) { - Debug(3, "codec: sending audio packet failed"); - return; - } - ret = avcodec_receive_frame(audio_ctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { - Debug(3, "codec: receiving audio frame failed"); - return; - } - if (ret >= 0) { - // update audio clock - if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) { - CodecAudioSetClock(audio_decoder, avpkt->pts); - } - // format change - if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate - || audio_decoder->Channels != audio_ctx->channels) { - CodecAudioUpdateFormat(audio_decoder); - } - if (!audio_decoder->HwSampleRate || !audio_decoder->HwChannels) { - return; // unsupported sample format - } - if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { - return; - } - if (audio_decoder->Resample) { - uint8_t outbuf[8192 * 2 * 8]; - uint8_t *out[1]; - - out[0] = outbuf; - ret = swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels), - (const uint8_t **)frame->extended_data, frame->nb_samples); - if (ret > 0) { - if (!(audio_decoder->Passthrough & CodecPCM)) { - CodecReorderAudioFrame((int16_t *) outbuf, ret * 2 * audio_decoder->HwChannels, - audio_decoder->HwChannels); - } - AudioEnqueue(outbuf, ret * 2 * audio_decoder->HwChannels); + av_frame_unref(frame); + *pkt = *avpkt; // 
use copy + ret = avcodec_send_packet(audio_ctx, pkt); + if (ret < 0) { + Debug(3, "codec: sending audio packet failed"); + return; + } + ret = avcodec_receive_frame(audio_ctx, frame); + if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { + Debug(3, "codec: receiving audio frame failed"); + return; + } + + if (ret >= 0) { + // update audio clock + if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) { + CodecAudioSetClock(audio_decoder, avpkt->pts); + } + // format change + if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate + || audio_decoder->Channels != audio_ctx->channels) { + CodecAudioUpdateFormat(audio_decoder); + } + if (!audio_decoder->HwSampleRate || !audio_decoder->HwChannels) { + return; // unsupported sample format + } + if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { + return; + } + if (audio_decoder->Resample) { + uint8_t outbuf[8192 * 2 * 8]; + uint8_t *out[1]; + + out[0] = outbuf; + ret = swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels), + (const uint8_t **)frame->extended_data, frame->nb_samples); + if (ret > 0) { + if (!(audio_decoder->Passthrough & CodecPCM)) { + CodecReorderAudioFrame((int16_t *) outbuf, ret * 2 * audio_decoder->HwChannels, + audio_decoder->HwChannels); + } + AudioEnqueue(outbuf, ret * 2 * audio_decoder->HwChannels); + } + return; + } } - return; - } - } } } diff --git a/openglosd.cpp b/openglosd.cpp index ee4f848..75c63b5 100644 --- a/openglosd.cpp +++ b/openglosd.cpp @@ -14,6 +14,10 @@ void ConvertColor(const GLint &colARGB, glm::vec4 &col) { col.b = ((colARGB & 0x000000FF) ) / 255.0; } +extern "C" void OSD_get_context(); +extern "C" void OSD_get_shared_context(); +extern "C" void OSD_release_context(); + /**************************************************************************************** * cShader ****************************************************************************************/ @@ -119,7 +123,7 @@ void main() \ #else const char *rectVertexShader = -"\n\ +"\n \ \ layout (location = 0) in vec2 position; \ out vec4 rectCol; \ @@ -134,8 +138,9 @@ void main() \ "; const char *rectFragmentShader = -"\n\ +"\n \ \ +precision mediump float; \ in vec4 rectCol; \ out vec4 color; \ \ @@ -146,7 +151,7 @@ void main() \ "; const char *textureVertexShader = -"\n\ +"\n \ \ layout (location = 0) in vec2 position; \ layout (location = 1) in vec2 texCoords; \ @@ -166,7 +171,8 @@ void main() \ "; const char *textureFragmentShader = -"\n\ +"\n \ +precision mediump float; \ in vec2 TexCoords; \ in vec4 alphaValue; \ out vec4 color; \ @@ -180,7 +186,7 @@ void main() \ "; const char *textVertexShader = -"\n\ +"\n \ \ layout (location = 0) in vec2 position; \ layout (location = 1) in vec2 texCoords; \ @@ -200,7 +206,8 @@ void main() \ "; const char *textFragmentShader = -"\n\ +"\n \ +precision mediump float; \ in vec2 TexCoords; \ in vec4 textColor; \ \ @@ -287,12 +294,14 @@ bool cShader::Compile(const char *vertexCode, const char *fragmentCode) { sVertex = glCreateShader(GL_VERTEX_SHADER); glShaderSource(sVertex, 1, &vertexCode, NULL); glCompileShader(sVertex); +// esyslog("[softhddev]:SHADER:VERTEX %s\n",vertexCode); if (!CheckCompileErrors(sVertex)) return false; // Fragment Shader sFragment = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(sFragment, 1, &fragmentCode, NULL); glCompileShader(sFragment); +// esyslog("[softhddev]:SHADER:FRAGMENT %s\n",fragmentCode); if (!CheckCompileErrors(sFragment)) return false; // link Program @@ -315,14 +324,14 @@ 
bool cShader::CheckCompileErrors(GLuint object, bool program) { glGetShaderiv(object, GL_COMPILE_STATUS, &success); if (!success) { glGetShaderInfoLog(object, 1024, NULL, infoLog); - esyslog("[softhddev]:SHADER: Compile-time error: Type: %d - %s", type, infoLog); + esyslog("[softhddev]:SHADER: Compile-time error: Type: %d - \n%s\n", type, infoLog); return false; } } else { glGetProgramiv(object, GL_LINK_STATUS, &success); if (!success) { glGetProgramInfoLog(object, 1024, NULL, infoLog); - esyslog("[softhddev]:SHADER: Link-time error: Type: %d", type); + esyslog("[softhddev]:SHADER: Link-time error: Type: %d - \n%s\n", type, infoLog); return false; } } @@ -365,6 +374,10 @@ void cOglGlyph::BindTexture(void) { void cOglGlyph::LoadTexture(FT_BitmapGlyph ftGlyph) { // Disable byte-alignment restriction +#ifdef VAAPI + OSD_release_context(); + OSD_get_shared_context(); +#endif glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); @@ -386,6 +399,11 @@ void cOglGlyph::LoadTexture(FT_BitmapGlyph ftGlyph) { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glBindTexture(GL_TEXTURE_2D, 0); glPixelStorei(GL_UNPACK_ALIGNMENT, 4); +#ifdef VAAPI + OSD_release_context(); + OSD_get_context(); +#endif + } @@ -550,6 +568,10 @@ cOglFb::~cOglFb(void) { bool cOglFb::Init(void) { initiated = true; +#ifdef VAAPI + OSD_release_context(); + OSD_get_shared_context(); +#endif glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); @@ -562,10 +584,19 @@ bool cOglFb::Init(void) { glBindFramebuffer(GL_FRAMEBUFFER, fb); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0); + if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { - esyslog("[softhddev]ERROR: Framebuffer is not complete!\n"); + esyslog("[softhddev]ERROR: %d Framebuffer is not complete!\n",__LINE__); +#ifdef VAAPI + OSD_release_context(); + OSD_get_context(); +#endif return false; } +#ifdef VAAPI + OSD_release_context(); + OSD_get_context(); +#endif return true; } @@ -619,6 +650,7 @@ cOglOutputFb::~cOglOutputFb(void) { bool cOglOutputFb::Init(void) { initiated = true; + glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); @@ -634,19 +666,16 @@ bool cOglOutputFb::Init(void) { esyslog("[softhddev]ERROR::cOglOutputFb: Framebuffer is not complete!"); return false; } - return true; } void cOglOutputFb::BindWrite(void) { -// glVDPAUMapSurfacesNV(1, &surface); if (!initiated) Init(); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fb); } void cOglOutputFb::Unbind(void) { -// glVDPAUUnmapSurfacesNV(1, &surface); glBindFramebuffer(GL_FRAMEBUFFER, 0); } @@ -882,6 +911,8 @@ extern unsigned char *posd; //} #endif + + bool cOglCmdCopyBufferToOutputFb::Execute(void) { int i; pthread_mutex_lock(&OSDMutex); @@ -896,6 +927,7 @@ bool cOglCmdCopyBufferToOutputFb::Execute(void) { glReadPixels(0, 0 ,fb->Width(), fb->Height(),GL_BGRA,GL_UNSIGNED_BYTE,posd); #else fb->Blit(x, y + fb->Height(), x + fb->Width(), y); + glFlush(); #endif ActivateOsd(oFb->texture,x, y, fb->Width() ,fb->Height()); @@ -1323,6 +1355,10 @@ cOglCmdDrawImage::~cOglCmdDrawImage(void) { bool cOglCmdDrawImage::Execute(void) { GLuint texture; +#ifdef VAAPI + OSD_release_context(); + OSD_get_shared_context(); +#endif glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); glTexImage2D( 
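
Every GL object the OSD creates under VAAPI (glyph textures, framebuffers, stored images) is wrapped in the same context dance seen in the hunks above: switch to the shared EGL context, create the object, then switch back to the private OSD context. A condensed sketch of that pattern, using the OSD_* helpers declared extern "C" at the top of openglosd.cpp (the wrapper function itself is illustrative, not part of the patch):

#include <GL/glew.h>

extern void OSD_get_context(void);
extern void OSD_get_shared_context(void);
extern void OSD_release_context(void);

static GLuint OsdCreateTexture(int width, int height)
{
    GLuint tex;

#ifdef VAAPI
    OSD_release_context();        /* an EGL context can be current in one thread only */
    OSD_get_shared_context();     /* create the texture on the context shared with video.c */
#endif
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glBindTexture(GL_TEXTURE_2D, 0);
#ifdef VAAPI
    OSD_release_context();
    OSD_get_context();            /* back to the private OSD context */
#endif
    return tex;
}
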
@@ -1341,7 +1377,11 @@ bool cOglCmdDrawImage::Execute(void) { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glBindTexture(GL_TEXTURE_2D, 0); - +#ifdef VAAPI + OSD_release_context(); + OSD_get_context(); +#endif + GLfloat x1 = x; //left GLfloat y1 = y; //top GLfloat x2 = x + width; //right @@ -1430,6 +1470,10 @@ cOglCmdStoreImage::~cOglCmdStoreImage(void) { } bool cOglCmdStoreImage::Execute(void) { +#ifdef VAAPI + OSD_release_context(); + OSD_get_shared_context(); +#endif glGenTextures(1, &imageRef->texture); glBindTexture(GL_TEXTURE_2D, imageRef->texture); glTexImage2D( @@ -1448,6 +1492,10 @@ bool cOglCmdStoreImage::Execute(void) { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glBindTexture(GL_TEXTURE_2D, 0); +#ifdef VAAPI + OSD_release_context(); + OSD_get_context(); +#endif return true; } @@ -2041,11 +2089,17 @@ cOglOsd::cOglOsd(int Left, int Top, uint Level, std::shared_ptr oglT posd = MALLOC(unsigned char, osdWidth * osdHeight * 4); #endif //create output framebuffer +#ifdef VAAPI + OSD_release_context(); + OSD_get_shared_context(); +#endif if (!oFb) { oFb = new cOglOutputFb(osdWidth, osdHeight); oglThread->DoCmd(new cOglCmdInitOutputFb(oFb)); } - +#ifdef VAAPI + OSD_release_context(); +#endif } cOglOsd::~cOglOsd() { diff --git a/shaders.h b/shaders.h index bf2e26f..83afc03 100644 --- a/shaders.h +++ b/shaders.h @@ -1,54 +1,54 @@ // shader +#ifdef CUVID +char vertex_osd[] = {"\ +#version 330\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +}\n"}; + +char fragment_osd[] = {"\ +#version 330\n\ +#define texture1D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +uniform sampler2D texture0;\n\ +void main() {\n\ +vec4 color; \n\ +color = vec4(texture(texture0, texcoord0));\n\ +out_color = color;\n\ +}\n"}; char vertex[] = {"\ -#version 330\n\ +#version 310 es\n\ in vec2 vertex_position;\n\ in vec2 vertex_texcoord0;\n\ out vec2 texcoord0;\n\ in vec2 vertex_texcoord1;\n\ out vec2 texcoord1;\n\ -in vec2 vertex_texcoord2;\n\ -out vec2 texcoord2;\n\ -in vec2 vertex_texcoord3;\n\ -out vec2 texcoord3;\n\ -in vec2 vertex_texcoord4;\n\ -out vec2 texcoord4;\n\ -in vec2 vertex_texcoord5;\n\ -out vec2 texcoord5;\n\ void main() {\n\ gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ texcoord0 = vertex_texcoord0;\n\ texcoord1 = vertex_texcoord1;\n\ -texcoord2 = vertex_texcoord2;\n\ -texcoord3 = vertex_texcoord3;\n\ -texcoord4 = vertex_texcoord4;\n\ -texcoord5 = vertex_texcoord5;\n\ }\n"}; - char fragment[] = {"\ -#version 330\n\ +#version 310 es\n\ #define texture1D texture\n\ #define texture3D texture\n\ +precision mediump float; \ layout(location = 0) out vec4 out_color;\n\ in vec2 texcoord0;\n\ in vec2 texcoord1;\n\ -in vec2 texcoord2;\n\ -in vec2 texcoord3;\n\ -in vec2 texcoord4;\n\ -in vec2 texcoord5;\n\ uniform mat3 colormatrix;\n\ uniform vec3 colormatrix_c;\n\ uniform sampler2D texture0;\n\ -//uniform vec2 texture_size0;\n\ -//uniform mat2 texture_rot0;\n\ -//uniform vec2 pixel_size0;\n\ uniform sampler2D texture1;\n\ -//uniform vec2 texture_size1;\n\ -//uniform mat2 texture_rot1;\n\ -//uniform vec2 pixel_size1;\n\ -//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ void main() {\n\ vec4 color; // = 
vec4(0.0, 0.0, 0.0, 1.0);\n\ color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\ @@ -61,27 +61,18 @@ out_color = color;\n\ }\n"}; char fragment_bt2100[] = {"\ -#version 330\n\ +#version 310 es\n \ #define texture1D texture\n\ #define texture3D texture\n\ +precision mediump float; \ layout(location = 0) out vec4 out_color;\n\ in vec2 texcoord0;\n\ in vec2 texcoord1;\n\ -in vec2 texcoord2;\n\ -in vec2 texcoord3;\n\ -in vec2 texcoord4;\n\ -in vec2 texcoord5;\n\ uniform mat3 colormatrix;\n\ uniform vec3 colormatrix_c;\n\ uniform mat3 cms_matrix;\n\ uniform sampler2D texture0;\n\ -//uniform vec2 texture_size0;\n\ -//uniform mat2 texture_rot0;\n\ -//uniform vec2 pixel_size0;\n\ uniform sampler2D texture1;\n\ -//uniform vec2 texture_size1;\n\ -//uniform mat2 texture_rot1;\n\ -//uniform vec2 pixel_size1;\n\ //#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ void main() {\n\ vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ @@ -99,6 +90,98 @@ color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\ out_color = color;\n\ }\n"}; + +#else +char vertex_osd[] = {"\ +\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +}\n"}; + +char fragment_osd[] = {"\ +\n\ +#define texture1D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +uniform sampler2D texture0;\n\ +void main() {\n\ +vec4 color; \n\ +color = vec4(texture(texture0, texcoord0));\n\ +out_color = color;\n\ +}\n"}; + +char vertex[] = {"\ +\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +in vec2 vertex_texcoord1;\n\ +out vec2 texcoord1;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +texcoord1 = vertex_texcoord1;\n\ +}\n"}; + +char fragment[] = {"\ +\n\ +#define texture1D texture\n\ +#define texture3D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +in vec2 texcoord1;\n\ +uniform mat3 colormatrix;\n\ +uniform vec3 colormatrix_c;\n\ +uniform sampler2D texture0;\n\ +uniform sampler2D texture1;\n\ +//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ +void main() {\n\ +vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ +color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\ +color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\ +// color conversion\n\ +color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ +color.a = 1.0;\n\ +// color mapping\n\ +out_color = color;\n\ +}\n"}; + +char fragment_bt2100[] = {"\ +\n \ +#define texture1D texture\n\ +#define texture3D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +in vec2 texcoord1;\n\ +uniform mat3 colormatrix;\n\ +uniform vec3 colormatrix_c;\n\ +uniform mat3 cms_matrix;\n\ +uniform sampler2D texture0;\n\ +uniform sampler2D texture1;\n\ +//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ +void main() {\n\ +vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ +color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\ +color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\ +// color conversion\n\ +color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ +color.a = 1.0;\n\ +// color mapping\n\ +color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ +color.rgb = pow(color.rgb, vec3(2.4));\n\ 
+color.rgb = cms_matrix * color.rgb;\n\ +color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ +color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\ +out_color = color;\n\ +}\n"}; +#endif /* Color conversion matrix: RGB = m * YUV + c * m is in row-major matrix, with m[row][col], e.g.: * [ a11 a12 a13 ] float m[3][3] = { { a11, a12, a13 }, @@ -113,12 +196,12 @@ out_color = color;\n\ * The matrix might also be used for other conversions and colorspaces. */ struct mp_cmat { - float m[3][3]; // colormatrix - float c[3]; //colormatrix_c + GLfloat m[3][3]; // colormatrix + GLfloat c[3]; //colormatrix_c }; struct mp_mat { - float m[3][3]; + GLfloat m[3][3]; }; // YUV input limited range (16-235 for luma, 16-240 for chroma) @@ -185,10 +268,6 @@ static const struct gl_vao_entry vertex_vao[] = { {"position", 2, GL_FLOAT, false, offsetof(struct vertex, position)}, {"texcoord0", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[0])}, {"texcoord1", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[1])}, - {"texcoord2", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[2])}, - {"texcoord3", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[3])}, - {"texcoord4", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[4])}, - {"texcoord5", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[5])}, {0} }; @@ -198,7 +277,8 @@ static void compile_attach_shader(GLuint program, { GLuint shader; GLint status, log_length; - + char log[4000]; + GLsizei len; shader = glCreateShader(type); glShaderSource(shader, 1, &source, NULL); glCompileShader(shader); @@ -206,9 +286,10 @@ static void compile_attach_shader(GLuint program, glGetShaderiv(shader, GL_COMPILE_STATUS, &status); log_length = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length); -Debug(3,"compile Status %d loglen %d\n",status,log_length); + glGetShaderInfoLog(shader,4000,&len,log); + GlxCheck(); +Debug(3,"compile Status %d loglen %d >%s<\n",status,log_length,log); - GlxCheck(); glAttachShader(program, shader); glDeleteShader(shader); } @@ -223,10 +304,24 @@ static void link_shader(GLuint program) log_length = 0; glGetProgramiv(program, GL_INFO_LOG_LENGTH, &log_length); Debug(3,"Link Status %d loglen %d\n",status,log_length); - - } +static GLuint sc_generate_osd(GLuint gl_prog) { + + Debug(3,"vor create osd\n"); + gl_prog = glCreateProgram(); + Debug(3,"vor compile vertex osd\n"); + compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex_osd); + Debug(3,"vor compile fragment osd \n"); + compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, fragment_osd); + glBindAttribLocation(gl_prog,0,"vertex_position"); + glBindAttribLocation(gl_prog,1,"vertex_texcoord0"); + + link_shader(gl_prog); + return gl_prog; +} + + static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) { char vname[80]; @@ -284,7 +379,8 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) { if (gl_colormatrix != -1) glProgramUniformMatrix3fv(gl_prog,gl_colormatrix,1,0,m); GlxCheck(); - //glProgramUniform3fv(gl_prog,gl_colormatrix,3,&yuv_bt709.m[0][0]); + Debug(3,"nach set colormatrix\n"); + gl_colormatrix_c = glGetUniformLocation(gl_prog,"colormatrix_c"); Debug(3,"get uniform colormatrix_c %d %f\n",gl_colormatrix_c,*c); if (gl_colormatrix_c != -1) diff --git a/softhddev.c b/softhddev.c index 1a91fa6..fbe44e0 100644 --- a/softhddev.c +++ b/softhddev.c @@ -107,6 +107,7 @@ static VideoStream *AudioSyncStream; ///< video stream for audio/video sync #define AUDIO_MIN_BUFFER_FREE (3072 * 8 * 8) #define AUDIO_BUFFER_SIZE (512 * 1024) ///< audio PES buffer default size 
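
The fragment shaders above compute color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c, and sc_generate() feeds those uniforms from the mp_cmat tables. A minimal sketch of that upload, assuming gl_prog is an already linked program built from the sources above and that the mp_cmat definition and GL headers are in scope (the helper name is mine):

static void UploadColorMatrix(GLuint gl_prog, const struct mp_cmat *cmat)
{
    GLint loc_m = glGetUniformLocation(gl_prog, "colormatrix");
    GLint loc_c = glGetUniformLocation(gl_prog, "colormatrix_c");

    if (loc_m != -1)              /* transpose flag 0, matching sc_generate() */
        glProgramUniformMatrix3fv(gl_prog, loc_m, 1, 0, &cmat->m[0][0]);
    if (loc_c != -1)
        glProgramUniform3fv(gl_prog, loc_c, 1, cmat->c);
    GlxCheck();
}

Called right after link_shader(), with for example &yuv_bt709 or whichever table matches the stream's colorspace.
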
static AVPacket AudioAvPkt[1]; ///< audio a/v packet +int AudioDelay = 0; ////////////////////////////////////////////////////////////////////////////// // Audio codec parser @@ -579,7 +580,7 @@ static void PesInit(PesDemux * pesdx) pesdx->Size = PES_MAX_PAYLOAD; pesdx->Buffer = av_malloc(PES_MAX_PAYLOAD + AV_INPUT_BUFFER_PADDING_SIZE); if (!pesdx->Buffer) { - Fatal(_("pesdemux: out of memory\n")); + Fatal(_("pesdemux: out of memory\n")); } PesReset(pesdx); } @@ -672,57 +673,56 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, // FIXME: simple+faster detection, if codec already known r = 0; if (!r && FastMpegCheck(q)) { - r = MpegCheck(q, n); - codec_id = AV_CODEC_ID_MP2; + r = MpegCheck(q, n); + codec_id = AV_CODEC_ID_MP2; } if (!r && FastAc3Check(q)) { - r = Ac3Check(q, n); - codec_id = AV_CODEC_ID_AC3; - if (r > 0 && q[5] > (10 << 3)) { - codec_id = AV_CODEC_ID_EAC3; - } + r = Ac3Check(q, n); + codec_id = AV_CODEC_ID_AC3; + if (r > 0 && q[5] > (10 << 3)) { + codec_id = AV_CODEC_ID_EAC3; + } } if (!r && FastLatmCheck(q)) { - r = LatmCheck(q, n); - codec_id = AV_CODEC_ID_AAC_LATM; + r = LatmCheck(q, n); + codec_id = AV_CODEC_ID_AAC_LATM; } if (!r && FastAdtsCheck(q)) { - r = AdtsCheck(q, n); - codec_id = AV_CODEC_ID_AAC; + r = AdtsCheck(q, n); + codec_id = AV_CODEC_ID_AAC; } if (r < 0) { // need more bytes - break; + break; } if (r > 0) { - AVPacket avpkt[1]; + AVPacket avpkt[1]; - // new codec id, close and open new - if (AudioCodecID != codec_id) { - Debug(3, "pesdemux: new codec %#06x -> %#06x\n", - AudioCodecID, codec_id); - CodecAudioClose(MyAudioDecoder); - CodecAudioOpen(MyAudioDecoder, codec_id); - AudioCodecID = codec_id; - } - av_init_packet(avpkt); - avpkt->data = (void *)q; - avpkt->size = r; - avpkt->pts = pesdx->PTS; - avpkt->dts = pesdx->DTS; - // FIXME: not aligned for ffmpeg - CodecAudioDecode(MyAudioDecoder, avpkt); - pesdx->PTS = AV_NOPTS_VALUE; - pesdx->DTS = AV_NOPTS_VALUE; - pesdx->Skip += r; - // FIXME: switch to decoder state - //pesdx->State = PES_MPEG_DECODE; - break; + // new codec id, close and open new + if (AudioCodecID != codec_id) { + Debug(3, "pesdemux: new codec %#06x -> %#06x\n", + AudioCodecID, codec_id); + CodecAudioClose(MyAudioDecoder); + CodecAudioOpen(MyAudioDecoder, codec_id); + AudioCodecID = codec_id; + } + av_init_packet(avpkt); + avpkt->data = (void *)q; + avpkt->size = r; + avpkt->pts = pesdx->PTS; + avpkt->dts = pesdx->DTS; + // FIXME: not aligned for ffmpeg + CodecAudioDecode(MyAudioDecoder, avpkt); + pesdx->PTS = AV_NOPTS_VALUE; + pesdx->DTS = AV_NOPTS_VALUE; + pesdx->Skip += r; + // FIXME: switch to decoder state + //pesdx->State = PES_MPEG_DECODE; + break; } if (AudioCodecID != AV_CODEC_ID_NONE) { - // shouldn't happen after we have a vaild codec - // detected - Debug(4, "pesdemux: skip @%d %02x\n", pesdx->Skip, - q[0]); + // shouldn't happen after we have a vaild codec + // detected + Debug(4, "pesdemux: skip @%d %02x\n", pesdx->Skip,q[0]); } // try next byte ++pesdx->Skip; @@ -1024,9 +1024,15 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) if (SkipAudio || !MyAudioDecoder) { // skip audio return size; } - if (StreamFreezed) { // stream freezed + if (StreamFreezed ) { // stream freezed return 0; } + if (AudioDelay) { + Debug(3,"AudioDelay %dms\n",AudioDelay); + usleep(AudioDelay/90); + AudioDelay = 0; + return 0; + } if (NewAudioStream) { // this clears the audio ringbuffer indirect, open and setup does it CodecAudioClose(MyAudioDecoder); @@ -1205,9 +1211,9 @@ int PlayAudio(const uint8_t * data, int 
size, uint8_t id) // new codec id, close and open new if (AudioCodecID != codec_id) { - CodecAudioClose(MyAudioDecoder); - CodecAudioOpen(MyAudioDecoder, codec_id); - AudioCodecID = codec_id; + CodecAudioClose(MyAudioDecoder); + CodecAudioOpen(MyAudioDecoder, codec_id); + AudioCodecID = codec_id; } av_init_packet(avpkt); avpkt->data = (void *)p; @@ -1257,6 +1263,7 @@ int PlayTsAudio(const uint8_t * data, int size) if (StreamFreezed) { // stream freezed return 0; } + if (NewAudioStream) { // this clears the audio ringbuffer indirect, open and setup does it CodecAudioClose(MyAudioDecoder); @@ -1279,7 +1286,13 @@ int PlayTsAudio(const uint8_t * data, int size) return 0; } #endif - + if (AudioDelay) { + Debug(3,"AudioDelay %dms\n",AudioDelay); + usleep(AudioDelay*1000); + AudioDelay = 0; +// TsDemuxer(tsdx, data, size); // insert dummy audio + + } return TsDemuxer(tsdx, data, size); } @@ -2498,27 +2511,27 @@ int SetPlayMode(int play_mode) case 0: // audio/video from decoder // tell video parser we get new stream if (MyVideoStream->Decoder && !MyVideoStream->SkipStream) { - // clear buffers on close configured always or replay only - if (ConfigVideoClearOnSwitch || MyVideoStream->ClearClose) { - Clear(); // flush all buffers - MyVideoStream->ClearClose = 0; - } - if (MyVideoStream->CodecID != AV_CODEC_ID_NONE) { - MyVideoStream->NewStream = 1; - MyVideoStream->InvalidPesCounter = 0; - // tell hw decoder we are closing stream - VideoSetClosing(MyVideoStream->HwDecoder); - VideoResetStart(MyVideoStream->HwDecoder); + // clear buffers on close configured always or replay only + if (ConfigVideoClearOnSwitch || MyVideoStream->ClearClose) { + Clear(); // flush all buffers + MyVideoStream->ClearClose = 0; + } + if (MyVideoStream->CodecID != AV_CODEC_ID_NONE) { + MyVideoStream->NewStream = 1; + MyVideoStream->InvalidPesCounter = 0; + // tell hw decoder we are closing stream + VideoSetClosing(MyVideoStream->HwDecoder); + VideoResetStart(MyVideoStream->HwDecoder); #ifdef DEBUG - VideoSwitch = GetMsTicks(); - Debug(3, "video: new stream start\n"); + VideoSwitch = GetMsTicks(); + Debug(3, "video: new stream start\n"); #endif - } + } } if (MyAudioDecoder) { // tell audio parser we have new stream - if (AudioCodecID != AV_CODEC_ID_NONE) { - NewAudioStream = 1; - } + if (AudioCodecID != AV_CODEC_ID_NONE) { + NewAudioStream = 1; + } } break; case 1: // audio/video from player diff --git a/video.c b/video.c index 5c23b0e..63f1996 100644 --- a/video.c +++ b/video.c @@ -67,11 +67,7 @@ #include /* POSIX Terminal Control Definitions */ #include /* UNIX Standard Definitions */ #include /* ERROR Number Definitions */ -#include /* ioctl() */ -//#include -//#include -//#include -//#inclde +#include /* ioctl() */ #include #include @@ -105,15 +101,12 @@ #include #include -//#include -#ifdef xcb_USE_GLX -#include -#endif -//#include + #ifdef USE_SCREENSAVER #include #include #endif + //#include //#include @@ -142,10 +135,10 @@ typedef enum #ifdef USE_GLX #include -#include // For GL_COLOR_BUFFER_BIT +//#include // For GL_COLOR_BUFFER_BIT //#include // For GL_COLOR_BUFFER_BIT //#include -#include +//#include // only for gluErrorString #include #include @@ -153,36 +146,42 @@ typedef enum #endif #include +#include #ifdef CUVID -//#define CUDA_API_PER_THREAD_DEFAULT_STREAM -#include // For GL_COLOR_BUFFER_BIT +//#include // For GL_COLOR_BUFFER_BIT //#include // For GL_COLOR_BUFFER_BIT #include -//#include #include -//#include #include #include #include "drvapi_error_string.h" -// CUDA includes #define __DEVICE_TYPES_H__ 
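
AudioDelay is declared extern in video.c and consumed here once: the call sleeps, clears the delay and returns early so audio starts late relative to video. The PES path above sleeps AudioDelay/90 while the TS path sleeps AudioDelay*1000; the sketch below assumes the value is in milliseconds, as the Debug output suggests (the helper name is mine):

#include <unistd.h>

extern int AudioDelay;              /* set elsewhere, reset once consumed */

static int AudioDelayGate(void)
{
    if (AudioDelay) {
        usleep((useconds_t)AudioDelay * 1000);  /* ms -> us */
        AudioDelay = 0;
        return 1;                   /* caller returns without feeding this chunk */
    }
    return 0;
}
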
#endif #ifdef VAAPI + #include #include #include #define TO_AVHW_DEVICE_CTX(x) ((AVHWDeviceContext*)x->data) #define TO_AVHW_FRAMES_CTX(x) ((AVHWFramesContext*)x->data) - #define TO_VAAPI_DEVICE_CTX(x) ((AVVAAPIDeviceContext*)TO_AVHW_DEVICE_CTX(x)->hwctx) #define TO_VAAPI_FRAMES_CTX(x) ((AVVAAPIFramesContext*)TO_AVHW_FRAMES_CTX(x)->hwctx) #endif +#include +//#define EGL_EGLEXT_PROTOTYPES +#include +#include +#ifndef GL_OES_EGL_image +typedef void* GLeglImageOES; +#endif +#ifndef EGL_KHR_image +typedef void *EGLImageKHR; +#endif #ifdef PLACEBO - #define VK_USE_PLATFORM_XCB_KHR #include #include @@ -194,21 +193,6 @@ typedef enum #include #include -// support old ffmpeg versions <1.0 -#if 0 //LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,18,102) -#define AVCodecID CodecID -#define AV_CODEC_ID_H263 CODEC_ID_H263 -#define AV_CODEC_ID_H264 CODEC_ID_H264 -#define AV_CODEC_ID_MPEG1VIDEO CODEC_ID_MPEG1VIDEO -#define AV_CODEC_ID_MPEG2VIDEO CODEC_ID_MPEG2VIDEO -#define AV_CODEC_ID_MPEG4 CODEC_ID_MPEG4 -#define AV_CODEC_ID_VC1 CODEC_ID_VC1 -#define AV_CODEC_ID_WMV3 CODEC_ID_WMV3 -#endif - -#include -#include - #if defined(YADIF) || defined (VAAPI) #include #include @@ -231,7 +215,7 @@ typedef enum #include "audio.h" #include "codec.h" - + //---------------------------------------------------------------------------- // Declarations @@ -350,11 +334,17 @@ typedef struct { // Defines //---------------------------------------------------------------------------- -#define CODEC_SURFACES_MAX 16 ///< maximal of surfaces +#define CODEC_SURFACES_MAX 12 // + #define VIDEO_SURFACES_MAX 6 ///< video output surfaces for queue //#define OUTPUT_SURFACES_MAX 4 ///< output surfaces for flip page - +#ifdef VAAPI +#define SWAP_BUFFER_SIZE 3 +#endif +#ifdef CUVID +#define SWAP_BUFFER_SIZE 1 +#endif //---------------------------------------------------------------------------- // Variables //---------------------------------------------------------------------------- @@ -455,6 +445,7 @@ static xcb_atom_t NetWmStateAbove; extern uint32_t VideoSwitch; ///< ticks for channel switch #endif extern void AudioVideoReady(int64_t); ///< tell audio video is ready +extern int AudioDelay; #ifdef USE_VIDEO_THREAD @@ -484,9 +475,10 @@ static int OsdDirtyX; ///< osd dirty area x static int OsdDirtyY; ///< osd dirty area y static int OsdDirtyWidth; ///< osd dirty area width static int OsdDirtyHeight; ///< osd dirty area height -#ifdef USE_OPENGLOSD + + static void (*VideoEventCallback)(void) = NULL; /// callback function to notify VDR about Video Events -#endif + static int64_t VideoDeltaPTS; ///< FIXME: fix pts #ifdef USE_SCREENSAVER @@ -494,21 +486,44 @@ static char DPMSDisabled; ///< flag we have disabled dpms static char EnableDPMSatBlackScreen; ///< flag we should enable dpms at black screen #endif -static int GlxEnabled; ///< use GLX +static int EglEnabled; ///< use EGL static int GlxVSyncEnabled = 1; ///< enable/disable v-sync -static GLXContext GlxSharedContext; ///< shared gl context -static GLXContext GlxContext; ///< our gl context -#ifdef USE_VIDEO_THREAD -static GLXContext GlxThreadContext; ///< our gl context for the thread +#ifdef CUVID + static GLXContext eglSharedContext; ///< shared gl context + static GLXContext eglContext; ///< our gl context + + #ifdef USE_VIDEO_THREAD + static GLXContext eglThreadContext; ///< our gl context for the thread + #endif + static XVisualInfo *GlxVisualInfo; ///< our gl visual + static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context); + GLXContext 
OSDcontext; +#else + static EGLContext eglSharedContext; ///< shared gl context + + static EGLContext eglContext; ///< our gl context + static EGLConfig eglConfig; + static EGLDisplay eglDisplay; + static EGLSurface eglSurface,eglOSDSurface; + static EGLint eglAttrs[10]; + static int eglVersion = 2; + static EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext, + EGLenum, EGLClientBuffer, + const EGLint *); + static EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR); + static void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES); + + #ifdef USE_VIDEO_THREAD + static EGLContext eglThreadContext; ///< our gl context for the thread + #endif + static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContext context); + EGLContext OSDcontext; #endif -static XVisualInfo *GlxVisualInfo; ///< our gl visual - static GLuint OsdGlTextures[2]; ///< gl texture for OSD static int OsdIndex=0; ///< index into OsdGlTextures -static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context); -GLXContext OSDcontext; + //---------------------------------------------------------------------------- // Common Functions @@ -584,6 +599,7 @@ static void VideoSetPts(int64_t * pts_p, int interlaced, return; } } else { // first new clock value + Debug(3,"++++++++++++++++++++++++++++++++++++starte audio\n"); AudioVideoReady(pts); } if (*pts_p != pts) { @@ -749,8 +765,6 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, #ifdef USE_GLX - - /// /// GLX extension functions ///@{ @@ -764,20 +778,77 @@ static PFNGLXGETVIDEOSYNCSGIPROC GlxGetVideoSyncSGI; static PFNGLXSWAPINTERVALSGIPROC GlxSwapIntervalSGI; #endif -///@} - /// /// GLX check error. /// -static void GlxCheck(void) -{ - GLenum err; - - if ((err = glGetError()) != GL_NO_ERROR) { - Debug(3, "video/glx: error %d '%s'\n", err, gluErrorString(err)); - } +#define GlxCheck(void)\ +{\ + GLenum err;\ +\ + if ((err = glGetError()) != GL_NO_ERROR) {\ + Debug(3, "video/glx: error %s:%d %d '%s'\n",__FILE__,__LINE__, err, gluErrorString(err));\ + }\ } +char *eglErrorString(EGLint error) +{ + switch(error) + { + case EGL_SUCCESS: return "No error"; + case EGL_NOT_INITIALIZED: return "EGL not initialized or failed to initialize"; + case EGL_BAD_ACCESS: return "Resource inaccessible"; + case EGL_BAD_ALLOC: return "Cannot allocate resources"; + case EGL_BAD_ATTRIBUTE: return "Unrecognized attribute or attribute value"; + case EGL_BAD_CONTEXT: return "Invalid EGL context"; + case EGL_BAD_CONFIG: return "Invalid EGL frame buffer configuration"; + case EGL_BAD_CURRENT_SURFACE: return "Current surface is no longer valid"; + case EGL_BAD_DISPLAY: return "Invalid EGL display"; + case EGL_BAD_SURFACE: return "Invalid surface"; + case EGL_BAD_MATCH: return "Inconsistent arguments"; + case EGL_BAD_PARAMETER: return "Invalid argument"; + case EGL_BAD_NATIVE_PIXMAP: return "Invalid native pixmap"; + case EGL_BAD_NATIVE_WINDOW: return "Invalid native window"; + case EGL_CONTEXT_LOST: return "Context lost"; + } + return "Unknown error "; +} +/// +/// egl check error. 
+/// +#define EglCheck(void) \ +{\ + EGLint err;\ +\ + if ((err = eglGetError()) != EGL_SUCCESS) {\ + Debug(3, "video/egl: %s:%d error %d %s\n", __FILE__,__LINE__,err,eglErrorString(err));\ + }\ +} + + +#ifdef VAAPI + +void OSD_get_shared_context() +{ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); +// EglCheck(); +} + +void OSD_get_context() +{ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, OSDcontext); +// EglCheck(); +} + +void OSD_release_context() +{ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); +// EglCheck(); +} + +#endif + + + /// /// GLX check if a GLX extension is supported. /// @@ -801,253 +872,6 @@ static int GlxIsExtensionSupported(const char *ext) return 0; } -/// -/// Setup GLX decoder -/// -/// @param width input video textures width -/// @param height input video textures height -/// @param[OUT] textures created and prepared textures -/// -static void GlxSetupDecoder(int width, int height, GLuint * textures) -{ - int i; - - glEnable(GL_TEXTURE_2D); // create 2d texture - glGenTextures(2, textures); - GlxCheck(); - for (i = 0; i < 2; ++i) { - glBindTexture(GL_TEXTURE_2D, textures[i]); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - glPixelStorei(GL_UNPACK_ALIGNMENT, 4); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); - glBindTexture(GL_TEXTURE_2D, 0); - } - glDisable(GL_TEXTURE_2D); - - GlxCheck(); -} - -/// -/// Render texture. -/// -/// @param texture 2d texture -/// @param x window x -/// @param y window y -/// @param width window width -/// @param height window height -/// -static inline void GlxRenderTexture(GLuint texture, int x, int y, int width, int height, int flip) -{ - - glEnable(GL_TEXTURE_2D); - glBindTexture(GL_TEXTURE_2D, texture); - -// glColor4f(1.0f, 1.0f, 1.0f, 1.0f); // no color - - glBegin(GL_QUADS); { - if (!flip) { - glTexCoord2f(1.0f, 1.0f); - glVertex2i(x + width, y + height); - glTexCoord2f(0.0f, 1.0f); - glVertex2i(x, y + height); - glTexCoord2f(0.0f, 0.0f); - glVertex2i(x, y); - glTexCoord2f(1.0f, 0.0f); - glVertex2i(x + width, y); - } else { - glTexCoord2f(1.0f, 1.0f); - glVertex2i(x + width, y); - glTexCoord2f(0.0f, 1.0f); - glVertex2i(x, y); - glTexCoord2f(0.0f, 0.0f); - glVertex2i(x, y+height); - glTexCoord2f(1.0f, 0.0f); - glVertex2i(x + width, y+height); - } - } - glEnd(); - - glBindTexture(GL_TEXTURE_2D, 0); - glDisable(GL_TEXTURE_2D); -} - -/// -/// Upload OSD texture. -/// -/// @param x x coordinate texture -/// @param y y coordinate texture -/// @param width argb image width -/// @param height argb image height -/// @param argb argb image -/// -static void GlxUploadOsdTexture(int x, int y, int width, int height, - const uint8_t * argb) -{ - // FIXME: use other / faster uploads - // ARB_pixelbuffer_object GL_PIXEL_UNPACK_BUFFER glBindBufferARB() - // glMapBuffer() glUnmapBuffer() - - glEnable(GL_TEXTURE_2D); // upload 2d texture - - glBindTexture(GL_TEXTURE_2D, OsdGlTextures[OsdIndex]); - glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, width, height, GL_BGRA, GL_UNSIGNED_BYTE, argb); - glBindTexture(GL_TEXTURE_2D, 0); - - glDisable(GL_TEXTURE_2D); - -} - -/// -/// GLX initialize OSD. 
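
EglCheck() and GlxCheck() only log, they do not abort; placing them directly after the call under test makes the __FILE__/__LINE__ in the log point at the failing spot. Usage sketch, mirroring the surface creation done in make_egl() further below:

    eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig,
                                        (EGLNativeWindowType)VideoWindow, NULL);
    EglCheck();                     /* logs file:line plus the eglErrorString() text */
    if (eglSurface == EGL_NO_SURFACE)
        Fatal(_("Could not create EGL surface!\n"));
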
-/// -/// @param width osd width -/// @param height osd height -/// -static void GlxOsdInit(__attribute__((unused))int width, __attribute__((unused))int height) -{ - int i; -#ifdef DEBUG - if (!GlxEnabled) { - Debug(3, "video/glx: %s called without glx enabled\n", __FUNCTION__); - OsdGlTextures[0] = 0; - return; - } -#endif - - Debug(3, "video/glx: osd init context %p <-> %p\n", glXGetCurrentContext(), GlxContext); - -#ifndef USE_OPENGLOSD - // - // create a RGBA texture. - // - glEnable(GL_TEXTURE_2D); // create 2d texture(s) - glGenTextures(2, OsdGlTextures); - for (i = 0; i < 2; ++i) { - glBindTexture(GL_TEXTURE_2D, OsdGlTextures[i]); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - glPixelStorei(GL_UNPACK_ALIGNMENT, 4); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); - } - glBindTexture(GL_TEXTURE_2D, 0); - glDisable(GL_TEXTURE_2D); -#else - OsdGlTextures[0] = 0; -#endif -} - -/// -/// GLX cleanup osd. -/// -static void GlxOsdExit(void) -{ - if (OsdGlTextures[0]) { - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext ); - glDeleteTextures(2, OsdGlTextures); - OsdGlTextures[0] = 0; - OsdGlTextures[1] = 0; - } -} - -/// -/// Upload ARGB image to texture. -/// -/// @param xi x-coordinate in argb image -/// @param yi y-coordinate in argb image -/// @paran height height in pixel in argb image -/// @paran width width in pixel in argb image -/// @param pitch pitch of argb image -/// @param argb 32bit ARGB image data -/// @param x x-coordinate on screen of argb image -/// @param y y-coordinate on screen of argb image -/// -/// @note looked by caller -/// -static void GlxOsdDrawARGB(int xi, int yi, int width, int height, int pitch, - const uint8_t * argb, int x, int y) -{ - uint8_t *tmp; - -#ifdef DEBUG - uint32_t start; - uint32_t end; -#endif - -#ifdef DEBUG - if (!GlxEnabled) { - Debug(3, "video/glx: %s called without glx enabled\n", __FUNCTION__); - return; - } - start = GetMsTicks(); -#endif - - // set glx context - if (!glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext)) { - Error(_("video/glx: can't make glx context current\n")); - return; - } - // FIXME: faster way - tmp = malloc(width * height * 4); - if (tmp) { - int i; - - for (i = 0; i < height; ++i) { - memcpy(tmp + i * width * 4, argb + xi * 4 + (i + yi) * pitch, width * 4); - } - GlxUploadOsdTexture(x, y, width, height, tmp); - glXMakeCurrent(XlibDisplay, None, NULL); - free(tmp); - } -#ifdef DEBUG - end = GetMsTicks(); - - Debug(4, "video/glx: osd upload %dx%d%+d%+d %dms %d\n", width, height, x, y, end - start, width * height * 4); -#endif -} - -/// -/// Clear OSD texture. -/// -/// @note looked by caller -/// -static void GlxOsdClear(void) -{ - void *texbuf; - -#ifdef USE_OPENGLOSD - return; -#endif - -#ifdef DEBUG - if (!GlxEnabled) { - Debug(3, "video/glx: %s called without glx enabled\n", __FUNCTION__); - return; - } - - Debug(3, "video/glx: osd context %p <-> %p\n", glXGetCurrentContext(), GlxContext); -#endif - - // FIXME: any opengl function to clear an area? 
- // FIXME: if not; use zero buffer - // FIXME: if not; use dirty area - - // set glx context - if (!glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext)) { - Error(_("video/glx: can't make glx context current\n")); - return; - } - - texbuf = calloc(OsdWidth * OsdHeight, 4); - GlxUploadOsdTexture(0, 0, OsdWidth, OsdHeight, texbuf); - glXMakeCurrent(XlibDisplay, None, NULL); - free(texbuf); -} - /// /// Setup GLX window. /// @@ -1056,7 +880,11 @@ static void GlxOsdClear(void) /// @param height window height /// @param context GLX context /// +#ifdef CUVID static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context) +#else +static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContext context) +#endif { #ifdef DEBUG uint32_t start; @@ -1064,33 +892,35 @@ static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContex int i; unsigned count; #endif - + #ifdef PLACEBO_ return; #endif + Debug(3, "video/egl: %s %x %dx%d context: %p", __FUNCTION__, window, width, height, context); - Debug(3, "video/glx: %s %x %dx%d context:%p", __FUNCTION__, window, width, height, context); - - // set glx context - if (!glXMakeCurrent(XlibDisplay, window, context)) { - Fatal(_("video/glx: can't make glx context current\n")); - GlxEnabled = 0; + // set gl context +#ifdef CUVID + if (!glXMakeCurrent(XlibDisplay, window, context)) { +#else + if (!eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, context )) { +#endif + Fatal(_("video/egl: GlxSetupWindow can't make egl context current\n")); + EglEnabled = 0; return; } + Debug(3, "video/egl: ok\n"); - Debug(3, "video/glx: ok\n"); - -#ifdef DEBUG +#ifdef CUVID // check if v-sync is working correct end = GetMsTicks(); for (i = 0; i < 10; ++i) { start = end; - + glClear(GL_COLOR_BUFFER_BIT); glXSwapBuffers(XlibDisplay, window); end = GetMsTicks(); - + GlxGetVideoSyncSGI(&count); Debug(4, "video/glx: %5d frame rate %dms\n", count, end - start); // nvidia can queue 5 swaps @@ -1098,61 +928,25 @@ static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContex Warning(_("video/glx: no v-sync\n")); } } + GLenum err = glewInit(); + if (err != GLEW_OK) { + Debug(3,"Error: %s\n", glewGetErrorString(err)); + } + GlxCheck(); #endif - // viewpoint - GlxCheck(); glViewport(0, 0, width, height); - glDepthRange(-1.0, 1.0); - glClearColor(0.0f, 0.0f, 0.0f, 0.0f); - glColor3f(1.0f, 1.0f, 1.0f); - glClearDepth(1.0); - GlxCheck(); - if (glewInit()) - Fatal(_("glewinit failed\n")); - - glMatrixMode(GL_PROJECTION); - glLoadIdentity(); - glOrtho(0.0, width, height, 0.0, -1.0, 1.0); - GlxCheck(); - - glMatrixMode(GL_MODELVIEW); - glLoadIdentity(); - - glDisable(GL_DEPTH_TEST); // setup 2d drawing - glDepthMask(GL_FALSE); - glDisable(GL_CULL_FACE); -#ifdef USE_DOUBLEBUFFER - glDrawBuffer(GL_BACK); -#else - glDrawBuffer(GL_FRONT); + GlxCheck(); +#ifdef VAAPI + OSD_release_context(); #endif - glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); - glEnable(GL_BLEND); - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); - -#ifdef DEBUG -#ifdef USE_DOUBLEBUFFER - glDrawBuffer(GL_FRONT); - glClearColor(0.0f, 0.0f, 0.0f, 1.0f); - glClear(GL_COLOR_BUFFER_BIT); - glDrawBuffer(GL_BACK); -#endif -#endif - - // clear - glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // intial background color - glClear(GL_COLOR_BUFFER_BIT); -#ifdef DEBUG - glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // background color -#endif - GlxCheck(); } /// /// Initialize GLX. 
/// -static void GlxInit(void) +#ifdef CUVID +static void EglInit(void) { XVisualInfo *vi=NULL; @@ -1180,7 +974,7 @@ static void GlxInit(void) None }; int fbcount; - + GLXContext context; int major; int minor; @@ -1192,11 +986,10 @@ static void GlxInit(void) int redSize, greenSize, blueSize; if (!glXQueryVersion(XlibDisplay, &major, &minor)) { - Error(_("video/glx: no GLX support\n")); - GlxEnabled = 0; - return; + Fatal(_("video/glx: no GLX support\n")); } Info(_("video/glx: glx version %d.%d\n"), major, minor); + // // check which extension are supported @@ -1249,41 +1042,21 @@ static void GlxInit(void) glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_BLUE_SIZE, &blueSize); Debug(3,"RGB size %d:%d:%d\n",redSize, greenSize, blueSize); - if (!vi) { - Fatal(_("video/glx: can't get a RGB visual\n")); - GlxEnabled = 0; - return; - } - if (!vi->visual) { - Fatal(_("video/glx: no valid visual found\n")); - GlxEnabled = 0; - return; - } - if (vi->bits_per_rgb < 8) { - Fatal(_("video/glx: need atleast 8-bits per RGB\n")); - GlxEnabled = 0; - return; - } Debug(3, "Chosen visual ID = 0x%x\n", vi->visualid ); context = glXCreateContext(XlibDisplay, vi, NULL, GL_TRUE); if (!context) { Fatal(_("video/glx: can't create glx context\n")); - GlxEnabled = 0; - return; } - GlxSharedContext = context; - context = glXCreateContext(XlibDisplay, vi, GlxSharedContext, GL_TRUE); + eglSharedContext = context; + context = glXCreateContext(XlibDisplay, vi, eglSharedContext, GL_TRUE); if (!context) { Fatal(_("video/glx: can't create glx context\n")); - GlxEnabled = 0; - glXDestroyContext(XlibDisplay, GlxSharedContext); - GlxSharedContext = 0; - return; } - GlxContext = context; - + eglContext = context; + + EglEnabled = 1; GlxVisualInfo = vi; Debug(3, "video/glx: visual %#02x depth %u\n", (unsigned)vi->visualid, vi->depth); @@ -1355,38 +1128,90 @@ static void GlxInit(void) } +#else +static void EglInit(void) +{ + int redSize, greenSize, blueSize, alphaSize; + +#ifdef PLACEBO + return; +#endif + EGLContext context; + // create egl context + make_egl(); + GLenum err = glewInit(); + if (err != GLEW_OK) { + Debug(3,"Error: %s\n", glewGetErrorString(err)); + } + + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_BLUE_SIZE, &blueSize); + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_RED_SIZE, &redSize); + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_GREEN_SIZE, &greenSize); + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_ALPHA_SIZE, &alphaSize); + Debug(3,"RGB size %d:%d:%d Alpha %d\n",redSize, greenSize, blueSize,alphaSize); + + eglSharedContext = eglContext; + + context = eglCreateContext(eglDisplay, eglConfig, eglSharedContext, eglAttrs); + EglCheck(); + if (!context) { + Fatal(_("video/egl: can't create egl context\n")); + } + eglContext = context; +} +#endif + /// /// Cleanup GLX. 
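
Both init paths resolve the GL entry points with GLEW only after a context has been made current (GlxSetupWindow() on the CUVID side, make_egl()/EglInit() on the VAAPI side), because glewInit() needs a current context. A minimal sketch of that ordering (the error messages are illustrative):

#ifdef CUVID
    if (!glXMakeCurrent(XlibDisplay, VideoWindow, eglContext))
        Fatal(_("video/glx: can't make glx context current\n"));
#else
    if (!eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext))
        Fatal(_("video/egl: can't make egl context current\n"));
#endif
    {
        GLenum err = glewInit();

        if (err != GLEW_OK)
            Fatal(_("video/egl: glewInit failed: %s\n"), glewGetErrorString(err));
    }
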
/// -static void GlxExit(void) +static void EglExit(void) { - Debug(3, "video/glx: %s\n", __FUNCTION__); + Debug(3, "video/egl: %s\n", __FUNCTION__); #ifdef PLACEBO return; #endif - glFinish(); - // must destroy glx - if (glXGetCurrentContext() == GlxContext) { + // must destroy contet +#ifdef CUVID + // must destroy glx + if (glXGetCurrentContext() == eglContext) { // if currently used, set to none glXMakeCurrent(XlibDisplay, None, NULL); } - if (GlxSharedContext) { - glXDestroyContext(XlibDisplay, GlxSharedContext); + if (eglSharedContext) { + glXDestroyContext(XlibDisplay, eglSharedContext); GlxCheck(); } - if (GlxContext) { - glXDestroyContext(XlibDisplay, GlxContext); + if (eglContext) { + glXDestroyContext(XlibDisplay, eglContext); GlxCheck(); } - if (GlxThreadContext) { - glXDestroyContext(XlibDisplay, GlxThreadContext); + if (eglThreadContext) { + glXDestroyContext(XlibDisplay, eglThreadContext); GlxCheck(); } - // FIXME: must free GlxVisualInfo +#else + if (eglGetCurrentContext() == eglContext) { + // if currently used, set to none + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + } + if (eglSharedContext) { + eglDestroyContext(eglDisplay, eglSharedContext); + EglCheck(); + } + if (eglContext) { + eglDestroyContext(eglDisplay, eglContext); + EglCheck(); + } + if (eglThreadContext) { + eglDestroyContext(eglDisplay, eglThreadContext); + EglCheck(); + } + eglTerminate(eglDisplay); +#endif } #endif @@ -1688,9 +1513,13 @@ typedef struct _cuvid_decoder_ #ifdef CUVID CUarray cu_array[CODEC_SURFACES_MAX+1][2]; CUgraphicsResource cu_res[CODEC_SURFACES_MAX+1][2]; - GLuint gl_textures[(CODEC_SURFACES_MAX+1)*2]; // where we will copy the CUDA result CUcontext cuda_ctx; #endif + GLuint gl_textures[(CODEC_SURFACES_MAX+1)*2]; // where we will copy the CUDA result +#ifdef VAAPI + EGLImageKHR images[(CODEC_SURFACES_MAX+1)*2]; + int fds[(CODEC_SURFACES_MAX+1)*2]; +#endif #ifdef PLACEBO struct pl_image pl_images[CODEC_SURFACES_MAX+1]; // images for Placebo chain // const struct pl_tex *pl_tex_in[CODEC_SURFACES_MAX+1][2]; // Textures in image @@ -1760,7 +1589,7 @@ GLuint vao_buffer; // GLuint gl_shader=0,gl_prog = 0,gl_fbo=0; // shader programm GLint gl_colormatrix,gl_colormatrix_c; GLuint OSDfb=0; -GLuint OSDtexture; +GLuint OSDtexture,gl_prog_osd=0; int OSDx,OSDy,OSDxsize,OSDysize; @@ -1896,10 +1725,14 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) Debug(3, "video/cuvid: %s\n", __FUNCTION__); #ifndef PLACEBO - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext); +#ifdef CUVID + glXMakeCurrent(XlibDisplay, VideoWindow, eglContext); GlxCheck(); +#else + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); + EglCheck(); #endif - +#endif for (i=0;iSurfacesNeeded;i++) { if (decoder->frames[i]) { av_frame_free(&decoder->frames[i]); @@ -1916,7 +1749,18 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) pl_tex_destroy(p->gpu,&decoder->pl_images[i].planes[j].texture); } #else +#ifdef CUVID checkCudaErrors(cuGraphicsUnregisterResource(decoder->cu_res[i][j])); +#endif +#ifdef VAAPI + if (decoder->images[i*2+j]) { + DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[i*2+j]); + if (decoder->fds[i*2+j]) + close(decoder->fds[i*2+j]); + } + decoder->fds[i*2+j] = 0; + decoder->images[i*2+j] = 0; +#endif #endif } } @@ -1930,7 +1774,7 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) pl_renderer_destroy(&p->renderer); p->renderer = pl_renderer_create(p->ctx, p->gpu); #else - 
glDeleteTextures(CODEC_SURFACES_MAX*2,(GLuint*)&decoder->gl_textures); + glDeleteTextures(CODEC_SURFACES_MAX * 2,(GLuint*)&decoder->gl_textures); GlxCheck(); if (decoder == CuvidDecoders[0]) { // only wenn last decoder closes @@ -2011,6 +1855,21 @@ static void CuvidReleaseSurface(CuvidDecoder * decoder, int surface) pl_tex_destroy(p->gpu,&decoder->pl_images[surface].planes[1].texture); } } +#else +#ifdef VAAPI + if (decoder->images[surface*2]) { + DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[surface*2]); + DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[surface*2+1]); + if (decoder->fds[surface*2]) { + close(decoder->fds[surface*2]); + close(decoder->fds[surface*2+1]); + } + } + decoder->fds[surface*2] = 0; + decoder->fds[surface*2+1] = 0; + decoder->images[surface*2] = 0; + decoder->images[surface*2+1] = 0; +#endif #endif for (i = 0; i < decoder->SurfaceUsedN; ++i) { if (decoder->SurfacesUsed[i] == surface) { @@ -2047,6 +1906,210 @@ int CuvidTestSurfaces() { return 0; } +#ifdef VAAPI +struct mp_egl_config_attr { + int attrib; + const char *name; +}; +#define MPGL_VER(major, minor) (((major) * 100) + (minor) * 10) +#define MPGL_VER_GET_MAJOR(ver) ((unsigned)(ver) / 100) +#define MPGL_VER_GET_MINOR(ver) ((unsigned)(ver) % 100 / 10) +#define MP_EGL_ATTRIB(id) {id, # id} + +static const struct mp_egl_config_attr mp_egl_attribs[] = { + MP_EGL_ATTRIB(EGL_CONFIG_ID), + MP_EGL_ATTRIB(EGL_RED_SIZE), + MP_EGL_ATTRIB(EGL_GREEN_SIZE), + MP_EGL_ATTRIB(EGL_BLUE_SIZE), + MP_EGL_ATTRIB(EGL_ALPHA_SIZE), + MP_EGL_ATTRIB(EGL_COLOR_BUFFER_TYPE), + MP_EGL_ATTRIB(EGL_CONFIG_CAVEAT), + MP_EGL_ATTRIB(EGL_CONFORMANT), +}; + +const int mpgl_preferred_gl_versions[] = { +// 440, +// 430, +// 400, + 330, + 320, + 310, + 300, + 210, + 0 +}; +static bool create_context_cb(EGLDisplay display, + int es_version, + EGLContext *out_context, EGLConfig *out_config) +{ + + EGLenum api; + EGLint rend, *attribs; + const char *name; + + switch (es_version) { + case 0: + api = EGL_OPENGL_API; + rend = EGL_OPENGL_BIT; + name = "Desktop OpenGL"; + break; + case 2: + api = EGL_OPENGL_ES_API; + rend = EGL_OPENGL_ES2_BIT; + name = "GLES 2.x"; + break; + case 3: + api = EGL_OPENGL_ES_API; + rend = EGL_OPENGL_ES3_BIT; + name = "GLES 3.x"; + break; + default: Fatal(_("Wrong ES version \n"));; + } + + Debug(3,"Trying to create %s context.\n", name); + + if (!eglBindAPI(api)) { + Fatal(_(" Could not bind API!\n")); + } + + EGLint attributes8[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, 8, + EGL_RENDERABLE_TYPE, rend, + EGL_NONE + }; + EGLint attributes10[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 10, + EGL_GREEN_SIZE, 10, + EGL_BLUE_SIZE, 10, + EGL_ALPHA_SIZE, 8, + EGL_RENDERABLE_TYPE, rend, + EGL_NONE + }; + EGLint num_configs; + + attribs = attributes10; + +// if (!eglChooseConfig(display, attributes10, NULL, 0, &num_configs)) { // try 10 Bit + attribs = attributes8; + if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit + num_configs = 0; + } +// } + + EGLConfig *configs = malloc(sizeof(EGLConfig) * num_configs); + if (!eglChooseConfig(display, attribs, configs, num_configs, &num_configs)) + num_configs = 0; + + if (!num_configs) { + free (configs); + Debug(3,"Could not choose EGLConfig for %s!\n", name); + return false; + } + +// for (int n = 0; n < num_configs; n++) +// dump_egl_config(ctx->log, MSGL_TRACE, display, configs[n]); + + int chosen = 0; + + EGLConfig config = configs[chosen]; + + 
free(configs); + +// MP_DBG(ctx, "Chosen EGLConfig:\n"); +// dump_egl_config(ctx->log, MSGL_DEBUG, display, config); + + EGLContext *egl_ctx = NULL; + + if (es_version) { +// if (!ra_gl_ctx_test_version(ctx, MPGL_VER(es_version, 0), true)) +// return false; + + eglAttrs[0] = EGL_CONTEXT_CLIENT_VERSION; + eglAttrs[1] = es_version; + eglAttrs[2] = EGL_NONE; + + + egl_ctx = eglCreateContext(display, config, EGL_NO_CONTEXT, eglAttrs); + } else { + for (int n = 0; mpgl_preferred_gl_versions[n]; n++) { + int ver = mpgl_preferred_gl_versions[n]; + + eglAttrs[0] = EGL_CONTEXT_MAJOR_VERSION; + eglAttrs[1] = MPGL_VER_GET_MAJOR(ver); + eglAttrs[2] = EGL_CONTEXT_MINOR_VERSION; + eglAttrs[3] = MPGL_VER_GET_MINOR(ver); + eglAttrs[4] = EGL_CONTEXT_OPENGL_PROFILE_MASK; + eglAttrs[5] = ver >= 320 ? EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT : 0; + eglAttrs[6] = EGL_NONE; + + + egl_ctx = eglCreateContext(display, config, EGL_NO_CONTEXT, eglAttrs); + EglCheck(); + if (egl_ctx) { + Debug(3,"Use %d GLVersion\n",ver); + break; + } + } + } + + if (!egl_ctx) { + Debug(3,"Could not create EGL context for %s!\n", name); + return false; + } + + *out_context = egl_ctx; + *out_config = config; + eglVersion = es_version; + return true; +} + +make_egl() { + + CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"); + DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR"); + EGLImageTargetTexture2DOES = (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES"); + + if (!CreateImageKHR || !DestroyImageKHR || !EGLImageTargetTexture2DOES) + Fatal(_("Can't get EGL Extentions\n")); + + eglDisplay = eglGetDisplay(XlibDisplay); + + if (!eglInitialize(eglDisplay, NULL, NULL)) { + Fatal(_("Could not initialize EGL.\n")); + } +#if CUVID + if (!create_context_cb(eglDisplay, 3, &eglContext, &eglConfig)) { +#else + if (!create_context_cb(eglDisplay, 0, &eglContext, &eglConfig)) { +#endif + Fatal(_("Could not create EGL Context\n")); + } + int vID, n; + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_NATIVE_VISUAL_ID, &vID); + Debug(3,"chose visual 0x%x\n", vID); + + eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, + (EGLNativeWindowType)VideoWindow, NULL); + + if (eglSurface == EGL_NO_SURFACE) { + Fatal(_("Could not create EGL surface!\n")); + } + + if (!eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) + { + Fatal(_("Could not make context current!\n")); + } + EglEnabled = 1; +} +#endif + + + /// /// Allocate new CUVID decoder. 
/// @@ -2075,6 +2138,7 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) #endif #ifdef VAAPI if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128" , NULL, 0)) != 0) { +// if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, ":0.0" , NULL, 0)) != 0 ) { Fatal("codec: can't allocate HW video codec context err %04x",i); } #endif @@ -2087,6 +2151,7 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) #ifdef VAAPI VaDisplay = TO_VAAPI_DEVICE_CTX(HwDeviceContext)->display; decoder->VaDisplay = VaDisplay; + #endif decoder->Window = VideoWindow; //decoder->VideoX = 0; // done by calloc @@ -2177,8 +2242,13 @@ Debug(3,"cuvid del hw decoder \n"); if (decoder == CuvidDecoders[0]) pthread_mutex_lock(&VideoLockMutex); #ifndef PLACEBO - glXMakeCurrent(XlibDisplay, VideoWindow, GlxSharedContext); +#ifdef CUVID + glXMakeCurrent(XlibDisplay, VideoWindow, eglContext); GlxCheck(); +#else + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); + EglCheck(); +#endif #endif if (decoder->SurfaceFreeN || decoder->SurfaceUsedN) { CuvidDestroySurfaces(decoder); @@ -2209,18 +2279,17 @@ Debug(3,"cuvid del hw decoder \n"); static int CuvidGlxInit(__attribute__((unused))const char *display_name) { #ifndef PLACEBO - GlxEnabled = 1; - - GlxInit(); - if (GlxEnabled) { - GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, GlxContext); + + EglInit(); + if (EglEnabled) { + GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, eglContext); } - if (!GlxEnabled) { - Error(_("video/glx: glx error\n")); + if (!EglEnabled) { + Fatal(_("video/egl: egl init error\n")); } #else - GlxEnabled = 0; + EglEnabled = 0; #endif return 1; @@ -2459,6 +2528,7 @@ createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned i #endif } #ifdef VAAPI + // copy image and process using CUDA void generateVAAPIImage(CuvidDecoder * decoder,int index, const AVFrame *frame,int image_width , int image_height) { @@ -2479,7 +2549,7 @@ void generateVAAPIImage(CuvidDecoder * decoder,int index, const AVFrame *frame,i return; } vaSyncSurface(decoder->VaDisplay,(unsigned int)frame->data[3]); - + VideoThreadLock(); for (n = 0; n < 2; n++) { // Set DMA_BUF from VAAPI decoder to Textures int id = desc.layers[n].object_index[0]; int fd = desc.objects[id].fd; @@ -2530,13 +2600,8 @@ void generateVAAPIImage(CuvidDecoder * decoder,int index, const AVFrame *frame,i } decoder->pl_images[index].planes[n].texture = pl_tex_create(p->gpu, &tex_params); - - - - } - -// VideoThreadUnlock(); + VideoThreadUnlock(); } #endif @@ -2605,15 +2670,18 @@ void generateCUDAImage(CuvidDecoder * decoder,int index, const AVFrame *frame,in } #endif #else -#ifdef CUVID + void createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned int size_y, enum AVPixelFormat PixFmt) { int n,i; - - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext); +#ifdef CUVID + glXMakeCurrent(XlibDisplay, VideoWindow, eglContext); GlxCheck(); +#else + OSD_get_shared_context(); +#endif glGenBuffers(1,&vao_buffer); GlxCheck(); @@ -2640,18 +2708,21 @@ createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned i glTexImage2D(GL_TEXTURE_2D, 0,n==0?GL_R16:GL_RG16 ,n==0?size_x:size_x/2, n==0?size_y:size_y/2, 0, n==0?GL_RED:GL_RG , GL_UNSIGNED_SHORT, NULL); SDK_CHECK_ERROR_GL(); // register this texture with CUDA - +#ifdef CUVID checkCudaErrors(cuGraphicsGLRegisterImage(&decoder->cu_res[i][n], decoder->gl_textures[i*2+n],GL_TEXTURE_2D, 
CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD)); checkCudaErrors(cuGraphicsMapResources(1, &decoder->cu_res[i][n], 0)); checkCudaErrors(cuGraphicsSubResourceGetMappedArray(&decoder->cu_array[i][n], decoder->cu_res[i][n],0, 0)); checkCudaErrors(cuGraphicsUnmapResources(1, &decoder->cu_res[i][n], 0)); - +#endif } } glBindTexture(GL_TEXTURE_2D, 0); - + GlxCheck(); +#ifdef VAAPI + OSD_release_context(); +#endif } - +#ifdef CUVID // copy image and process using CUDA void generateCUDAImage(CuvidDecoder * decoder,int index, const AVFrame *frame,int image_width , int image_height, int bytes) { @@ -2674,6 +2745,87 @@ void generateCUDAImage(CuvidDecoder * decoder,int index, const AVFrame *frame,in } } #endif +#ifdef VAAPI +#define MP_ARRAY_SIZE(s) (sizeof(s) / sizeof((s)[0])) +#define ADD_ATTRIB(name, value) \ + do { \ + assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \ + attribs[num_attribs++] = (name); \ + attribs[num_attribs++] = (value); \ + attribs[num_attribs] = EGL_NONE; \ + } while(0) + +#define ADD_PLANE_ATTRIBS(plane) do { \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \ + desc.objects[desc.layers[n].object_index[plane]].fd); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \ + desc.layers[n].offset[plane]); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \ + desc.layers[n].pitch[plane]); \ + } while (0) + +void generateVAAPIImage(CuvidDecoder * decoder,int index, const AVFrame *frame,int image_width , int image_height) +{ + int n,i; + VAStatus status; + static int toggle = 0; + uint64_t first_time; + VADRMPRIMESurfaceDescriptor desc; + + status = vaExportSurfaceHandle(decoder->VaDisplay, (unsigned int)frame->data[3], + VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, + VA_EXPORT_SURFACE_READ_ONLY | + VA_EXPORT_SURFACE_SEPARATE_LAYERS, + &desc); + + if (status != VA_STATUS_SUCCESS) { + printf("Fehler beim export VAAPI Handle\n"); + return; + } + vaSyncSurface(decoder->VaDisplay,(unsigned int)frame->data[3]); + + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); + EglCheck(); + + for (int n = 0; n < 2; n++) { + int attribs[20] = {EGL_NONE}; + int num_attribs = 0; + + ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, desc.layers[n].drm_format); + ADD_ATTRIB(EGL_WIDTH, n==0?image_width:image_width/2); + ADD_ATTRIB(EGL_HEIGHT, n==0?image_height:image_height/2); + + ADD_PLANE_ATTRIBS(0); +#if 0 + if (desc.layers[n].num_planes > 1) + ADD_PLANE_ATTRIBS(1); + if (desc.layers[n].num_planes > 2) + ADD_PLANE_ATTRIBS(2); + if (desc.layers[n].num_planes > 3) + ADD_PLANE_ATTRIBS(3); +#endif + decoder->images[index*2+n] = CreateImageKHR(eglGetCurrentDisplay(),EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs); + + if (!decoder->images[index*2+n]) + goto esh_failed; + + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[index*2+n]); + EGLImageTargetTexture2DOES(GL_TEXTURE_2D, decoder->images[index*2+n]); + decoder->fds[index*2+n] = desc.objects[desc.layers[n].object_index[0]].fd; + } + glBindTexture(GL_TEXTURE_2D, 0); + eglMakeCurrent(eglDisplay,EGL_NO_SURFACE,EGL_NO_SURFACE,EGL_NO_CONTEXT); + EglCheck(); + return 0; + +esh_failed: + Debug(3,"Failure in generateVAAPIImage\n"); + for (int n = 0; n < desc.num_objects; n++) + close(desc.objects[n].fd); + eglMakeCurrent(eglDisplay,EGL_NO_SURFACE,EGL_NO_SURFACE,EGL_NO_CONTEXT); + EglCheck(); +} +#endif #endif @@ -2730,18 +2882,14 @@ int push_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) //printf("Interlaced %d tff %d\n",frame->interlaced_frame,frame->top_field_first); /* pull filtered frames from the 
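/*
 * Condensed, illustrative sketch of the dma-buf mapping that
 * generateVAAPIImage() does above, reduced to layer 0 / plane 0 only.  The
 * helper name is made up; CreateImageKHR and EGLImageTargetTexture2DOES are
 * the function pointers resolved in make_egl().  Note that the exported fd
 * is left open here; the real code stores it in decoder->fds[] and closes it
 * in CuvidReleaseSurface().
 */
static int MapVaSurfaceToTexture(CuvidDecoder * decoder, VASurfaceID va_surface, GLuint texture)
{
    VADRMPRIMESurfaceDescriptor desc;

    if (vaExportSurfaceHandle(decoder->VaDisplay, va_surface,
            VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
            VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS,
            &desc) != VA_STATUS_SUCCESS)
        return -1;
    vaSyncSurface(decoder->VaDisplay, va_surface);

    EGLint attribs[] = {                // layer 0, plane 0 only
        EGL_LINUX_DRM_FOURCC_EXT, (EGLint)desc.layers[0].drm_format,
        EGL_WIDTH, (EGLint)desc.width,
        EGL_HEIGHT, (EGLint)desc.height,
        EGL_DMA_BUF_PLANE0_FD_EXT, desc.objects[desc.layers[0].object_index[0]].fd,
        EGL_DMA_BUF_PLANE0_OFFSET_EXT, (EGLint)desc.layers[0].offset[0],
        EGL_DMA_BUF_PLANE0_PITCH_EXT, (EGLint)desc.layers[0].pitch[0],
        EGL_NONE
    };
    EGLImageKHR image = CreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT,
        EGL_LINUX_DMA_BUF_EXT, NULL, attribs);

    if (!image)
        return -1;
    glBindTexture(GL_TEXTURE_2D, texture);
    EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
    glBindTexture(GL_TEXTURE_2D, 0);
    return 0;
}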
filtergraph */ while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, filt_frame)) >= 0) { -// filt_frame->pts = frame->pts; // Restore orginal pts -// filt_frame->pts += 20 * 90; // prepare for next frame filt_frame->pts /= 2; decoder->Interlaced = 0; // printf("vaapideint video:new %#012" PRIx64 " old %#012" PRIx64 "\n",filt_frame->pts,frame->pts); - CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame); - if (i++ == 0) - filt_frame = av_frame_alloc(); // get new frame -// av_frame_unref(filt_frame); - } + CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame); + filt_frame = av_frame_alloc(); // get new frame -// av_frame_free(&filt_frame); + } + av_frame_free(&filt_frame); av_frame_free(&frame); return ret; } @@ -2873,7 +3021,7 @@ static void CuvidSyncRenderFrame(CuvidDecoder * decoder, const AVCodecContext * video_ctx, const AVFrame * frame); int push_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) { - int ret,i; + int ret; AVFrame *filt_frame = av_frame_alloc(); // frame->pts = frame->best_effort_timestamp; @@ -2882,19 +3030,18 @@ int push_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) if (av_buffersrc_add_frame_flags(decoder->buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); } - av_frame_copy_props(filt_frame,frame); + //printf("Interlaced %d tff %d\n",frame->interlaced_frame,frame->top_field_first); /* pull filtered frames from the filtergraph */ while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, filt_frame)) >= 0) { filt_frame->pts /= 2; // Debug(3,"video:new %#012" PRIx64 " old %#012" PRIx64 "\n",filt_frame->pts,frame->pts); CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame); - if (i++ == 0) - filt_frame = av_frame_alloc(); // get new frame -// av_frame_unref(filt_frame); + + filt_frame = av_frame_alloc(); // get new frame } -// av_frame_free(&filt_frame); + av_frame_free(&filt_frame); av_frame_free(&frame); return ret; } @@ -3009,7 +3156,7 @@ end: #endif - +extern void AudioPlay(void); typedef struct CUVIDContext { AVBufferRef *hw_frames_ctx; AVFrame *tmp_frame; @@ -3054,7 +3201,9 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, #ifdef CUVID case AV_PIX_FMT_CUDA: #endif +#ifdef VAAPI case AV_PIX_FMT_VAAPI: +#endif break; default: continue; @@ -3102,16 +3251,19 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, if (1 || video_ctx->width != decoder->InputWidth || video_ctx->height != decoder->InputHeight) { +#ifdef PLACEBO VideoThreadLock(); +#endif CuvidCleanup(decoder); decoder->InputAspect = video_ctx->sample_aspect_ratio; decoder->InputWidth = video_ctx->width; decoder->InputHeight = video_ctx->height; decoder->Interlaced = 0; - decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1; CuvidSetupOutput(decoder); +#ifdef PLACEBO VideoThreadUnlock(); +#endif #ifdef PLACEBO // dont show first frame decoder->newchannel = 1; #endif @@ -3180,6 +3332,7 @@ int get_RGB(CuvidDecoder *decoder) { current = decoder->SurfacesRb[decoder->SurfaceRead]; #ifndef PLACEBO +// eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); glGenTextures(1, &texture); GlxCheck(); glBindTexture(GL_TEXTURE_2D, texture); @@ -3199,8 +3352,13 @@ int get_RGB(CuvidDecoder *decoder) { Debug(3,"video/cuvid: grab Framebuffer is not complete!"); return 0; } - + glViewport(0,0,width, height); + GlxCheck(); + +// eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglThreadContext); +// EglCheck(); + if (gl_prog == 0) 
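/*
 * Condensed sketch of the drain pattern used by the corrected push_filters()
 * variants above: allocate a fresh output frame after every pull and free
 * the last, empty allocation when the sink runs dry.  Illustrative only;
 * the helper name is made up, buffersrc_ctx/buffersink_ctx are the
 * deinterlace filter endpoints set up elsewhere in this file.
 */
static int drain_filtergraph(CuvidDecoder * decoder, AVCodecContext * dec_ctx, AVFrame * frame)
{
    AVFrame *out = av_frame_alloc();
    int ret;

    if (av_buffersrc_add_frame_flags(decoder->buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");

    while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, out)) >= 0) {
        CuvidSyncRenderFrame(decoder, dec_ctx, out);    // consumer takes ownership of 'out'
        out = av_frame_alloc();                         // fresh frame for the next pull
    }
    av_frame_free(&out);                                // last allocation stayed empty
    av_frame_free(&frame);
    return ret;
}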
gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm @@ -3222,24 +3380,35 @@ int get_RGB(CuvidDecoder *decoder) { glUseProgram(0); glActiveTexture(GL_TEXTURE0); - if (OsdShown && decoder->grab == 2) { -#ifndef USE_OPENGLOSD - glXMakeCurrent(XlibDisplay, VideoWindow, GlxThreadContext); - GlxRenderTexture(OsdGlTextures[OsdIndex], 0,0, width, height,1); -#else + if (OsdShown && decoder->grab == 2) { + GLint texLoc; + + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (gl_prog_osd == 0) + gl_prog_osd = sc_generate_osd(gl_prog_osd); // generate shader programm + + glUseProgram(gl_prog_osd); + texLoc = glGetUniformLocation(gl_prog_osd, "texture0"); + glUniform1i(texLoc, 0); + + glActiveTexture(GL_TEXTURE0); + pthread_mutex_lock(&OSDMutex); - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext ); - glViewport(0, 0, width, height); - glMatrixMode(GL_PROJECTION); - glLoadIdentity(); - glOrtho(0.0, width, height, 0.0, -1.0, 1.0); - GlxCheck(); - GlxRenderTexture(OSDtexture, 0,0, width, height,1); + glBindTexture(GL_TEXTURE_2D,OSDtexture); + render_pass_quad(1, 0, 0); pthread_mutex_unlock(&OSDMutex); + + glUseProgram(0); + glActiveTexture(GL_TEXTURE0); +#ifdef CUVID + glXMakeCurrent(XlibDisplay, VideoWindow, eglContext); +#else + eglMakeCurrent(eglDisplay, eglSurface,eglSurface, eglThreadContext); #endif -// glXMakeCurrent(XlibDisplay, VideoWindow, GlxSharedContext); - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext); } + glFlush(); Debug(3,"Read pixels %d %d\n",width,height); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); @@ -3716,9 +3885,10 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, int surface; enum AVColorSpace color; -#if 1 +#ifdef CUVID if (skipwait > 1) { skipwait--; + av_frame_free(&frame); return; } #endif @@ -3742,7 +3912,7 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, CuvidUpdateOutput(decoder); } #endif - decoder->Closing = 0; +// decoder->Closing = 0; color = frame->colorspace; if (color == AVCOL_SPC_UNSPECIFIED) // if unknown color = AVCOL_SPC_BT709; @@ -3787,24 +3957,25 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, if (surface == -1) { // no free surfaces Debug(3,"no more surfaces\n"); + av_frame_free(&frame); return; } - if (!decoder->Closing) { - VideoSetPts(&decoder->PTS, decoder->Interlaced, video_ctx, frame); - } + if (!decoder->Closing) { + VideoSetPts(&decoder->PTS, decoder->Interlaced, video_ctx, frame); + } -#ifdef VAAPI // old copy via host ram +#if defined (VAAPI) && defined (PLACEBO) // old copy via host ram { AVFrame *output; - VideoThreadLock(); + int t = decoder->PixFmt==AV_PIX_FMT_NV12?1:2; - struct pl_rect3d rc1 = {0,0,0,w,h,0}; if (p->has_dma_buf) { // Vulkan supports DMA_BUF no copy required generateVAAPIImage(decoder,surface,frame,w,h); } else { // we need to Copy the frame via RAM + VideoThreadLock(); vaSyncSurface(decoder->VaDisplay,(unsigned int)frame->data[3]); output = av_frame_alloc(); // av_frame_ref(output,frame); @@ -3830,17 +4001,21 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, .rc.z1 = 0, }); av_frame_free(&output); + VideoThreadUnlock(); } - VideoThreadUnlock(); - } -#endif -#ifdef CUVID -// first_time = GetusTicks(); + } +#else +#ifdef CUVID // copy to texture generateCUDAImage(decoder,surface,frame,w,h,decoder->PixFmt==AV_PIX_FMT_NV12?1:2); -// printf("generate CUDA Image %d\n",(GetusTicks()-first_time)/1000000); +#endif +#ifdef VAAPI + // copy to texture + generateVAAPIImage(decoder,surface,frame,w,h); #endif +#endif + CuvidQueueVideoSurface(decoder, 
surface, 1); decoder->frames[surface] = frame; return; @@ -4056,14 +4231,17 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused))int lev // img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; // img->color.light = PL_COLOR_LIGHT_DISPLAY; break; +#ifdef CUVID case AVCOL_SPC_BT2020_NCL: img->repr.sys = PL_COLOR_SYSTEM_BT_2020_NC; + memcpy(&img->repr,&pl_color_repr_uhdtv,sizeof(struct pl_color_repr)); memcpy(&img->color,&pl_color_space_bt2020_hlg,sizeof(struct pl_color_space)); deband.grain = 0.0f; // no grain in HDR // img->color.primaries = PL_COLOR_PRIM_BT_2020; // img->color.transfer = PL_COLOR_TRC_HLG; // img->color.light = PL_COLOR_LIGHT_SCENE_HLG; break; +#endif default: // fallback img->repr.sys = PL_COLOR_SYSTEM_BT_709; memcpy(&img->color,&pl_color_space_bt709,sizeof(struct pl_color_space)); @@ -4254,6 +4432,7 @@ static void CuvidDisplayFrame(void) int filled; CuvidDecoder *decoder; int RTS_flag; + int valid_frame = 0; #ifdef PLACEBO @@ -4269,14 +4448,17 @@ static void CuvidDisplayFrame(void) #endif #ifndef PLACEBO - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext); - if (CuvidDecoderN) CuvidDecoders[0]->Frameproc = (float)(GetusTicks()-last_time)/1000000.0; -// printf("Time used %2.2f\n",CuvidDecoders[0]->Frameproc); - glXWaitVideoSyncSGI (2, (Count + 1) % 2, &Count); // wait for previous frame to swap +#ifdef CUVID + glXMakeCurrent(XlibDisplay, VideoWindow, eglContext); + glXWaitVideoSyncSGI (2, (Count + 1) % 2, &Count); // wait for previous frame to swap last_time = GetusTicks(); +#else + eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); + EglCheck(); +#endif glClear(GL_COLOR_BUFFER_BIT); #else @@ -4292,7 +4474,9 @@ static void CuvidDisplayFrame(void) //printf("Sleep %d\n",15000-diff); usleep((15000 - diff));// * 1000); } else if (skipwait != 1) { - // usleep(15000); +#ifdef CUVID + usleep(15000); +#endif } #endif @@ -4300,8 +4484,9 @@ static void CuvidDisplayFrame(void) return; //last_time = GetusTicks(); + +#ifdef CUVID VideoThreadLock(); -#if 1 if (!first) { // last_time = GetusTicks(); if (!pl_swapchain_submit_frame(p->swapchain)) @@ -4311,24 +4496,25 @@ static void CuvidDisplayFrame(void) } #endif first = 0; -#if 0 - fdiff = (float)(GetusTicks()-first_time)/1000.0; - if (fdiff > 20100.0 || fdiff < 19900.0) - printf("roundtrip %2.2f\n",fdiff); - first_time = GetusTicks(); -#endif last_time = GetusTicks(); while (!pl_swapchain_start_frame(p->swapchain, &frame)) { // get new frame wait for previous to swap usleep(5); } -last_time = GetusTicks(); +// last_time = GetusTicks(); //printf("wait for frame %d\n",(GetusTicks()-last_time)/1000000); - if (!frame.fbo) + if (!frame.fbo) { +#ifdef CUVID + VideoThreadUnlock(); +#endif return; + } +#ifdef VAAPI + VideoThreadLock(); +#endif pl_render_target_from_swapchain(&target, &frame); // make target frame @@ -4371,9 +4557,7 @@ last_time = GetusTicks(); default: memcpy(&target.color,&pl_color_space_monitor,sizeof(struct pl_color_space)); break; - } - - + } #endif // // Render videos into output @@ -4398,9 +4582,8 @@ last_time = GetusTicks(); } continue; } - + valid_frame = 1; #ifdef PLACEBO - if (OsdShown == 1) { // New OSD opened pthread_mutex_lock(&OSDMutex); make_osd_overlay(OSDx,OSDy,OSDxsize,OSDysize); @@ -4444,31 +4627,51 @@ last_time = GetusTicks(); #ifndef PLACEBO // add osd to surface // - if (OsdShown) { -#ifndef USE_OPENGLOSD - glXMakeCurrent(XlibDisplay, VideoWindow, GlxThreadContext); - GlxRenderTexture(OsdGlTextures[OsdIndex], 0,0, VideoWindowWidth, VideoWindowHeight,0); -#else + if 
(OsdShown && valid_frame) { + int y; + + GLint texLoc; + + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (gl_prog_osd == 0) + gl_prog_osd = sc_generate_osd(gl_prog_osd); // generate shader programm + + glUseProgram(gl_prog_osd); + texLoc = glGetUniformLocation(gl_prog_osd, "texture0"); + glUniform1i(texLoc, 0); + + glActiveTexture(GL_TEXTURE0); + pthread_mutex_lock(&OSDMutex); - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext ); - glViewport(0, 0, VideoWindowWidth, VideoWindowHeight); - glMatrixMode(GL_PROJECTION); - glLoadIdentity(); - glOrtho(0.0, VideoWindowWidth, VideoWindowHeight, 0.0, -1.0, 1.0); - GlxCheck(); - GlxRenderTexture(OSDtexture, 0,0, VideoWindowWidth, VideoWindowHeight,0); + glBindTexture(GL_TEXTURE_2D,OSDtexture); + render_pass_quad(0, 0, 0); pthread_mutex_unlock(&OSDMutex); -#endif -// glXMakeCurrent(XlibDisplay, VideoWindow, GlxSharedContext); - glXMakeCurrent(XlibDisplay, VideoWindow, GlxContext); + + glUseProgram(0); + glActiveTexture(GL_TEXTURE0); +// eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); } #endif #ifdef PLACEBO +#ifdef VAAPI + // first_time = GetusTicks(); + if (!pl_swapchain_submit_frame(p->swapchain)) + Fatal(_("Failed to submit swapchain buffer\n")); + pl_swapchain_swap_buffers(p->swapchain); // swap buffers + // printf("submit and swap %d us\n",(GetusTicks()-first_time)/1000); +#endif VideoThreadUnlock(); #else +#ifdef CUVID glXGetVideoSyncSGI (&Count); // get current frame glXSwapBuffers(XlibDisplay, VideoWindow); +#else + eglSwapBuffers(eglDisplay, eglSurface); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); +#endif #endif // FIXME: CLOCK_MONOTONIC_RAW @@ -4517,7 +4720,7 @@ static int64_t CuvidGetClock(const CuvidDecoder * decoder) return decoder->PTS - 20 * 90 * (2 * atomic_read(&decoder->SurfacesFilled) - decoder->SurfaceField - 2 + 2); } // + 2 in driver queue - return decoder->PTS - 20 * 90 * (atomic_read(&decoder->SurfacesFilled)); // +2 + return decoder->PTS - 20 * 90 * (atomic_read(&decoder->SurfacesFilled)+SWAP_BUFFER_SIZE-1); // +2 } /// @@ -4587,6 +4790,7 @@ void CuvidGetStats(CuvidDecoder * decoder, int *missed, int *duped, /// /// @param decoder CUVID hw decoder /// + static void CuvidSyncDecoder(CuvidDecoder * decoder) { int filled; @@ -4648,30 +4852,40 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder) // both clocks are known int diff; - diff = video_clock - audio_clock - VideoAudioDelay; + diff = video_clock - audio_clock + VideoAudioDelay; diff = (decoder->LastAVDiff + diff) / 2; decoder->LastAVDiff = diff; // decoder->Frameproc = diff/90; // printf("Roundtrip sync %d\n",(GetusTicks()-last_time)/1000); // last_time = GetusTicks(); -#if 1 +#ifdef CUVID1 if (skipwait <= 1) { if ((diff/90) > 55) { skipwait = 1; - } else if ((diff/90) < -25) { + } else if ((diff/90) < -200 && filled > 1) { skipwait = 3; + decoder->SyncCounter = 1; + } else if ((diff/90) < -100 && filled > 1) { + skipwait = 2; + decoder->SyncCounter = 1; } else { + decoder->SyncCounter = 1; skipwait = 0; } } -#endif +#else skipwait =0; -// printf(" Diff %d filled %d skipwait %d \n",diff/90,filled,skipwait); - +#endif +#if 0 + if (abs(diff/90)> 55 ) { + printf(" Diff %d filled %d skipwait %d \n",diff/90,filled,skipwait); + } +#endif if (abs(diff) > 5000 * 90) { // more than 5s err = CuvidMessage(2, "video: audio/video difference too big\n"); - decoder->SyncCounter = 1; - goto out; +// decoder->SyncCounter = 1; +// usleep(10); +// goto out; } else if (diff > 100 * 90) { // FIXME: 
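/*
 * Worked example for the CuvidGetClock() change above (assuming
 * SWAP_BUFFER_SIZE == 2 and a 20 ms frame, i.e. 20 * 90 = 1800 ticks of the
 * 90 kHz PTS clock): with 3 surfaces filled the reported clock becomes
 *
 *     PTS - 1800 * (3 + 2 - 1) = PTS - 7200 ticks = PTS - 80 ms
 *
 * so the queued surfaces plus the extra swapchain buffer are all counted as
 * not yet on screen.
 */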
this quicker sync step, did not work with new code! err = CuvidMessage(4, "video: slow down video, duping frame\n"); @@ -4687,8 +4901,11 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder) err = CuvidMessage(3, "video: speed up video, droping frame\n"); ++decoder->FramesDropped; CuvidAdvanceDecoderFrame(decoder); - if (filled >2) - CuvidAdvanceDecoderFrame(decoder); + +// if ((AudioDelay == 0) && (filled < 3)) +// AudioDelay = abs(diff/90); +// if (filled >2 && diff < -55) +// CuvidAdvanceDecoderFrame(decoder); // filled = atomic_read(&decoder->SurfacesFilled); // Debug(3,"hinter drop frame filled %d\n",atomic_read(&decoder->SurfacesFilled)); decoder->SyncCounter = 1; @@ -4710,7 +4927,7 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder) // check if next field is available //JOJO if (decoder->SurfaceField && filled <= 1 + 2 * decoder->Interlaced) { - if (decoder->SurfaceField && filled < 1 + 2 * decoder->Interlaced) { + if (decoder->SurfaceField && filled <= 1 + 2 * decoder->Interlaced) { if (filled < 1 + 2 * decoder->Interlaced) { ++decoder->FramesDuped; #if 0 @@ -4834,8 +5051,8 @@ static void CuvidSetVideoMode(void) Debug(3,"Set video mode %dx%d\n",VideoWindowWidth,VideoWindowHeight); - if (GlxEnabled) { - GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, GlxThreadContext); + if (EglEnabled) { + GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, eglContext); } #ifdef PLACEBO @@ -4903,6 +5120,9 @@ static void CuvidDisplayHandlerThread(void) allfull = 0; err = VideoDecodeInput(decoder->Stream); } else { +#ifdef PLACEBO + usleep(1000); +#endif err = VideoPollInput(decoder->Stream); } // decoder can be invalid here @@ -4933,7 +5153,7 @@ static void CuvidDisplayHandlerThread(void) } #ifdef PLACEBO - usleep(1000); +// usleep(1000); #endif // all decoder buffers are full @@ -5011,10 +5231,10 @@ static const VideoModule CuvidModule = { .SetVideoMode = CuvidSetVideoMode, // .ResetAutoCrop = CuvidResetAutoCrop, .DisplayHandlerThread = CuvidDisplayHandlerThread, - .OsdClear = GlxOsdClear, - .OsdDrawARGB = GlxOsdDrawARGB, - .OsdInit = GlxOsdInit, - .OsdExit = GlxOsdExit, +// .OsdClear = GlxOsdClear, +// .OsdDrawARGB = GlxOsdDrawARGB, +// .OsdInit = GlxOsdInit, +// .OsdExit = GlxOsdExit, // .OsdClear = CuvidOsdClear, // .OsdDrawARGB = CuvidOsdDrawARGB, // .OsdInit = CuvidOsdInit, @@ -5206,10 +5426,9 @@ void VideoOsdClear(void) #ifdef PLACEBO OsdShown = 0; - #else VideoThreadLock(); - VideoUsedModule->OsdClear(); +// VideoUsedModule->OsdClear(); OsdDirtyX = OsdWidth; // reset dirty area OsdDirtyY = OsdHeight; OsdDirtyWidth = 0; @@ -5254,16 +5473,15 @@ void VideoOsdDrawARGB(int xi, int yi, int width, int height, int pitch, if (y + height > OsdDirtyY + OsdDirtyHeight) { OsdDirtyHeight = y + height - OsdDirtyY; } - Debug(4, "video: osd dirty %dx%d%+d%+d -> %dx%d%+d%+d\n", width, height, x, + Debug(3, "video: osd dirty %dx%d%+d%+d -> %dx%d%+d%+d\n", width, height, x, y, OsdDirtyWidth, OsdDirtyHeight, OsdDirtyX, OsdDirtyY); - VideoUsedModule->OsdDrawARGB(xi, yi, width, height, pitch, argb, x, y); - OsdShown = 1; + VideoThreadUnlock(); } -#ifdef USE_OPENGLOSD + void ActivateOsd(GLuint texture, int x, int y, int xsize, int ysize) { //printf("OSD open %d %d %d %d\n",x,y,xsize,ysize); OsdShown = 1; @@ -5274,7 +5492,7 @@ void ActivateOsd(GLuint texture, int x, int y, int xsize, int ysize) { OSDxsize = xsize; OSDysize = ysize; } -#endif + @@ -5336,10 +5554,6 @@ void VideoOsdInit(void) OsdWidth = VideoWindowWidth; OsdHeight = VideoWindowHeight; } - - VideoThreadLock(); - 
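/*
 * Condensed sketch of the (currently "#ifdef CUVID1"-disabled) drift ladder
 * in CuvidSyncDecoder() above.  diff is in 90 kHz ticks (video clock minus
 * audio clock plus the configured delay) and the return value is the
 * skipwait count used by the display loop.  The helper name is illustrative
 * only.
 */
static int AvDriftToSkipwait(int diff, int filled)
{
    int ms = diff / 90;

    if (ms > 55)
        return 1;
    if (ms < -200 && filled > 1)
        return 3;
    if (ms < -100 && filled > 1)
        return 2;
    return 0;
}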
VideoUsedModule->OsdInit(OsdWidth, OsdHeight); - VideoThreadUnlock(); VideoOsdClear(); } @@ -5348,9 +5562,6 @@ void VideoOsdInit(void) /// void VideoOsdExit(void) { - VideoThreadLock(); - VideoUsedModule->OsdExit(); - VideoThreadUnlock(); OsdDirtyWidth = 0; OsdDirtyHeight = 0; } @@ -5517,12 +5728,12 @@ void VideoPollEvent(void) VideoEvent(); } } -#ifdef USE_OPENGLOSD + void VideoSetVideoEventCallback(void (*videoEventCallback)(void)) { VideoEventCallback = videoEventCallback; } -#endif + //---------------------------------------------------------------------------- // Thread //---------------------------------------------------------------------------- @@ -5651,12 +5862,7 @@ void InitPlacebo(){ p->swapchain = pl_vulkan_create_swapchain(p->vk, &(struct pl_vulkan_swapchain_params) { .surface = p->pSurface, .present_mode = VK_PRESENT_MODE_FIFO_KHR, -#ifdef VAAPI - .swapchain_depth = 2, -#endif -#ifdef CUVID - .swapchain_depth = 1, -#endif + .swapchain_depth = SWAP_BUFFER_SIZE, }); if (!p->swapchain) { @@ -5682,22 +5888,25 @@ static void *VideoDisplayHandlerThread(void *dummy) #ifdef CUVID CUcontext cuda_ctx; #endif -#ifdef PLACEBO_ -// InitPlacebo(); -#endif - + EGLint contextAttrs[] = { + EGL_CONTEXT_CLIENT_VERSION, 3, + EGL_NONE +}; prctl(PR_SET_NAME,"cuvid video",0,0,0); - if (GlxEnabled) { - Debug(3, "video/glx: thread context %p <-> %p\n",glXGetCurrentContext(), GlxThreadContext); - Debug(3, "video/glx: context %p <-> %p\n", glXGetCurrentContext(),GlxContext); - - GlxThreadContext = glXCreateContext(XlibDisplay, GlxVisualInfo, GlxSharedContext,GL_TRUE); - if (!GlxThreadContext) { - Error(_("video/glx: can't create glx context\n")); + + if (EglEnabled) { +#ifdef CUVID + eglThreadContext = glXCreateContext(XlibDisplay, GlxVisualInfo, eglSharedContext,GL_TRUE); +#else + eglThreadContext = eglCreateContext(eglDisplay, eglConfig, eglSharedContext, contextAttrs); +#endif + if (!eglThreadContext) { + EglCheck(); + Fatal(_("video/egl: can't create thread egl context\n")); return NULL; } // set glx context - GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, GlxThreadContext); +// GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, eglThreadContext); } for (;;) { @@ -5761,7 +5970,11 @@ static void VideoThreadInit(void) { #ifndef PLACEBO - glXMakeCurrent(XlibDisplay, None, NULL); +#ifdef CUVID + glXMakeCurrent(XlibDisplay, None, NULL); +#else + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); +#endif #endif pthread_mutex_init(&VideoMutex, NULL); pthread_mutex_init(&VideoLockMutex, NULL); @@ -6684,21 +6897,19 @@ void VideoSetVideoMode( __attribute__ ((unused)) return; // same size nothing todo } -#ifdef USE_OPENGLOSD + if (VideoEventCallback) { sleep(1); VideoEventCallback(); Debug(3,"call back set video mode %d %d\n",width,height); } -#endif - VideoOsdExit(); - + VideoThreadLock(); VideoWindowWidth = width; VideoWindowHeight = height; VideoUsedModule->SetVideoMode(); VideoThreadUnlock(); - VideoOsdInit(); + } /// @@ -7128,36 +7339,17 @@ void VideoInit(const char *display_name) if (!VideoWindowWidth) { VideoWindowWidth = (VideoWindowHeight * 16) / 9; } - // - // prepare opengl - // -#ifdef USE_GLX - // FIXME: module selected below - if (0) { - GlxInit(); - // FIXME: use root window? 
- VideoCreateWindow(screen->root, GlxVisualInfo->visualid, GlxVisualInfo->depth); - GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, GlxContext); - } else -#endif + // + // Create output window + // - // - // Create output window - // - if (1) { // FIXME: use window mode - VideoCreateWindow(screen->root, screen->root_visual, screen->root_depth); - } else { - // FIXME: support embedded mode - VideoWindow = screen->root; - - // FIXME: VideoWindowHeight VideoWindowWidth - } + VideoCreateWindow(screen->root, screen->root_visual, screen->root_depth); Debug(3, "video: window prepared\n"); // - // prepare hardware decoder CUVID + // prepare hardware decoder // for (i = 0; i < (int)(sizeof(VideoModules) / sizeof(*VideoModules)); ++i) { // FIXME: support list of drivers and include display name @@ -7188,15 +7380,11 @@ void VideoInit(const char *display_name) //xcb_prefetch_maximum_request_length(Connection); xcb_flush(Connection); + #ifdef PLACEBO InitPlacebo(); #endif - // I would like to start threads here, but this produces: - // [xcb] Unknown sequence number while processing queue - // [xcb] Most likely this is a multi-threaded client and XInitThreads - // has not been called - //VideoPollEvent(); - //VideoThreadInit(); + } /// @@ -7221,9 +7409,10 @@ void VideoExit(void) #ifdef USE_VIDEO_THREAD VideoThreadExit(); // destroy all mutexes #endif + #ifdef USE_GLX - if (GlxEnabled) { - GlxExit(); // delete all contexts + if (EglEnabled) { + EglExit(); // delete all contexts } #endif @@ -7281,24 +7470,41 @@ void VideoExit(void) XlibDisplay = NULL; Connection = 0; } +} -} - + int GlxInitopengl() { #ifndef PLACEBO - while (GlxSharedContext == NULL || GlxContext == NULL) { +#ifdef VAAPI + EGLint contextAttrs[] = { + EGL_CONTEXT_CLIENT_VERSION, 3, + EGL_NONE + }; +#endif + while (eglSharedContext == NULL || eglContext == NULL) { sleep(1); // wait until Init from video thread is ready // printf("GlxConext %p\n",GlxSharedContext); } - - OSDcontext = glXCreateContext(XlibDisplay, GlxVisualInfo, GlxSharedContext,GL_TRUE); +#ifdef CUVID + OSDcontext = glXCreateContext(XlibDisplay, GlxVisualInfo, eglSharedContext,GL_TRUE); +#else + OSDcontext = eglCreateContext(eglDisplay, eglConfig, eglSharedContext, contextAttrs); +#endif if (!OSDcontext) { - Debug(3,"video/osd: can't create glx context\n"); + Debug(3,"video/osd: can't create OSD egl context\n"); return 0; } - Debug(3,"Create OSD GLX context\n"); + Debug(3,"Create OSD egl context\n"); +#ifdef VAAPI + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, OSDcontext); + EglCheck(); +#else glXMakeCurrent(XlibDisplay, VideoWindow, OSDcontext); + GlxCheck(); +#endif + + #endif return 1;
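/*
 * The EglCheck() calls used throughout this file rely on a helper defined
 * elsewhere in the plugin; a minimal stand-in could look like the sketch
 * below (illustrative only).  Unlike glGetError(), eglGetError() reports the
 * status of the most recent EGL call on the current thread, so a single
 * check is enough.
 */
static void EglLogError(const char *where)
{
    EGLint err = eglGetError();

    if (err != EGL_SUCCESS)
        Debug(3, "video/egl: error %#x in %s\n", (unsigned)err, where);
}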