diff --git a/.indent.pro b/.indent.pro
new file mode 100644
index 0000000..05998d9
--- /dev/null
+++ b/.indent.pro
@@ -0,0 +1,37 @@
+--blank-lines-before-block-comments
+--blank-lines-after-declarations
+--blank-lines-after-procedures
+--no-blank-lines-after-commas
+--braces-on-if-line
+--no-blank-before-sizeof
+--comment-indentation41
+--declaration-comment-column41
+--no-comment-delimiters-on-blank-lines
+--swallow-optional-blank-lines
+--dont-format-comments
+--parameter-indentation4
+--indent-level4
+--line-comments-indentation0
+--cuddle-else
+--cuddle-do-while
+--brace-indent0
+--case-brace-indentation0
+//--start-left-side-of-comments
+--leave-preprocessor-space
+//--continuation-indentation8
+--case-indentation4
+--else-endif-column0
+--no-space-after-casts
+--declaration-indentation1
+--dont-line-up-parentheses
+--no-space-after-function-call-names
+--space-special-semicolon
+--tab-size4
+--no-tabs
+--line-length119
+--comment-line-length119
+--honour-newlines
+--dont-break-procedure-type
+--break-before-boolean-operator
+--continuation-indentation4
+--ignore-newlines
diff --git a/Makefile b/Makefile
index 1dc5e71..1b33939 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 #
 # Makefile for a Video Disk Recorder plugin
-#
+#
 # $Id: 2a41981a57e5e83036463c6a08c84b86ed9d2be3 $
 # The official name of this plugin.
@@ -11,13 +11,13 @@ ### Configuration (edit this for your needs)
 # comment out if not needed
-# what kind of driver do we make -
+# what kind of driver do we make -
 # if VAAPI is enabled the drivername is softhdvaapi
 # if CUVID is enabled the drivername is softhdcuvid
 #VAAPI=1
 CUVID=1
-# use libplacebo - available for both drivers
+# use libplacebo - available for both drivers
 #LIBPLACEBO=1
 # use YADIF deint - only available with cuvid
@@ -92,7 +92,7 @@ TMPDIR ?= /tmp
 ### The compiler options:
-export CFLAGS = $(call PKGCFG,cflags)
+export CFLAGS = $(call PKGCFG,cflags)
 export CXXFLAGS = $(call PKGCFG,cxxflags)
 ifeq ($(CFLAGS),)
@@ -136,16 +136,16 @@ endif
 ifeq ($(OPENGL),1)
 CONFIG += -DUSE_GLX
 _CFLAGS += $(shell pkg-config --cflags gl glu glew)
-#LIBS += $(shell pkg-config --libs glu glew)
+#LIBS += $(shell pkg-config --libs glu glew)
 _CFLAGS += $(shell pkg-config --cflags freetype2)
 LIBS += $(shell pkg-config --libs freetype2)
 endif
 ifeq ($(VAAPI),1)
-CONFIG += -DVAAPI
+CONFIG += -DVAAPI
 #LIBPLACEBO=1
 PLUGIN = softhdvaapi
-LIBS += -lEGL
+LIBS += -lEGL
 endif
 ifeq ($(LIBPLACEBO),1)
@@ -155,7 +155,7 @@ endif
 ifeq ($(CUVID),1)
 CONFIG += -DUSE_PIP # PIP support
 CONFIG += -DCUVID # enable CUVID decoder
-LIBS += -lEGL -lGL
+LIBS += -lEGL -lGL
 ifeq ($(YADIF),1)
 CONFIG += -DYADIF # Yadif only with CUVID
 endif
@@ -173,7 +173,7 @@ SOFILE = libvdr-$(PLUGIN).so
 #
-# Test that libswresample is available
+# Test that libswresample is available
 #
 #ifneq (exists, $(shell pkg-config libswresample && echo exists))
 # $(warning ******************************************************************)
@@ -182,7 +182,7 @@ SOFILE = libvdr-$(PLUGIN).so
 #endif
 #
-# Test and set config for libavutil
+# Test and set config for libavutil
 #
 ifneq (exists, $(shell pkg-config libavutil && echo exists))
 $(warning ******************************************************************)
@@ -193,7 +193,7 @@ _CFLAGS += $(shell pkg-config --cflags libavutil)
 LIBS += $(shell pkg-config --libs libavutil)
 #
-# Test and set config for libswscale
+# Test and set config for libswscale
 #
 ifneq (exists, $(shell pkg-config libswscale && echo exists))
 $(warning ******************************************************************)
 @@
-233,10 +233,10 @@ endif #_CFLAGS += $(shell pkg-config --cflags libavcodec x11 x11-xcb xcb xcb-icccm) #LIBS += -lrt $(shell pkg-config --libs libavcodec x11 x11-xcb xcb xcb-icccm) -_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm) -LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm) +_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm) +LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm) -_CFLAGS += -I/usr/local/cuda/include +_CFLAGS += -I/usr/local/cuda/include _CFLAGS += -I./opengl -I./ LIBS += -L/usr/lib64 @@ -247,10 +247,10 @@ LIBS += -lplacebo -lglut endif ifeq ($(CUVID),1) -LIBS += -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid +LIBS += -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid endif -LIBS += -lGLEW -lGLU -ldl +LIBS += -lGLEW -lGLU -ldl ### Includes and Defines (add further entries here): INCLUDES += @@ -261,16 +261,16 @@ DEFINES += -DPLUGIN_NAME_I18N='"$(PLUGIN)"' -D_GNU_SOURCE $(CONFIG) \ ### Make it standard override CXXFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \ - -g -Wextra -Winit-self -Werror=overloaded-virtual -std=c++0x -override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \ + -g -Wextra -Winit-self -Werror=overloaded-virtual -std=c++0x +override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \ -g -W -Wextra -Winit-self -Wdeclaration-after-statement ### The object files (add further files here): -OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o +OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o ifeq ($(OPENGLOSD),1) -OBJS += openglosd.o +OBJS += openglosd.o endif SRCS = $(wildcard $(OBJS:.o=.c)) softhdcuvid.cpp @@ -290,11 +290,11 @@ $(DEPFILE): Makefile ### Internationalization (I18N): -PODIR = po -I18Npo = $(wildcard $(PODIR)/*.po) -I18Nmo = $(addsuffix .mo, $(foreach file, $(I18Npo), $(basename $(file)))) +PODIR = po +I18Npo = $(wildcard $(PODIR)/*.po) +I18Nmo = $(addsuffix .mo, $(foreach file, $(I18Npo), $(basename $(file)))) I18Nmsgs = $(addprefix $(DESTDIR)$(LOCDIR)/, $(addsuffix /LC_MESSAGES/vdr-$(PLUGIN).mo, $(notdir $(foreach file, $(I18Npo), $(basename $(file)))))) -I18Npot = $(PODIR)/$(PLUGIN).pot +I18Npot = $(PODIR)/$(PLUGIN).pot %.mo: %.po msgfmt -c -o $@ $< @@ -322,7 +322,7 @@ $(OBJS): Makefile $(SOFILE): $(OBJS) shaders.h - $(CXX) $(CXXFLAGS) $(LDFLAGS) -shared $(OBJS) $(LIBS) -o $@ + $(CXX) $(CXXFLAGS) $(LDFLAGS) -shared $(OBJS) $(LIBS) -o $@ install-lib: $(SOFILE) install -D $^ $(DESTDIR)$(LIBDIR)/$^.$(APIVERSION) @@ -343,13 +343,11 @@ clean: ## Private Targets: -HDRS= $(wildcard *.h) +HDRS= $(wildcard *.h) indent: for i in $(SRCS) $(HDRS); do \ indent $$i; \ - unexpand -a $$i | sed -e s/constconst/const/ > $$i.up; \ - mv $$i.up $$i; \ done video_test: video.c Makefile diff --git a/audio.c b/audio.c index 0a16054..54f6ad0 100644 --- a/audio.c +++ b/audio.c @@ -1,48 +1,48 @@ /// -/// @file audio.c @brief Audio module +/// @file audio.c @brief Audio module /// -/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. 
+/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: 77fa65030b179e78c13d0bf69a7cc417dae89e1a $ +/// $Id: 77fa65030b179e78c13d0bf69a7cc417dae89e1a $ ////////////////////////////////////////////////////////////////////////////// /// -/// @defgroup Audio The audio module. +/// @defgroup Audio The audio module. /// -/// This module contains all audio output functions. +/// This module contains all audio output functions. /// -/// ALSA PCM/Mixer api is supported. -/// @see http://www.alsa-project.org/alsa-doc/alsa-lib +/// ALSA PCM/Mixer api is supported. +/// @see http://www.alsa-project.org/alsa-doc/alsa-lib /// -/// @note alsa async playback is broken, don't use it! +/// @note alsa async playback is broken, don't use it! /// -/// OSS PCM/Mixer api is supported. -/// @see http://manuals.opensound.com/developer/ +/// OSS PCM/Mixer api is supported. +/// @see http://manuals.opensound.com/developer/ /// /// -/// @todo FIXME: there can be problems with little/big endian. +/// @todo FIXME: there can be problems with little/big endian. /// #ifdef DEBUG #undef DEBUG #endif -//#define USE_ALSA ///< enable alsa support -//#define USE_OSS ///< enable OSS support -#define USE_AUDIO_THREAD ///< use thread for audio playback -#define USE_AUDIO_MIXER ///< use audio module mixer +//#define USE_ALSA ///< enable alsa support +//#define USE_OSS ///< enable OSS support +#define USE_AUDIO_THREAD ///< use thread for audio playback +#define USE_AUDIO_MIXER ///< use audio module mixer #include #include @@ -53,8 +53,8 @@ #include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #ifdef USE_ALSA #include @@ -91,14 +91,14 @@ #endif #endif -#include "iatomic.h" // portable atomic_t +#include "iatomic.h" // portable atomic_t #include "ringbuffer.h" #include "misc.h" #include "audio.h" //---------------------------------------------------------------------------- -// Declarations +// Declarations //---------------------------------------------------------------------------- /** @@ -106,88 +106,88 @@ */ typedef struct _audio_module_ { - const char *Name; ///< audio output module name + const char *Name; ///< audio output module name - int (*const Thread) (void); ///< module thread handler - void (*const FlushBuffers) (void); ///< flush sample buffers - int64_t(*const GetDelay) (void); ///< get current audio delay - void (*const SetVolume) (int); ///< set output volume - int (*const Setup) (int *, int *, int); ///< setup channels, samplerate - void (*const Play) (void); ///< play audio - void (*const Pause) (void); ///< pause audio - void (*const Init) (void); ///< initialize audio output module - void (*const Exit) (void); ///< cleanup audio output module + int 
(*const Thread)(void); ///< module thread handler + void (*const FlushBuffers)(void); ///< flush sample buffers + int64_t(*const GetDelay) (void); ///< get current audio delay + void (*const SetVolume)(int); ///< set output volume + int (*const Setup)(int *, int *, int); ///< setup channels, samplerate + void (*const Play)(void); ///< play audio + void (*const Pause)(void); ///< pause audio + void (*const Init)(void); ///< initialize audio output module + void (*const Exit)(void); ///< cleanup audio output module } AudioModule; -static const AudioModule NoopModule; ///< forward definition of noop module +static const AudioModule NoopModule; ///< forward definition of noop module //---------------------------------------------------------------------------- -// Variables +// Variables //---------------------------------------------------------------------------- -char AudioAlsaDriverBroken; ///< disable broken driver message -char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix -char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix +char AudioAlsaDriverBroken; ///< disable broken driver message +char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix +char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix -static const char *AudioModuleName; ///< which audio module to use +static const char *AudioModuleName; ///< which audio module to use /// Selected audio module. static const AudioModule *AudioUsedModule = &NoopModule; -static const char *AudioPCMDevice; ///< PCM device name -static const char *AudioPassthroughDevice; ///< Passthrough device name -static char AudioAppendAES; ///< flag automatic append AES -static const char *AudioMixerDevice; ///< mixer device name -static const char *AudioMixerChannel; ///< mixer channel name -static char AudioDoingInit; ///> flag in init, reduce error -static volatile char AudioRunning; ///< thread running / stopped -static volatile char AudioPaused; ///< audio paused -static volatile char AudioVideoIsReady; ///< video ready start early -static int AudioSkip; ///< skip audio to sync to video -int AudioDelay; /// delay audio to sync to video +static const char *AudioPCMDevice; ///< PCM device name +static const char *AudioPassthroughDevice; ///< Passthrough device name +static char AudioAppendAES; ///< flag automatic append AES +static const char *AudioMixerDevice; ///< mixer device name +static const char *AudioMixerChannel; ///< mixer channel name +static char AudioDoingInit; ///> flag in init, reduce error +static volatile char AudioRunning; ///< thread running / stopped +static volatile char AudioPaused; ///< audio paused +static volatile char AudioVideoIsReady; ///< video ready start early +static int AudioSkip; ///< skip audio to sync to video +int AudioDelay; /// delay audio to sync to video -static const int AudioBytesProSample = 2; ///< number of bytes per sample +static const int AudioBytesProSample = 2; ///< number of bytes per sample -static int AudioBufferTime = 336; ///< audio buffer time in ms +static int AudioBufferTime = 336; ///< audio buffer time in ms #ifdef USE_AUDIO_THREAD -static pthread_t AudioThread; ///< audio play thread -static pthread_mutex_t AudioMutex; ///< audio condition mutex -static pthread_cond_t AudioStartCond; ///< condition variable -static char AudioThreadStop; ///< stop audio thread +static pthread_t AudioThread; ///< audio play thread +static pthread_mutex_t AudioMutex; ///< audio condition mutex +static pthread_cond_t AudioStartCond; ///< condition variable +static char 
AudioThreadStop; ///< stop audio thread #else -static const int AudioThread; ///< dummy audio thread +static const int AudioThread; ///< dummy audio thread #endif -static char AudioSoftVolume; ///< flag use soft volume -static char AudioNormalize; ///< flag use volume normalize -static char AudioCompression; ///< flag use compress volume -static char AudioMute; ///< flag muted -static int AudioAmplifier; ///< software volume factor -static int AudioNormalizeFactor; ///< current normalize factor -static const int AudioMinNormalize = 100; ///< min. normalize factor -static int AudioMaxNormalize; ///< max. normalize factor -static int AudioCompressionFactor; ///< current compression factor -static int AudioMaxCompression; ///< max. compression factor -static int AudioStereoDescent; ///< volume descent for stereo -static int AudioVolume; ///< current volume (0 .. 1000) +static char AudioSoftVolume; ///< flag use soft volume +static char AudioNormalize; ///< flag use volume normalize +static char AudioCompression; ///< flag use compress volume +static char AudioMute; ///< flag muted +static int AudioAmplifier; ///< software volume factor +static int AudioNormalizeFactor; ///< current normalize factor +static const int AudioMinNormalize = 100; ///< min. normalize factor +static int AudioMaxNormalize; ///< max. normalize factor +static int AudioCompressionFactor; ///< current compression factor +static int AudioMaxCompression; ///< max. compression factor +static int AudioStereoDescent; ///< volume descent for stereo +static int AudioVolume; ///< current volume (0 .. 1000) -extern int VideoAudioDelay; ///< import audio/video delay +extern int VideoAudioDelay; ///< import audio/video delay /// default ring buffer size ~2s 8ch 16bit (3 * 5 * 7 * 8) static const unsigned AudioRingBufferSize = 3 * 5 * 7 * 8 * 2 * 1000; -static int AudioChannelsInHw[9]; ///< table which channels are supported +static int AudioChannelsInHw[9]; ///< table which channels are supported enum _audio_rates -{ ///< sample rates enumeration +{ ///< sample rates enumeration // HW: 32000 44100 48000 88200 96000 176400 192000 - //Audio32000, ///< 32.0Khz - Audio44100, ///< 44.1Khz - Audio48000, ///< 48.0Khz - //Audio88200, ///< 88.2Khz - //Audio96000, ///< 96.0Khz - //Audio176400, ///< 176.4Khz - Audio192000, ///< 192.0Khz - AudioRatesMax ///< max index + //Audio32000, ///< 32.0Khz + Audio44100, ///< 44.1Khz + Audio48000, ///< 48.0Khz + //Audio88200, ///< 88.2Khz + //Audio96000, ///< 96.0Khz + //Audio176400, ///< 176.4Khz + Audio192000, ///< 192.0Khz + AudioRatesMax ///< max index }; /// table which rates are supported @@ -202,17 +202,17 @@ static const unsigned AudioRatesTable[AudioRatesMax] = { }; //---------------------------------------------------------------------------- -// filter +// filter //---------------------------------------------------------------------------- -static const int AudioNormSamples = 4096; ///< number of samples +static const int AudioNormSamples = 4096; ///< number of samples -#define AudioNormMaxIndex 128 ///< number of average values +#define AudioNormMaxIndex 128 ///< number of average values /// average of n last sample blocks static uint32_t AudioNormAverage[AudioNormMaxIndex]; -static int AudioNormIndex; ///< index into average table -static int AudioNormReady; ///< index counter -static int AudioNormCounter; ///< sample counter +static int AudioNormIndex; ///< index into average table +static int AudioNormReady; ///< index counter +static int AudioNormCounter; ///< sample counter /** ** Audio 
normalizer. @@ -233,66 +233,65 @@ static void AudioNormalizer(int16_t * samples, int count) l = count / AudioBytesProSample; data = samples; do { - n = l; - if (AudioNormCounter + n > AudioNormSamples) { - n = AudioNormSamples - AudioNormCounter; - } - avg = AudioNormAverage[AudioNormIndex]; - for (i = 0; i < n; ++i) { - int t; + n = l; + if (AudioNormCounter + n > AudioNormSamples) { + n = AudioNormSamples - AudioNormCounter; + } + avg = AudioNormAverage[AudioNormIndex]; + for (i = 0; i < n; ++i) { + int t; - t = data[i]; - avg += (t * t) / AudioNormSamples; - } - AudioNormAverage[AudioNormIndex] = avg; - AudioNormCounter += n; - if (AudioNormCounter >= AudioNormSamples) { - if (AudioNormReady < AudioNormMaxIndex) { - AudioNormReady++; - } else { - avg = 0; - for (i = 0; i < AudioNormMaxIndex; ++i) { - avg += AudioNormAverage[i] / AudioNormMaxIndex; - } + t = data[i]; + avg += (t * t) / AudioNormSamples; + } + AudioNormAverage[AudioNormIndex] = avg; + AudioNormCounter += n; + if (AudioNormCounter >= AudioNormSamples) { + if (AudioNormReady < AudioNormMaxIndex) { + AudioNormReady++; + } else { + avg = 0; + for (i = 0; i < AudioNormMaxIndex; ++i) { + avg += AudioNormAverage[i] / AudioNormMaxIndex; + } - // calculate normalize factor - if (avg > 0) { - factor = ((INT16_MAX / 8) * 1000U) / (uint32_t) sqrt(avg); - // smooth normalize - AudioNormalizeFactor = - (AudioNormalizeFactor * 500 + factor * 500) / 1000; - if (AudioNormalizeFactor < AudioMinNormalize) { - AudioNormalizeFactor = AudioMinNormalize; - } - if (AudioNormalizeFactor > AudioMaxNormalize) { - AudioNormalizeFactor = AudioMaxNormalize; - } - } else { - factor = 1000; - } - Debug(4, "audio/noramlize: avg %8d, fac=%6.3f, norm=%6.3f\n", - avg, factor / 1000.0, AudioNormalizeFactor / 1000.0); - } + // calculate normalize factor + if (avg > 0) { + factor = ((INT16_MAX / 8) * 1000U) / (uint32_t) sqrt(avg); + // smooth normalize + AudioNormalizeFactor = (AudioNormalizeFactor * 500 + factor * 500) / 1000; + if (AudioNormalizeFactor < AudioMinNormalize) { + AudioNormalizeFactor = AudioMinNormalize; + } + if (AudioNormalizeFactor > AudioMaxNormalize) { + AudioNormalizeFactor = AudioMaxNormalize; + } + } else { + factor = 1000; + } + Debug(4, "audio/noramlize: avg %8d, fac=%6.3f, norm=%6.3f\n", avg, factor / 1000.0, + AudioNormalizeFactor / 1000.0); + } - AudioNormIndex = (AudioNormIndex + 1) % AudioNormMaxIndex; - AudioNormCounter = 0; - AudioNormAverage[AudioNormIndex] = 0U; - } - data += n; - l -= n; + AudioNormIndex = (AudioNormIndex + 1) % AudioNormMaxIndex; + AudioNormCounter = 0; + AudioNormAverage[AudioNormIndex] = 0U; + } + data += n; + l -= n; } while (l > 0); // apply normalize factor for (i = 0; i < count / AudioBytesProSample; ++i) { - int t; + int t; - t = (samples[i] * AudioNormalizeFactor) / 1000; - if (t < INT16_MIN) { - t = INT16_MIN; - } else if (t > INT16_MAX) { - t = INT16_MAX; - } - samples[i] = t; + t = (samples[i] * AudioNormalizeFactor) / 1000; + if (t < INT16_MIN) { + t = INT16_MIN; + } else if (t > INT16_MAX) { + t = INT16_MAX; + } + samples[i] = t; } } @@ -306,7 +305,7 @@ static void AudioResetNormalizer(void) AudioNormCounter = 0; AudioNormReady = 0; for (i = 0; i < AudioNormMaxIndex; ++i) { - AudioNormAverage[i] = 0U; + AudioNormAverage[i] = 0U; } AudioNormalizeFactor = 1000; } @@ -326,44 +325,43 @@ static void AudioCompressor(int16_t * samples, int count) // find loudest sample max_sample = 0; for (i = 0; i < count / AudioBytesProSample; ++i) { - int t; + int t; - t = abs(samples[i]); - if (t > max_sample) { - 
max_sample = t; - } + t = abs(samples[i]); + if (t > max_sample) { + max_sample = t; + } } // calculate compression factor if (max_sample > 0) { - factor = (INT16_MAX * 1000) / max_sample; - // smooth compression (FIXME: make configurable?) - AudioCompressionFactor = - (AudioCompressionFactor * 950 + factor * 50) / 1000; - if (AudioCompressionFactor > factor) { - AudioCompressionFactor = factor; // no clipping - } - if (AudioCompressionFactor > AudioMaxCompression) { - AudioCompressionFactor = AudioMaxCompression; - } + factor = (INT16_MAX * 1000) / max_sample; + // smooth compression (FIXME: make configurable?) + AudioCompressionFactor = (AudioCompressionFactor * 950 + factor * 50) / 1000; + if (AudioCompressionFactor > factor) { + AudioCompressionFactor = factor; // no clipping + } + if (AudioCompressionFactor > AudioMaxCompression) { + AudioCompressionFactor = AudioMaxCompression; + } } else { - return; // silent nothing todo + return; // silent nothing todo } - Debug(4, "audio/compress: max %5d, fac=%6.3f, com=%6.3f\n", max_sample, - factor / 1000.0, AudioCompressionFactor / 1000.0); + Debug(4, "audio/compress: max %5d, fac=%6.3f, com=%6.3f\n", max_sample, factor / 1000.0, + AudioCompressionFactor / 1000.0); // apply compression factor for (i = 0; i < count / AudioBytesProSample; ++i) { - int t; + int t; - t = (samples[i] * AudioCompressionFactor) / 1000; - if (t < INT16_MIN) { - t = INT16_MIN; - } else if (t > INT16_MAX) { - t = INT16_MAX; - } - samples[i] = t; + t = (samples[i] * AudioCompressionFactor) / 1000; + if (t < INT16_MIN) { + t = INT16_MIN; + } else if (t > INT16_MAX) { + t = INT16_MAX; + } + samples[i] = t; } } @@ -374,7 +372,7 @@ static void AudioResetCompressor(void) { AudioCompressionFactor = 2000; if (AudioCompressionFactor > AudioMaxCompression) { - AudioCompressionFactor = AudioMaxCompression; + AudioCompressionFactor = AudioMaxCompression; } } @@ -392,20 +390,20 @@ static void AudioSoftAmplifier(int16_t * samples, int count) // silence if (AudioMute || !AudioAmplifier) { - memset(samples, 0, count); - return; + memset(samples, 0, count); + return; } for (i = 0; i < count / AudioBytesProSample; ++i) { - int t; + int t; - t = (samples[i] * AudioAmplifier) / 1000; - if (t < INT16_MIN) { - t = INT16_MIN; - } else if (t > INT16_MAX) { - t = INT16_MAX; - } - samples[i] = t; + t = (samples[i] * AudioAmplifier) / 1000; + if (t < INT16_MIN) { + t = INT16_MIN; + } else if (t > INT16_MAX) { + t = INT16_MAX; + } + samples[i] = t; } } @@ -423,11 +421,11 @@ static void AudioMono2Stereo(const int16_t * in, int frames, int16_t * out) int i; for (i = 0; i < frames; ++i) { - int t; + int t; - t = in[i]; - out[i * 2 + 0] = t; - out[i * 2 + 1] = t; + t = in[i]; + out[i * 2 + 0] = t; + out[i * 2 + 1] = t; } } @@ -443,7 +441,7 @@ static void AudioStereo2Mono(const int16_t * in, int frames, int16_t * out) int i; for (i = 0; i < frames; i += 2) { - out[i / 2] = (in[i + 0] + in[i + 1]) / 2; + out[i / 2] = (in[i + 0] + in[i + 1]) / 2; } } @@ -459,74 +457,73 @@ static void AudioStereo2Mono(const int16_t * in, int frames, int16_t * out) ** @param frames number of frames in sample buffer ** @param out output sample buffer */ -static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames, - int16_t * out) +static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames, int16_t * out) { while (frames--) { - int l; - int r; + int l; + int r; - switch (in_chan) { - case 3: // stereo or surround? 
=>stereo - l = in[0] * 600; // L - r = in[1] * 600; // R - l += in[2] * 400; // C - r += in[2] * 400; - break; - case 4: // quad or surround? =>quad - l = in[0] * 600; // L - r = in[1] * 600; // R - l += in[2] * 400; // Ls - r += in[3] * 400; // Rs - break; - case 5: // 5.0 - l = in[0] * 500; // L - r = in[1] * 500; // R - l += in[2] * 200; // Ls - r += in[3] * 200; // Rs - l += in[4] * 300; // C - r += in[4] * 300; - break; - case 6: // 5.1 - l = in[0] * 400; // L - r = in[1] * 400; // R - l += in[2] * 200; // Ls - r += in[3] * 200; // Rs - l += in[4] * 300; // C - r += in[4] * 300; - l += in[5] * 100; // LFE - r += in[5] * 100; - break; - case 7: // 7.0 - l = in[0] * 400; // L - r = in[1] * 400; // R - l += in[2] * 200; // Ls - r += in[3] * 200; // Rs - l += in[4] * 300; // C - r += in[4] * 300; - l += in[5] * 100; // RL - r += in[6] * 100; // RR - break; - case 8: // 7.1 - l = in[0] * 400; // L - r = in[1] * 400; // R - l += in[2] * 150; // Ls - r += in[3] * 150; // Rs - l += in[4] * 250; // C - r += in[4] * 250; - l += in[5] * 100; // LFE - r += in[5] * 100; - l += in[6] * 100; // RL - r += in[7] * 100; // RR - break; - default: - abort(); - } - in += in_chan; + switch (in_chan) { + case 3: // stereo or surround? =>stereo + l = in[0] * 600; // L + r = in[1] * 600; // R + l += in[2] * 400; // C + r += in[2] * 400; + break; + case 4: // quad or surround? =>quad + l = in[0] * 600; // L + r = in[1] * 600; // R + l += in[2] * 400; // Ls + r += in[3] * 400; // Rs + break; + case 5: // 5.0 + l = in[0] * 500; // L + r = in[1] * 500; // R + l += in[2] * 200; // Ls + r += in[3] * 200; // Rs + l += in[4] * 300; // C + r += in[4] * 300; + break; + case 6: // 5.1 + l = in[0] * 400; // L + r = in[1] * 400; // R + l += in[2] * 200; // Ls + r += in[3] * 200; // Rs + l += in[4] * 300; // C + r += in[4] * 300; + l += in[5] * 100; // LFE + r += in[5] * 100; + break; + case 7: // 7.0 + l = in[0] * 400; // L + r = in[1] * 400; // R + l += in[2] * 200; // Ls + r += in[3] * 200; // Rs + l += in[4] * 300; // C + r += in[4] * 300; + l += in[5] * 100; // RL + r += in[6] * 100; // RR + break; + case 8: // 7.1 + l = in[0] * 400; // L + r = in[1] * 400; // R + l += in[2] * 150; // Ls + r += in[3] * 150; // Rs + l += in[4] * 250; // C + r += in[4] * 250; + l += in[5] * 100; // LFE + r += in[5] * 100; + l += in[6] * 100; // RL + r += in[7] * 100; // RR + break; + default: + abort(); + } + in += in_chan; - out[0] = l / 1000; - out[1] = r / 1000; - out += 2; + out[0] = l / 1000; + out[1] = r / 1000; + out += 2; } } @@ -539,18 +536,17 @@ static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames, ** @param out output sample buffer ** @param out_chan nr. of output channels */ -static void AudioUpmix(const int16_t * in, int in_chan, int frames, - int16_t * out, int out_chan) +static void AudioUpmix(const int16_t * in, int in_chan, int frames, int16_t * out, int out_chan) { while (frames--) { - int i; + int i; - for (i = 0; i < in_chan; ++i) { // copy existing channels - *out++ = *in++; - } - for (; i < out_chan; ++i) { // silents missing channels - *out++ = 0; - } + for (i = 0; i < in_chan; ++i) { // copy existing channels + *out++ = *in++; + } + for (; i < out_chan; ++i) { // silents missing channels + *out++ = 0; + } } } @@ -570,80 +566,78 @@ static void AudioUpmix(const int16_t * in, int in_chan, int frames, ** @param out output sample buffer ** @param out_chan nr. 
of output channels */ -static void AudioResample(const int16_t * in, int in_chan, int frames, - int16_t * out, int out_chan) +static void AudioResample(const int16_t * in, int in_chan, int frames, int16_t * out, int out_chan) { switch (in_chan * 8 + out_chan) { - case 1 * 8 + 1: - case 2 * 8 + 2: - case 3 * 8 + 3: - case 4 * 8 + 4: - case 5 * 8 + 5: - case 6 * 8 + 6: - case 7 * 8 + 7: - case 8 * 8 + 8: // input = output channels - memcpy(out, in, frames * in_chan * AudioBytesProSample); - break; - case 2 * 8 + 1: - AudioStereo2Mono(in, frames, out); - break; - case 1 * 8 + 2: - AudioMono2Stereo(in, frames, out); - break; - case 3 * 8 + 2: - case 4 * 8 + 2: - case 5 * 8 + 2: - case 6 * 8 + 2: - case 7 * 8 + 2: - case 8 * 8 + 2: - AudioSurround2Stereo(in, in_chan, frames, out); - break; - case 5 * 8 + 6: - case 3 * 8 + 8: - case 5 * 8 + 8: - case 6 * 8 + 8: - AudioUpmix(in, in_chan, frames, out, out_chan); - break; + case 1 * 8 + 1: + case 2 * 8 + 2: + case 3 * 8 + 3: + case 4 * 8 + 4: + case 5 * 8 + 5: + case 6 * 8 + 6: + case 7 * 8 + 7: + case 8 * 8 + 8: // input = output channels + memcpy(out, in, frames * in_chan * AudioBytesProSample); + break; + case 2 * 8 + 1: + AudioStereo2Mono(in, frames, out); + break; + case 1 * 8 + 2: + AudioMono2Stereo(in, frames, out); + break; + case 3 * 8 + 2: + case 4 * 8 + 2: + case 5 * 8 + 2: + case 6 * 8 + 2: + case 7 * 8 + 2: + case 8 * 8 + 2: + AudioSurround2Stereo(in, in_chan, frames, out); + break; + case 5 * 8 + 6: + case 3 * 8 + 8: + case 5 * 8 + 8: + case 6 * 8 + 8: + AudioUpmix(in, in_chan, frames, out, out_chan); + break; - default: - Error("audio: unsupported %d -> %d channels resample\n", in_chan, - out_chan); - // play silence - memset(out, 0, frames * out_chan * AudioBytesProSample); - break; + default: + Error("audio: unsupported %d -> %d channels resample\n", in_chan, out_chan); + // play silence + memset(out, 0, frames * out_chan * AudioBytesProSample); + break; } } #endif //---------------------------------------------------------------------------- -// ring buffer +// ring buffer //---------------------------------------------------------------------------- -#define AUDIO_RING_MAX 8 ///< number of audio ring buffers +#define AUDIO_RING_MAX 8 ///< number of audio ring buffers /** ** Audio ring buffer. */ typedef struct _audio_ring_ring_ { - char FlushBuffers; ///< flag: flush buffers - char Passthrough; ///< flag: use pass-through (AC-3, ...) - int16_t PacketSize; ///< packet size - unsigned HwSampleRate; ///< hardware sample rate in Hz - unsigned HwChannels; ///< hardware number of channels - unsigned InSampleRate; ///< input sample rate in Hz - unsigned InChannels; ///< input number of channels - int64_t PTS; ///< pts clock - RingBuffer *RingBuffer; ///< sample ring buffer + char FlushBuffers; ///< flag: flush buffers + char Passthrough; ///< flag: use pass-through (AC-3, ...) 
+ int16_t PacketSize; ///< packet size + unsigned HwSampleRate; ///< hardware sample rate in Hz + unsigned HwChannels; ///< hardware number of channels + unsigned InSampleRate; ///< input sample rate in Hz + unsigned InChannels; ///< input number of channels + int64_t PTS; ///< pts clock + RingBuffer *RingBuffer; ///< sample ring buffer } AudioRingRing; /// ring of audio ring buffers static AudioRingRing AudioRing[AUDIO_RING_MAX]; -static int AudioRingWrite; ///< audio ring write pointer -static int AudioRingRead; ///< audio ring read pointer -static atomic_t AudioRingFilled; ///< how many of the ring is used -static unsigned AudioStartThreshold; ///< start play, if filled +static int AudioRingWrite; ///< audio ring write pointer +static int AudioRingRead; ///< audio ring read pointer +static atomic_t AudioRingFilled; ///< how many of the ring is used +static unsigned AudioStartThreshold; ///< start play, if filled /** ** Add sample-rate, number of channels change to ring. @@ -663,26 +657,26 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) // search supported sample-rates for (u = 0; u < AudioRatesMax; ++u) { - if (AudioRatesTable[u] == sample_rate) { - goto found; - } - if (AudioRatesTable[u] > sample_rate) { - break; - } + if (AudioRatesTable[u] == sample_rate) { + goto found; + } + if (AudioRatesTable[u] > sample_rate) { + break; + } } Error(_("audio: %dHz sample-rate unsupported\n"), sample_rate); - return -1; // unsupported sample-rate + return -1; // unsupported sample-rate found: if (!AudioChannelMatrix[u][channels]) { - Error(_("audio: %d channels unsupported\n"), channels); - return -1; // unsupported nr. of channels + Error(_("audio: %d channels unsupported\n"), channels); + return -1; // unsupported nr. of channels } - if (atomic_read(&AudioRingFilled) == AUDIO_RING_MAX) { // no free slot - // FIXME: can wait for ring buffer empty - Error(_("audio: out of ring buffers\n")); - return -1; + if (atomic_read(&AudioRingFilled) == AUDIO_RING_MAX) { // no free slot + // FIXME: can wait for ring buffer empty + Error(_("audio: out of ring buffers\n")); + return -1; } AudioRingWrite = (AudioRingWrite + 1) % AUDIO_RING_MAX; @@ -696,17 +690,16 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) AudioRing[AudioRingWrite].PTS = INT64_C(0x8000000000000000); RingBufferReset(AudioRing[AudioRingWrite].RingBuffer); - Debug(3, "audio: %d ring buffer prepared\n", - atomic_read(&AudioRingFilled) + 1); + Debug(3, "audio: %d ring buffer prepared\n", atomic_read(&AudioRingFilled) + 1); atomic_inc(&AudioRingFilled); #ifdef USE_AUDIO_THREAD if (AudioThread) { - // tell thread, that there is something todo - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - Debug(3,"Start on AudioRingAdd\n"); + // tell thread, that there is something todo + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3, "Start on AudioRingAdd\n"); } #endif @@ -721,8 +714,8 @@ static void AudioRingInit(void) int i; for (i = 0; i < AUDIO_RING_MAX; ++i) { - // ~2s 8ch 16bit - AudioRing[i].RingBuffer = RingBufferNew(AudioRingBufferSize); + // ~2s 8ch 16bit + AudioRing[i].RingBuffer = RingBufferNew(AudioRingBufferSize); } atomic_set(&AudioRingFilled, 0); } @@ -735,12 +728,12 @@ static void AudioRingExit(void) int i; for (i = 0; i < AUDIO_RING_MAX; ++i) { - if (AudioRing[i].RingBuffer) { - RingBufferDel(AudioRing[i].RingBuffer); - AudioRing[i].RingBuffer = NULL; - } - AudioRing[i].HwSampleRate = 0; // checked for valid setup - AudioRing[i].InSampleRate = 
0; + if (AudioRing[i].RingBuffer) { + RingBufferDel(AudioRing[i].RingBuffer); + AudioRing[i].RingBuffer = NULL; + } + AudioRing[i].HwSampleRate = 0; // checked for valid setup + AudioRing[i].InSampleRate = 0; } AudioRingRead = 0; AudioRingWrite = 0; @@ -749,23 +742,23 @@ static void AudioRingExit(void) #ifdef USE_ALSA //============================================================================ -// A L S A +// A L S A //============================================================================ //---------------------------------------------------------------------------- -// Alsa variables +// Alsa variables //---------------------------------------------------------------------------- -static snd_pcm_t *AlsaPCMHandle; ///< alsa pcm handle -static char AlsaCanPause; ///< hw supports pause -static int AlsaUseMmap; ///< use mmap +static snd_pcm_t *AlsaPCMHandle; ///< alsa pcm handle +static char AlsaCanPause; ///< hw supports pause +static int AlsaUseMmap; ///< use mmap -static snd_mixer_t *AlsaMixer; ///< alsa mixer handle -static snd_mixer_elem_t *AlsaMixerElem; ///< alsa pcm mixer element -static int AlsaRatio; ///< internal -> mixer ratio * 1000 +static snd_mixer_t *AlsaMixer; ///< alsa mixer handle +static snd_mixer_elem_t *AlsaMixerElem; ///< alsa pcm mixer element +static int AlsaRatio; ///< internal -> mixer ratio * 1000 //---------------------------------------------------------------------------- -// alsa pcm +// alsa pcm //---------------------------------------------------------------------------- /** @@ -782,123 +775,114 @@ static int AlsaPlayRingbuffer(void) int first; first = 1; - for (;;) { // loop for ring buffer wrap - int avail; - int n; - int err; - int frames; - const void *p; + for (;;) { // loop for ring buffer wrap + int avail; + int n; + int err; + int frames; + const void *p; - // how many bytes can be written? - n = snd_pcm_avail_update(AlsaPCMHandle); - if (n < 0) { - if (n == -EAGAIN) { - continue; - } - Warning(_("audio/alsa: avail underrun error? '%s'\n"), - snd_strerror(n)); - err = snd_pcm_recover(AlsaPCMHandle, n, 0); - if (err >= 0) { - continue; - } - Error(_("audio/alsa: snd_pcm_avail_update(): %s\n"), - snd_strerror(n)); - return -1; - } - avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, n); - if (avail < 256) { // too much overhead - if (first) { - // happens with broken alsa drivers - if (AudioThread) { - if (!AudioAlsaDriverBroken) { - Error(_("audio/alsa: broken driver %d state '%s'\n"), - avail, - snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); - } - // try to recover - if (snd_pcm_state(AlsaPCMHandle) - == SND_PCM_STATE_PREPARED) { - if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) { - Error(_("audio/alsa: snd_pcm_start(): %s\n"), - snd_strerror(err)); - } - } - usleep(5 * 1000); - } - } - Debug(4, "audio/alsa: break state '%s'\n", - snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); - break; - } + // how many bytes can be written? + n = snd_pcm_avail_update(AlsaPCMHandle); + if (n < 0) { + if (n == -EAGAIN) { + continue; + } + Warning(_("audio/alsa: avail underrun error? 
'%s'\n"), snd_strerror(n)); + err = snd_pcm_recover(AlsaPCMHandle, n, 0); + if (err >= 0) { + continue; + } + Error(_("audio/alsa: snd_pcm_avail_update(): %s\n"), snd_strerror(n)); + return -1; + } + avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, n); + if (avail < 256) { // too much overhead + if (first) { + // happens with broken alsa drivers + if (AudioThread) { + if (!AudioAlsaDriverBroken) { + Error(_("audio/alsa: broken driver %d state '%s'\n"), avail, + snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); + } + // try to recover + if (snd_pcm_state(AlsaPCMHandle) + == SND_PCM_STATE_PREPARED) { + if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) { + Error(_("audio/alsa: snd_pcm_start(): %s\n"), snd_strerror(err)); + } + } + usleep(5 * 1000); + } + } + Debug(4, "audio/alsa: break state '%s'\n", snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); + break; + } - n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); - if (!n) { // ring buffer empty - if (first) { // only error on first loop - Debug(4, "audio/alsa: empty buffers %d\n", avail); - // ring buffer empty - // AlsaLowWaterMark = 1; - return 1; - } - return 0; - } - if (n < avail) { // not enough bytes in ring buffer - avail = n; - } - if (!avail) { // full or buffer empty - break; - } - // muting pass-through AC-3, can produce disturbance - if (AudioMute || (AudioSoftVolume - && !AudioRing[AudioRingRead].Passthrough)) { - // FIXME: quick&dirty cast - AudioSoftAmplifier((int16_t *) p, avail); - // FIXME: if not all are written, we double amplify them - } - frames = snd_pcm_bytes_to_frames(AlsaPCMHandle, avail); + n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); + if (!n) { // ring buffer empty + if (first) { // only error on first loop + Debug(4, "audio/alsa: empty buffers %d\n", avail); + // ring buffer empty + // AlsaLowWaterMark = 1; + return 1; + } + return 0; + } + if (n < avail) { // not enough bytes in ring buffer + avail = n; + } + if (!avail) { // full or buffer empty + break; + } + // muting pass-through AC-3, can produce disturbance + if (AudioMute || (AudioSoftVolume && !AudioRing[AudioRingRead].Passthrough)) { + // FIXME: quick&dirty cast + AudioSoftAmplifier((int16_t *) p, avail); + // FIXME: if not all are written, we double amplify them + } + frames = snd_pcm_bytes_to_frames(AlsaPCMHandle, avail); #ifdef DEBUG - if (avail != snd_pcm_frames_to_bytes(AlsaPCMHandle, frames)) { - Error(_("audio/alsa: bytes lost -> out of sync\n")); - } + if (avail != snd_pcm_frames_to_bytes(AlsaPCMHandle, frames)) { + Error(_("audio/alsa: bytes lost -> out of sync\n")); + } #endif - for (;;) { - if (AlsaUseMmap) { - err = snd_pcm_mmap_writei(AlsaPCMHandle, p, frames); - } else { - err = snd_pcm_writei(AlsaPCMHandle, p, frames); - } - //Debug(3, "audio/alsa: wrote %d/%d frames\n", err, frames); - if (err != frames) { - if (err < 0) { - if (err == -EAGAIN) { - continue; - } - /* - if (err == -EBADFD) { - goto again; - } - */ - Warning(_("audio/alsa: writei underrun error? 
'%s'\n"), - snd_strerror(err)); - err = snd_pcm_recover(AlsaPCMHandle, err, 0); - if (err >= 0) { - continue; - } - Error(_("audio/alsa: snd_pcm_writei failed: %s\n"), - snd_strerror(err)); - return -1; - } - // this could happen, if underrun happened - Warning(_("audio/alsa: not all frames written\n")); - avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, err); - } - break; - } - RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, avail); - first = 0; - + for (;;) { + if (AlsaUseMmap) { + err = snd_pcm_mmap_writei(AlsaPCMHandle, p, frames); + } else { + err = snd_pcm_writei(AlsaPCMHandle, p, frames); + } + //Debug(3, "audio/alsa: wrote %d/%d frames\n", err, frames); + if (err != frames) { + if (err < 0) { + if (err == -EAGAIN) { + continue; + } + /* + if (err == -EBADFD) { + goto again; + } + */ + Warning(_("audio/alsa: writei underrun error? '%s'\n"), snd_strerror(err)); + err = snd_pcm_recover(AlsaPCMHandle, err, 0); + if (err >= 0) { + continue; + } + Error(_("audio/alsa: snd_pcm_writei failed: %s\n"), snd_strerror(err)); + return -1; + } + // this could happen, if underrun happened + Warning(_("audio/alsa: not all frames written\n")); + avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, err); + } + break; + } + RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, avail); + first = 0; - } + } return 0; } @@ -908,27 +892,27 @@ static int AlsaPlayRingbuffer(void) static void AlsaFlushBuffers(void) { if (AlsaPCMHandle) { - int err; - snd_pcm_state_t state; + int err; + snd_pcm_state_t state; - state = snd_pcm_state(AlsaPCMHandle); - Debug(3, "audio/alsa: flush state %s\n", snd_pcm_state_name(state)); - if (state != SND_PCM_STATE_OPEN) { - if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { - Error(_("audio: snd_pcm_drop(): %s\n"), snd_strerror(err)); - } - // ****ing alsa crash, when in open state here - if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { - Error(_("audio: snd_pcm_prepare(): %s\n"), snd_strerror(err)); - } - } + state = snd_pcm_state(AlsaPCMHandle); + Debug(3, "audio/alsa: flush state %s\n", snd_pcm_state_name(state)); + if (state != SND_PCM_STATE_OPEN) { + if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { + Error(_("audio: snd_pcm_drop(): %s\n"), snd_strerror(err)); + } + // ****ing alsa crash, when in open state here + if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { + Error(_("audio: snd_pcm_prepare(): %s\n"), snd_strerror(err)); + } + } } } #ifdef USE_AUDIO_THREAD //---------------------------------------------------------------------------- -// thread playback +// thread playback //---------------------------------------------------------------------------- /** @@ -945,46 +929,44 @@ static int AlsaThread(void) int err; if (!AlsaPCMHandle) { - usleep(24 * 1000); - return -1; + usleep(24 * 1000); + return -1; } for (;;) { - if (AudioPaused) { - return 1; - } - // wait for space in kernel buffers - if ((err = snd_pcm_wait(AlsaPCMHandle, 24)) < 0) { - Warning(_("audio/alsa: wait underrun error? '%s'\n"), - snd_strerror(err)); - err = snd_pcm_recover(AlsaPCMHandle, err, 0); - if (err >= 0) { - continue; - } - Error(_("audio/alsa: snd_pcm_wait(): %s\n"), snd_strerror(err)); - usleep(24 * 1000); - return -1; - } - break; + if (AudioPaused) { + return 1; + } + // wait for space in kernel buffers + if ((err = snd_pcm_wait(AlsaPCMHandle, 24)) < 0) { + Warning(_("audio/alsa: wait underrun error? 
'%s'\n"), snd_strerror(err)); + err = snd_pcm_recover(AlsaPCMHandle, err, 0); + if (err >= 0) { + continue; + } + Error(_("audio/alsa: snd_pcm_wait(): %s\n"), snd_strerror(err)); + usleep(24 * 1000); + return -1; + } + break; } - if (!err || AudioPaused) { // timeout or some commands - return 1; + if (!err || AudioPaused) { // timeout or some commands + return 1; } - if ((err = AlsaPlayRingbuffer())) { // empty or error - snd_pcm_state_t state; + if ((err = AlsaPlayRingbuffer())) { // empty or error + snd_pcm_state_t state; - if (err < 0) { // underrun error - return -1; - } + if (err < 0) { // underrun error + return -1; + } - state = snd_pcm_state(AlsaPCMHandle); - if (state != SND_PCM_STATE_RUNNING) { - Debug(3, "audio/alsa: stopping play '%s'\n", - snd_pcm_state_name(state)); - return 0; - } + state = snd_pcm_state(AlsaPCMHandle); + if (state != SND_PCM_STATE_RUNNING) { + Debug(3, "audio/alsa: stopping play '%s'\n", snd_pcm_state_name(state)); + return 0; + } - usleep(24 * 1000); // let fill/empty the buffers + usleep(24 * 1000); // let fill/empty the buffers } return 1; } @@ -1006,42 +988,41 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough) // &&|| hell if (!(passthrough && ((device = AudioPassthroughDevice) - || (device = getenv("ALSA_PASSTHROUGH_DEVICE")))) - && !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) { - device = "default"; + || (device = getenv("ALSA_PASSTHROUGH_DEVICE")))) + && !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) { + device = "default"; } - if (!AudioDoingInit) { // reduce blabla during init - Info(_("audio/alsa: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device); + if (!AudioDoingInit) { // reduce blabla during init + Info(_("audio/alsa: using %sdevice '%s'\n"), passthrough ? 
"pass-through " : "", device); } // // for AC3 pass-through try to set the non-audio bit, use AES0=6 // if (passthrough && AudioAppendAES) { #if 0 - // FIXME: not yet finished - char *buf; - const char *s; - int n; + // FIXME: not yet finished + char *buf; + const char *s; + int n; - n = strlen(device); - buf = alloca(n + sizeof(":AES0=6") + 1); - strcpy(buf, device); - if (!(s = strchr(buf, ':'))) { - // no alsa parameters - strcpy(buf + n, ":AES=6"); - } - Debug(3, "audio/alsa: try '%s'\n", buf); + n = strlen(device); + buf = alloca(n + sizeof(":AES0=6") + 1); + strcpy(buf, device); + if (!(s = strchr(buf, ':'))) { + // no alsa parameters + strcpy(buf + n, ":AES=6"); + } + Debug(3, "audio/alsa: try '%s'\n", buf); #endif } // open none blocking; if device is already used, we don't want wait - if ((err = - snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK,SND_PCM_NONBLOCK)) < 0) { - Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err)); - return NULL; + if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) { + Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err)); + return NULL; } if ((err = snd_pcm_nonblock(handle, 0)) < 0) { - Error(_("audio/alsa: can't set block mode: %s\n"), snd_strerror(err)); + Error(_("audio/alsa: can't set block mode: %s\n"), snd_strerror(err)); } return handle; } @@ -1058,14 +1039,13 @@ static void AlsaInitPCM(void) int err; if (!(handle = AlsaOpenPCM(0))) { - return; + return; } // FIXME: pass-through and pcm out can support different features snd_pcm_hw_params_alloca(&hw_params); // choose all parameters if ((err = snd_pcm_hw_params_any(handle, hw_params)) < 0) { - Error(_("audio: snd_pcm_hw_params_any: no configurations available: %s\n"), - snd_strerror(err)); + Error(_("audio: snd_pcm_hw_params_any: no configurations available: %s\n"), snd_strerror(err)); } AlsaCanPause = snd_pcm_hw_params_can_pause(hw_params); Info(_("audio/alsa: supports pause: %s\n"), AlsaCanPause ? 
"yes" : "no"); @@ -1074,7 +1054,7 @@ static void AlsaInitPCM(void) } //---------------------------------------------------------------------------- -// Alsa Mixer +// Alsa Mixer //---------------------------------------------------------------------------- /** @@ -1087,9 +1067,9 @@ static void AlsaSetVolume(int volume) int v; if (AlsaMixer && AlsaMixerElem) { - v = (volume * AlsaRatio) / (1000 * 1000); - snd_mixer_selem_set_playback_volume(AlsaMixerElem, 0, v); - snd_mixer_selem_set_playback_volume(AlsaMixerElem, 1, v); + v = (volume * AlsaRatio) / (1000 * 1000); + snd_mixer_selem_set_playback_volume(AlsaMixerElem, 0, v); + snd_mixer_selem_set_playback_volume(AlsaMixerElem, 1, v); } } @@ -1106,49 +1086,47 @@ static void AlsaInitMixer(void) long alsa_mixer_elem_max; if (!(device = AudioMixerDevice)) { - if (!(device = getenv("ALSA_MIXER"))) { - device = "default"; - } + if (!(device = getenv("ALSA_MIXER"))) { + device = "default"; + } } if (!(channel = AudioMixerChannel)) { - if (!(channel = getenv("ALSA_MIXER_CHANNEL"))) { - channel = "PCM"; - } + if (!(channel = getenv("ALSA_MIXER_CHANNEL"))) { + channel = "PCM"; + } } Debug(3, "audio/alsa: mixer %s - %s open\n", device, channel); snd_mixer_open(&alsa_mixer, 0); if (alsa_mixer && snd_mixer_attach(alsa_mixer, device) >= 0 - && snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0 - && snd_mixer_load(alsa_mixer) >= 0) { + && snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0 && snd_mixer_load(alsa_mixer) >= 0) { - const char *const alsa_mixer_elem_name = channel; + const char *const alsa_mixer_elem_name = channel; - alsa_mixer_elem = snd_mixer_first_elem(alsa_mixer); - while (alsa_mixer_elem) { - const char *name; + alsa_mixer_elem = snd_mixer_first_elem(alsa_mixer); + while (alsa_mixer_elem) { + const char *name; - name = snd_mixer_selem_get_name(alsa_mixer_elem); - if (!strcasecmp(name, alsa_mixer_elem_name)) { - snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem, - &alsa_mixer_elem_min, &alsa_mixer_elem_max); - AlsaRatio = 1000 * (alsa_mixer_elem_max - alsa_mixer_elem_min); - Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n", - alsa_mixer_elem_min, alsa_mixer_elem_max, AlsaRatio); - break; - } + name = snd_mixer_selem_get_name(alsa_mixer_elem); + if (!strcasecmp(name, alsa_mixer_elem_name)) { + snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem, &alsa_mixer_elem_min, &alsa_mixer_elem_max); + AlsaRatio = 1000 * (alsa_mixer_elem_max - alsa_mixer_elem_min); + Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n", alsa_mixer_elem_min, alsa_mixer_elem_max, + AlsaRatio); + break; + } - alsa_mixer_elem = snd_mixer_elem_next(alsa_mixer_elem); - } + alsa_mixer_elem = snd_mixer_elem_next(alsa_mixer_elem); + } - AlsaMixer = alsa_mixer; - AlsaMixerElem = alsa_mixer_elem; + AlsaMixer = alsa_mixer; + AlsaMixerElem = alsa_mixer_elem; } else { - Error(_("audio/alsa: can't open mixer '%s'\n"), device); + Error(_("audio/alsa: can't open mixer '%s'\n"), device); } } //---------------------------------------------------------------------------- -// Alsa API +// Alsa API //---------------------------------------------------------------------------- /** @@ -1166,22 +1144,22 @@ static int64_t AlsaGetDelay(void) // setup error if (!AlsaPCMHandle || !AudioRing[AudioRingRead].HwSampleRate) { - return 0L; + return 0L; } // delay in frames in alsa + kernel buffers if ((err = snd_pcm_delay(AlsaPCMHandle, &delay)) < 0) { - //Debug(3, "audio/alsa: no hw delay\n"); - delay = 0L; + //Debug(3, "audio/alsa: no hw delay\n"); + delay = 0L; 
#ifdef DEBUG } else if (snd_pcm_state(AlsaPCMHandle) != SND_PCM_STATE_RUNNING) { - //Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay); + //Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay); #endif } //Debug(3, "audio/alsa: %ld frames hw delay\n", delay); // delay can be negative, when underrun occur if (delay < 0) { - delay = 0L; + delay = 0L; } pts = ((int64_t) delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate; @@ -1209,123 +1187,108 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) int err; int delay; - if (!AlsaPCMHandle) { // alsa not running yet - // FIXME: if open fails for fe. pass-through, we never recover - return -1; + if (!AlsaPCMHandle) { // alsa not running yet + // FIXME: if open fails for fe. pass-through, we never recover + return -1; } - if (!AudioAlsaNoCloseOpen) { // close+open to fix HDMI no sound bug - snd_pcm_t *handle; + if (!AudioAlsaNoCloseOpen) { // close+open to fix HDMI no sound bug + snd_pcm_t *handle; - handle = AlsaPCMHandle; - // no lock needed, thread exit in main loop only - //Debug(3, "audio: %s [\n", __FUNCTION__); - AlsaPCMHandle = NULL; // other threads should check handle - snd_pcm_close(handle); - if (AudioAlsaCloseOpenDelay) { - usleep(50 * 1000); // 50ms delay for alsa recovery - } - // FIXME: can use multiple retries - if (!(handle = AlsaOpenPCM(passthrough))) { - return -1; - } - AlsaPCMHandle = handle; - //Debug(3, "audio: %s ]\n", __FUNCTION__); + handle = AlsaPCMHandle; + // no lock needed, thread exit in main loop only + //Debug(3, "audio: %s [\n", __FUNCTION__); + AlsaPCMHandle = NULL; // other threads should check handle + snd_pcm_close(handle); + if (AudioAlsaCloseOpenDelay) { + usleep(50 * 1000); // 50ms delay for alsa recovery + } + // FIXME: can use multiple retries + if (!(handle = AlsaOpenPCM(passthrough))) { + return -1; + } + AlsaPCMHandle = handle; + //Debug(3, "audio: %s ]\n", __FUNCTION__); } for (;;) { - if ((err = - snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, - AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : - SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, - 96 * 1000))) { - // try reduced buffer size (needed for sunxi) - // FIXME: alternativ make this configurable - if ((err = - snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, - AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : - SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, - 72 * 1000))) { + if ((err = + snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, + AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, + 96 * 1000))) { + // try reduced buffer size (needed for sunxi) + // FIXME: alternativ make this configurable + if ((err = + snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, + AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, *channels, + *freq, 1, 72 * 1000))) { - /* - if ( err == -EBADFD ) { - snd_pcm_close(AlsaPCMHandle); - AlsaPCMHandle = NULL; - continue; - } - */ + /* + if ( err == -EBADFD ) { + snd_pcm_close(AlsaPCMHandle); + AlsaPCMHandle = NULL; + continue; + } + */ - if (!AudioDoingInit) { - Error(_("audio/alsa: set params error: %s\n"), - snd_strerror(err)); - } - // FIXME: must stop sound, AudioChannels ... invalid - return -1; - } - } - break; + if (!AudioDoingInit) { + Error(_("audio/alsa: set params error: %s\n"), snd_strerror(err)); + } + // FIXME: must stop sound, AudioChannels ... invalid + return -1; + } + } + break; } // this is disabled, no advantages! 
- if (0) { // no underruns allowed, play silence - snd_pcm_sw_params_t *sw_params; - snd_pcm_uframes_t boundary; + if (0) { // no underruns allowed, play silence + snd_pcm_sw_params_t *sw_params; + snd_pcm_uframes_t boundary; - snd_pcm_sw_params_alloca(&sw_params); - err = snd_pcm_sw_params_current(AlsaPCMHandle, sw_params); - if (err < 0) { - Error(_("audio: snd_pcm_sw_params_current failed: %s\n"), - snd_strerror(err)); - } - if ((err = snd_pcm_sw_params_get_boundary(sw_params, &boundary)) < 0) { - Error(_("audio: snd_pcm_sw_params_get_boundary failed: %s\n"), - snd_strerror(err)); - } - Debug(4, "audio/alsa: boundary %lu frames\n", boundary); - if ((err = - snd_pcm_sw_params_set_stop_threshold(AlsaPCMHandle, sw_params, - boundary)) < 0) { - Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), - snd_strerror(err)); - } - if ((err = - snd_pcm_sw_params_set_silence_size(AlsaPCMHandle, sw_params, - boundary)) < 0) { - Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), - snd_strerror(err)); - } - if ((err = snd_pcm_sw_params(AlsaPCMHandle, sw_params)) < 0) { - Error(_("audio: snd_pcm_sw_params failed: %s\n"), - snd_strerror(err)); - } + snd_pcm_sw_params_alloca(&sw_params); + err = snd_pcm_sw_params_current(AlsaPCMHandle, sw_params); + if (err < 0) { + Error(_("audio: snd_pcm_sw_params_current failed: %s\n"), snd_strerror(err)); + } + if ((err = snd_pcm_sw_params_get_boundary(sw_params, &boundary)) < 0) { + Error(_("audio: snd_pcm_sw_params_get_boundary failed: %s\n"), snd_strerror(err)); + } + Debug(4, "audio/alsa: boundary %lu frames\n", boundary); + if ((err = snd_pcm_sw_params_set_stop_threshold(AlsaPCMHandle, sw_params, boundary)) < 0) { + Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), snd_strerror(err)); + } + if ((err = snd_pcm_sw_params_set_silence_size(AlsaPCMHandle, sw_params, boundary)) < 0) { + Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), snd_strerror(err)); + } + if ((err = snd_pcm_sw_params(AlsaPCMHandle, sw_params)) < 0) { + Error(_("audio: snd_pcm_sw_params failed: %s\n"), snd_strerror(err)); + } } // update buffer snd_pcm_get_params(AlsaPCMHandle, &buffer_size, &period_size); - Debug(3, "audio/alsa: buffer size %lu %zdms, period size %lu %zdms\n", - buffer_size, snd_pcm_frames_to_bytes(AlsaPCMHandle, - buffer_size) * 1000 / (*freq * *channels * AudioBytesProSample), - period_size, snd_pcm_frames_to_bytes(AlsaPCMHandle, - period_size) * 1000 / (*freq * *channels * AudioBytesProSample)); - Debug(3, "audio/alsa: state %s\n", - snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); + Debug(3, "audio/alsa: buffer size %lu %zdms, period size %lu %zdms\n", buffer_size, + snd_pcm_frames_to_bytes(AlsaPCMHandle, buffer_size) * 1000 / (*freq * *channels * AudioBytesProSample), + period_size, snd_pcm_frames_to_bytes(AlsaPCMHandle, + period_size) * 1000 / (*freq * *channels * AudioBytesProSample)); + Debug(3, "audio/alsa: state %s\n", snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); AudioStartThreshold = snd_pcm_frames_to_bytes(AlsaPCMHandle, period_size); // buffer time/delay in ms delay = AudioBufferTime; if (VideoAudioDelay > 0) { - delay += VideoAudioDelay / 90; + delay += VideoAudioDelay / 90; } - if (AudioStartThreshold < - (*freq * *channels * AudioBytesProSample * delay) / 1000U) { - AudioStartThreshold = (*freq * *channels * AudioBytesProSample * delay) / 1000U; + if (AudioStartThreshold < (*freq * *channels * AudioBytesProSample * delay) / 1000U) { + AudioStartThreshold = (*freq * *channels * 
AudioBytesProSample * delay) / 1000U; } // no bigger, than 1/3 the buffer if (AudioStartThreshold > AudioRingBufferSize / 3) { - AudioStartThreshold = AudioRingBufferSize / 3; + AudioStartThreshold = AudioRingBufferSize / 3; } if (!AudioDoingInit) { - Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000) - / (*freq * *channels * AudioBytesProSample)); + Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000) + / (*freq * *channels * AudioBytesProSample)); } return 0; @@ -1339,17 +1302,17 @@ static void AlsaPlay(void) int err; if (AlsaCanPause) { - if ((err = snd_pcm_pause(AlsaPCMHandle, 0))) { - Error(_("audio/alsa: snd_pcm_pause(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_pause(AlsaPCMHandle, 0))) { + Error(_("audio/alsa: snd_pcm_pause(): %s\n"), snd_strerror(err)); + } } else { - if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { - Error(_("audio/alsa: snd_pcm_prepare(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_prepare(AlsaPCMHandle)) < 0) { + Error(_("audio/alsa: snd_pcm_prepare(): %s\n"), snd_strerror(err)); + } } #ifdef DEBUG if (snd_pcm_state(AlsaPCMHandle) == SND_PCM_STATE_PAUSED) { - Error(_("audio/alsa: still paused\n")); + Error(_("audio/alsa: still paused\n")); } #endif } @@ -1362,24 +1325,24 @@ static void AlsaPause(void) int err; if (AlsaCanPause) { - if ((err = snd_pcm_pause(AlsaPCMHandle, 1))) { - Error(_("snd_pcm_pause(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_pause(AlsaPCMHandle, 1))) { + Error(_("snd_pcm_pause(): %s\n"), snd_strerror(err)); + } } else { - if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { - Error(_("snd_pcm_drop(): %s\n"), snd_strerror(err)); - } + if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) { + Error(_("snd_pcm_drop(): %s\n"), snd_strerror(err)); + } } } /** ** Empty log callback */ -static void AlsaNoopCallback( __attribute__ ((unused)) - const char *file, __attribute__ ((unused)) - int line, __attribute__ ((unused)) - const char *function, __attribute__ ((unused)) - int err, __attribute__ ((unused)) +static void AlsaNoopCallback( __attribute__((unused)) + const char *file, __attribute__((unused)) + int line, __attribute__((unused)) + const char *function, __attribute__((unused)) + int err, __attribute__((unused)) const char *fmt, ...) 
{ } @@ -1406,13 +1369,13 @@ static void AlsaInit(void) static void AlsaExit(void) { if (AlsaPCMHandle) { - snd_pcm_close(AlsaPCMHandle); - AlsaPCMHandle = NULL; + snd_pcm_close(AlsaPCMHandle); + AlsaPCMHandle = NULL; } if (AlsaMixer) { - snd_mixer_close(AlsaMixer); - AlsaMixer = NULL; - AlsaMixerElem = NULL; + snd_mixer_close(AlsaMixer); + AlsaMixer = NULL; + AlsaMixerElem = NULL; } } @@ -1439,20 +1402,20 @@ static const AudioModule AlsaModule = { #ifdef USE_OSS //============================================================================ -// O S S +// O S S //============================================================================ //---------------------------------------------------------------------------- -// OSS variables +// OSS variables //---------------------------------------------------------------------------- -static int OssPcmFildes = -1; ///< pcm file descriptor -static int OssMixerFildes = -1; ///< mixer file descriptor -static int OssMixerChannel; ///< mixer channel index -static int OssFragmentTime; ///< fragment time in ms +static int OssPcmFildes = -1; ///< pcm file descriptor +static int OssMixerFildes = -1; ///< mixer file descriptor +static int OssMixerChannel; ///< mixer channel index +static int OssFragmentTime; ///< fragment time in ms //---------------------------------------------------------------------------- -// OSS pcm +// OSS pcm //---------------------------------------------------------------------------- /** @@ -1468,53 +1431,52 @@ static int OssPlayRingbuffer(void) first = 1; for (;;) { - audio_buf_info bi; - const void *p; - int n; + audio_buf_info bi; + const void *p; + int n; - if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), - strerror(errno)); - return -1; - } - Debug(4, "audio/oss: %d bytes free\n", bi.bytes); + if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) { + Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), strerror(errno)); + return -1; + } + Debug(4, "audio/oss: %d bytes free\n", bi.bytes); - n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); - if (!n) { // ring buffer empty - if (first) { // only error on first loop - return 1; - } - return 0; - } - if (n < bi.bytes) { // not enough bytes in ring buffer - bi.bytes = n; - } - if (bi.bytes <= 0) { // full or buffer empty - break; // bi.bytes could become negative! - } + n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); + if (!n) { // ring buffer empty + if (first) { // only error on first loop + return 1; + } + return 0; + } + if (n < bi.bytes) { // not enough bytes in ring buffer + bi.bytes = n; + } + if (bi.bytes <= 0) { // full or buffer empty + break; // bi.bytes could become negative! 
+ } - if (AudioSoftVolume && !AudioRing[AudioRingRead].Passthrough) { - // FIXME: quick&dirty cast - AudioSoftAmplifier((int16_t *) p, bi.bytes); - // FIXME: if not all are written, we double amplify them - } - for (;;) { - n = write(OssPcmFildes, p, bi.bytes); - if (n != bi.bytes) { - if (n < 0) { - if (n == EAGAIN) { - continue; - } - Error(_("audio/oss: write error: %s\n"), strerror(errno)); - return 1; - } - Warning(_("audio/oss: error not all bytes written\n")); - } - break; - } - // advance how many could written - RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, n); - first = 0; + if (AudioSoftVolume && !AudioRing[AudioRingRead].Passthrough) { + // FIXME: quick&dirty cast + AudioSoftAmplifier((int16_t *) p, bi.bytes); + // FIXME: if not all are written, we double amplify them + } + for (;;) { + n = write(OssPcmFildes, p, bi.bytes); + if (n != bi.bytes) { + if (n < 0) { + if (n == EAGAIN) { + continue; + } + Error(_("audio/oss: write error: %s\n"), strerror(errno)); + return 1; + } + Warning(_("audio/oss: error not all bytes written\n")); + } + break; + } + // advance how many could written + RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, n); + first = 0; } return 0; @@ -1526,18 +1488,17 @@ static int OssPlayRingbuffer(void) static void OssFlushBuffers(void) { if (OssPcmFildes != -1) { - // flush kernel buffers - if (ioctl(OssPcmFildes, SNDCTL_DSP_HALT_OUTPUT, NULL) < 0) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_HALT_OUTPUT): %s\n"), - strerror(errno)); - } + // flush kernel buffers + if (ioctl(OssPcmFildes, SNDCTL_DSP_HALT_OUTPUT, NULL) < 0) { + Error(_("audio/oss: ioctl(SNDCTL_DSP_HALT_OUTPUT): %s\n"), strerror(errno)); + } } } #ifdef USE_AUDIO_THREAD //---------------------------------------------------------------------------- -// thread playback +// thread playback //---------------------------------------------------------------------------- /** @@ -1552,41 +1513,41 @@ static int OssThread(void) int err; if (!OssPcmFildes) { - usleep(OssFragmentTime * 1000); - return -1; + usleep(OssFragmentTime * 1000); + return -1; } for (;;) { - struct pollfd fds[1]; + struct pollfd fds[1]; - if (AudioPaused) { - return 1; - } - // wait for space in kernel buffers - fds[0].fd = OssPcmFildes; - fds[0].events = POLLOUT | POLLERR; - // wait for space in kernel buffers - err = poll(fds, 1, OssFragmentTime); - if (err < 0) { - if (err == EAGAIN) { - continue; - } - Error(_("audio/oss: error poll %s\n"), strerror(errno)); - usleep(OssFragmentTime * 1000); - return -1; - } - break; + if (AudioPaused) { + return 1; + } + // wait for space in kernel buffers + fds[0].fd = OssPcmFildes; + fds[0].events = POLLOUT | POLLERR; + // wait for space in kernel buffers + err = poll(fds, 1, OssFragmentTime); + if (err < 0) { + if (err == EAGAIN) { + continue; + } + Error(_("audio/oss: error poll %s\n"), strerror(errno)); + usleep(OssFragmentTime * 1000); + return -1; + } + break; } - if (!err || AudioPaused) { // timeout or some commands - return 1; + if (!err || AudioPaused) { // timeout or some commands + return 1; } - if ((err = OssPlayRingbuffer())) { // empty / error - if (err < 0) { // underrun error - return -1; - } - pthread_yield(); - usleep(OssFragmentTime * 1000); // let fill/empty the buffers - return 0; + if ((err = OssPlayRingbuffer())) { // empty / error + if (err < 0) { // underrun error + return -1; + } + pthread_yield(); + usleep(OssFragmentTime * 1000); // let fill/empty the buffers + return 0; } return 1; @@ -1608,19 +1569,17 @@ static int OssOpenPCM(int passthrough) // 
&&|| hell if (!(passthrough && ((device = AudioPassthroughDevice) - || (device = getenv("OSS_PASSTHROUGHDEV")))) - && !(device = AudioPCMDevice) && !(device = getenv("OSS_AUDIODEV"))) { - device = "/dev/dsp"; + || (device = getenv("OSS_PASSTHROUGHDEV")))) + && !(device = AudioPCMDevice) && !(device = getenv("OSS_AUDIODEV"))) { + device = "/dev/dsp"; } if (!AudioDoingInit) { - Info(_("audio/oss: using %sdevice '%s'\n"), - passthrough ? "pass-through " : "", device); + Info(_("audio/oss: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device); } if ((fildes = open(device, O_WRONLY)) < 0) { - Error(_("audio/oss: can't open dsp device '%s': %s\n"), device, - strerror(errno)); - return -1; + Error(_("audio/oss: can't open dsp device '%s': %s\n"), device, strerror(errno)); + return -1; } return fildes; } @@ -1640,7 +1599,7 @@ static void OssInitPCM(void) } //---------------------------------------------------------------------------- -// OSS Mixer +// OSS Mixer //---------------------------------------------------------------------------- /** @@ -1653,20 +1612,19 @@ static void OssSetVolume(int volume) int v; if (OssMixerFildes != -1) { - v = (volume * 255) / 1000; - v &= 0xff; - v = (v << 8) | v; - if (ioctl(OssMixerFildes, MIXER_WRITE(OssMixerChannel), &v) < 0) { - Error(_("audio/oss: ioctl(MIXER_WRITE): %s\n"), strerror(errno)); - } + v = (volume * 255) / 1000; + v &= 0xff; + v = (v << 8) | v; + if (ioctl(OssMixerFildes, MIXER_WRITE(OssMixerChannel), &v) < 0) { + Error(_("audio/oss: ioctl(MIXER_WRITE): %s\n"), strerror(errno)); + } } } /** ** Mixer channel name table. */ -static const char *OssMixerChannelNames[SOUND_MIXER_NRDEVICES] = - SOUND_DEVICE_NAMES; +static const char *OssMixerChannelNames[SOUND_MIXER_NRDEVICES] = SOUND_DEVICE_NAMES; /** ** Initialize OSS mixer. 
@@ -1680,46 +1638,44 @@ static void OssInitMixer(void) int i; if (!(device = AudioMixerDevice)) { - if (!(device = getenv("OSS_MIXERDEV"))) { - device = "/dev/mixer"; - } + if (!(device = getenv("OSS_MIXERDEV"))) { + device = "/dev/mixer"; + } } if (!(channel = AudioMixerChannel)) { - if (!(channel = getenv("OSS_MIXER_CHANNEL"))) { - channel = "pcm"; - } + if (!(channel = getenv("OSS_MIXER_CHANNEL"))) { + channel = "pcm"; + } } Debug(3, "audio/oss: mixer %s - %s open\n", device, channel); if ((fildes = open(device, O_RDWR)) < 0) { - Error(_("audio/oss: can't open mixer device '%s': %s\n"), device, - strerror(errno)); - return; + Error(_("audio/oss: can't open mixer device '%s': %s\n"), device, strerror(errno)); + return; } // search channel name if (ioctl(fildes, SOUND_MIXER_READ_DEVMASK, &devmask) < 0) { - Error(_("audio/oss: ioctl(SOUND_MIXER_READ_DEVMASK): %s\n"), - strerror(errno)); - close(fildes); - return; + Error(_("audio/oss: ioctl(SOUND_MIXER_READ_DEVMASK): %s\n"), strerror(errno)); + close(fildes); + return; } for (i = 0; i < SOUND_MIXER_NRDEVICES; ++i) { - if (!strcasecmp(OssMixerChannelNames[i], channel)) { - if (devmask & (1 << i)) { - OssMixerFildes = fildes; - OssMixerChannel = i; - return; - } - Error(_("audio/oss: channel '%s' not supported\n"), channel); - break; - } + if (!strcasecmp(OssMixerChannelNames[i], channel)) { + if (devmask & (1 << i)) { + OssMixerFildes = fildes; + OssMixerChannel = i; + return; + } + Error(_("audio/oss: channel '%s' not supported\n"), channel); + break; + } } Error(_("audio/oss: channel '%s' not found\n"), channel); close(fildes); } //---------------------------------------------------------------------------- -// OSS API +// OSS API //---------------------------------------------------------------------------- /** @@ -1734,26 +1690,24 @@ static int64_t OssGetDelay(void) // setup failure if (OssPcmFildes == -1 || !AudioRing[AudioRingRead].HwSampleRate) { - return 0L; + return 0L; } - if (!AudioRunning) { // audio not running - Error(_("audio/oss: should not happen\n")); - return 0L; + if (!AudioRunning) { // audio not running + Error(_("audio/oss: should not happen\n")); + return 0L; } // delay in bytes in kernel buffers delay = -1; if (ioctl(OssPcmFildes, SNDCTL_DSP_GETODELAY, &delay) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"), - strerror(errno)); - return 0L; + Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"), strerror(errno)); + return 0L; } if (delay < 0) { - delay = 0; + delay = 0; } pts = ((int64_t) delay * 90 * 1000) - / (AudioRing[AudioRingRead].HwSampleRate * - AudioRing[AudioRingRead].HwChannels * AudioBytesProSample); + / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels * AudioBytesProSample); return pts; } @@ -1776,86 +1730,81 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough) int delay; audio_buf_info bi; - if (OssPcmFildes == -1) { // OSS not ready - // FIXME: if open fails for fe. pass-through, we never recover - return -1; + if (OssPcmFildes == -1) { // OSS not ready + // FIXME: if open fails for fe. 
pass-through, we never recover + return -1; } - if (1) { // close+open for pcm / AC-3 - int fildes; + if (1) { // close+open for pcm / AC-3 + int fildes; - fildes = OssPcmFildes; - OssPcmFildes = -1; - close(fildes); - if (!(fildes = OssOpenPCM(passthrough))) { - return -1; - } - OssPcmFildes = fildes; + fildes = OssPcmFildes; + OssPcmFildes = -1; + close(fildes); + if (!(fildes = OssOpenPCM(passthrough))) { + return -1; + } + OssPcmFildes = fildes; } ret = 0; - tmp = AFMT_S16_NE; // native 16 bits + tmp = AFMT_S16_NE; // native 16 bits if (ioctl(OssPcmFildes, SNDCTL_DSP_SETFMT, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_SETFMT): %s\n"), strerror(errno)); - // FIXME: stop player, set setup failed flag - return -1; + Error(_("audio/oss: ioctl(SNDCTL_DSP_SETFMT): %s\n"), strerror(errno)); + // FIXME: stop player, set setup failed flag + return -1; } if (tmp != AFMT_S16_NE) { - Error(_("audio/oss: device doesn't support 16 bit sample format.\n")); - // FIXME: stop player, set setup failed flag - return -1; + Error(_("audio/oss: device doesn't support 16 bit sample format.\n")); + // FIXME: stop player, set setup failed flag + return -1; } tmp = *channels; if (ioctl(OssPcmFildes, SNDCTL_DSP_CHANNELS, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_CHANNELS): %s\n"), - strerror(errno)); - return -1; + Error(_("audio/oss: ioctl(SNDCTL_DSP_CHANNELS): %s\n"), strerror(errno)); + return -1; } if (tmp != *channels) { - Warning(_("audio/oss: device doesn't support %d channels.\n"), - *channels); - *channels = tmp; - ret = 1; + Warning(_("audio/oss: device doesn't support %d channels.\n"), *channels); + *channels = tmp; + ret = 1; } tmp = *sample_rate; if (ioctl(OssPcmFildes, SNDCTL_DSP_SPEED, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_SPEED): %s\n"), strerror(errno)); - return -1; + Error(_("audio/oss: ioctl(SNDCTL_DSP_SPEED): %s\n"), strerror(errno)); + return -1; } if (tmp != *sample_rate) { - Warning(_("audio/oss: device doesn't support %dHz sample rate.\n"), - *sample_rate); - *sample_rate = tmp; - ret = 1; + Warning(_("audio/oss: device doesn't support %dHz sample rate.\n"), *sample_rate); + *sample_rate = tmp; + ret = 1; } #ifdef SNDCTL_DSP_POLICY tmp = 3; if (ioctl(OssPcmFildes, SNDCTL_DSP_POLICY, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_POLICY): %s\n"), strerror(errno)); + Error(_("audio/oss: ioctl(SNDCTL_DSP_POLICY): %s\n"), strerror(errno)); } else { - Info("audio/oss: set policy to %d\n", tmp); + Info("audio/oss: set policy to %d\n", tmp); } #endif if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), - strerror(errno)); - bi.fragsize = 4096; - bi.fragstotal = 16; + Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), strerror(errno)); + bi.fragsize = 4096; + bi.fragstotal = 16; } else { - Debug(3, "audio/oss: %d bytes buffered\n", bi.bytes); + Debug(3, "audio/oss: %d bytes buffered\n", bi.bytes); } OssFragmentTime = (bi.fragsize * 1000) - / (*sample_rate * *channels * AudioBytesProSample); + / (*sample_rate * *channels * AudioBytesProSample); - Debug(3, "audio/oss: buffer size %d %dms, fragment size %d %dms\n", - bi.fragsize * bi.fragstotal, (bi.fragsize * bi.fragstotal * 1000) - / (*sample_rate * *channels * AudioBytesProSample), bi.fragsize, - OssFragmentTime); + Debug(3, "audio/oss: buffer size %d %dms, fragment size %d %dms\n", bi.fragsize * bi.fragstotal, + (bi.fragsize * bi.fragstotal * 1000) + / (*sample_rate * *channels * AudioBytesProSample), bi.fragsize, 
OssFragmentTime); // start when enough bytes for initial write AudioStartThreshold = (bi.fragsize - 1) * bi.fragstotal; @@ -1863,21 +1812,19 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough) // buffer time/delay in ms delay = AudioBufferTime + 300; if (VideoAudioDelay > 0) { - delay += VideoAudioDelay / 90; + delay += VideoAudioDelay / 90; } - if (AudioStartThreshold < - (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U) { - AudioStartThreshold = - (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U; + if (AudioStartThreshold < (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U) { + AudioStartThreshold = (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U; } // no bigger, than 1/3 the buffer if (AudioStartThreshold > AudioRingBufferSize / 3) { - AudioStartThreshold = AudioRingBufferSize / 3; + AudioStartThreshold = AudioRingBufferSize / 3; } if (!AudioDoingInit) { - Info(_("audio/oss: delay %ums\n"), (AudioStartThreshold * 1000) - / (*sample_rate * *channels * AudioBytesProSample)); + Info(_("audio/oss: delay %ums\n"), (AudioStartThreshold * 1000) + / (*sample_rate * *channels * AudioBytesProSample)); } return ret; @@ -1912,12 +1859,12 @@ static void OssInit(void) static void OssExit(void) { if (OssPcmFildes != -1) { - close(OssPcmFildes); - OssPcmFildes = -1; + close(OssPcmFildes); + OssPcmFildes = -1; } if (OssMixerFildes != -1) { - close(OssMixerFildes); - OssMixerFildes = -1; + close(OssMixerFildes); + OssMixerFildes = -1; } } @@ -1942,7 +1889,7 @@ static const AudioModule OssModule = { #endif // USE_OSS //============================================================================ -// Noop +// Noop //============================================================================ /** @@ -1960,7 +1907,7 @@ static int64_t NoopGetDelay(void) ** ** @param volume volume (0 .. 1000) */ -static void NoopSetVolume( __attribute__ ((unused)) +static void NoopSetVolume( __attribute__((unused)) int volume) { } @@ -1972,9 +1919,9 @@ static void NoopSetVolume( __attribute__ ((unused)) ** @param channels number of channels ** @param passthrough use pass-through (AC-3, ...) 
device */ -static int NoopSetup( __attribute__ ((unused)) - int *channels, __attribute__ ((unused)) - int *freq, __attribute__ ((unused)) +static int NoopSetup( __attribute__((unused)) + int *channels, __attribute__((unused)) + int *freq, __attribute__((unused)) int passthrough) { return -1; @@ -2003,7 +1950,7 @@ static const AudioModule NoopModule = { }; //---------------------------------------------------------------------------- -// thread playback +// thread playback //---------------------------------------------------------------------------- #ifdef USE_AUDIO_THREAD @@ -2024,28 +1971,25 @@ static int AudioNextRing(void) sample_rate = AudioRing[AudioRingRead].HwSampleRate; channels = AudioRing[AudioRingRead].HwChannels; if (AudioUsedModule->Setup(&sample_rate, &channels, passthrough)) { - Error(_("audio: can't set channels %d sample-rate %dHz\n"), channels, - sample_rate); - // FIXME: handle error - AudioRing[AudioRingRead].HwSampleRate = 0; - AudioRing[AudioRingRead].InSampleRate = 0; - return -1; + Error(_("audio: can't set channels %d sample-rate %dHz\n"), channels, sample_rate); + // FIXME: handle error + AudioRing[AudioRingRead].HwSampleRate = 0; + AudioRing[AudioRingRead].InSampleRate = 0; + return -1; } - AudioSetVolume(AudioVolume); // update channel delta + AudioSetVolume(AudioVolume); // update channel delta AudioResetCompressor(); AudioResetNormalizer(); Debug(3, "audio: a/v next buf(%d,%4zdms)\n", atomic_read(&AudioRingFilled), - (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); + (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); // stop, if not enough in next buffer used = RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer); - if (AudioStartThreshold * 10 < used || (AudioVideoIsReady - && AudioStartThreshold < used)) { - return 0; + if (AudioStartThreshold * 10 < used || (AudioVideoIsReady && AudioStartThreshold < used)) { + return 0; } return 1; } @@ -2058,119 +2002,115 @@ static int AudioNextRing(void) static void *AudioPlayHandlerThread(void *dummy) { Debug(3, "audio: play thread started\n"); - prctl(PR_SET_NAME,"cuvid audio",0,0,0); + prctl(PR_SET_NAME, "cuvid audio", 0, 0, 0); for (;;) { - // check if we should stop the thread - if (AudioThreadStop) { - Debug(3, "audio: play thread stopped\n"); - return PTHREAD_CANCELED; - } + // check if we should stop the thread + if (AudioThreadStop) { + Debug(3, "audio: play thread stopped\n"); + return PTHREAD_CANCELED; + } - Debug(3, "audio: wait on start condition\n"); - pthread_mutex_lock(&AudioMutex); - AudioRunning = 0; - do { - pthread_cond_wait(&AudioStartCond, &AudioMutex); - // cond_wait can return, without signal! - } while (!AudioRunning); - pthread_mutex_unlock(&AudioMutex); + Debug(3, "audio: wait on start condition\n"); + pthread_mutex_lock(&AudioMutex); + AudioRunning = 0; + do { + pthread_cond_wait(&AudioStartCond, &AudioMutex); + // cond_wait can return, without signal! 
+ } while (!AudioRunning); + pthread_mutex_unlock(&AudioMutex); - Debug(3, "audio: ----> %dms start\n", (AudioUsedBytes() * 1000) - / (!AudioRing[AudioRingWrite].HwSampleRate + - !AudioRing[AudioRingWrite].HwChannels + - AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); + Debug(3, "audio: ----> %dms start\n", (AudioUsedBytes() * 1000) + / (!AudioRing[AudioRingWrite].HwSampleRate + !AudioRing[AudioRingWrite].HwChannels + + AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); - do { - int filled; - int read; - int flush; - int err; - int i; + do { + int filled; + int read; + int flush; + int err; + int i; - // check if we should stop the thread - if (AudioThreadStop) { - Debug(3, "audio: play thread stopped\n"); - return PTHREAD_CANCELED; - } - // look if there is a flush command in the queue - flush = 0; - filled = atomic_read(&AudioRingFilled); - read = AudioRingRead; - i = filled; - while (i--) { - read = (read + 1) % AUDIO_RING_MAX; - if (AudioRing[read].FlushBuffers) { - AudioRing[read].FlushBuffers = 0; - AudioRingRead = read; - // handle all flush in queue - flush = filled - i; - } - } + // check if we should stop the thread + if (AudioThreadStop) { + Debug(3, "audio: play thread stopped\n"); + return PTHREAD_CANCELED; + } + // look if there is a flush command in the queue + flush = 0; + filled = atomic_read(&AudioRingFilled); + read = AudioRingRead; + i = filled; + while (i--) { + read = (read + 1) % AUDIO_RING_MAX; + if (AudioRing[read].FlushBuffers) { + AudioRing[read].FlushBuffers = 0; + AudioRingRead = read; + // handle all flush in queue + flush = filled - i; + } + } - if (flush) { - Debug(3, "audio: flush %d ring buffer(s)\n", flush); - AudioUsedModule->FlushBuffers(); - atomic_sub(flush, &AudioRingFilled); - if (AudioNextRing()) { - Debug(3, "audio: HandlerThread break after flush\n"); - break; - } - Debug(3, "audio: continue after flush\n"); - } - // try to play some samples - err = 0; - if (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)) { - err = AudioUsedModule->Thread(); - } - // underrun, check if new ring buffer is available - if (!err) { - int passthrough; - int sample_rate; - int channels; - int old_passthrough; - int old_sample_rate; - int old_channels; + if (flush) { + Debug(3, "audio: flush %d ring buffer(s)\n", flush); + AudioUsedModule->FlushBuffers(); + atomic_sub(flush, &AudioRingFilled); + if (AudioNextRing()) { + Debug(3, "audio: HandlerThread break after flush\n"); + break; + } + Debug(3, "audio: continue after flush\n"); + } + // try to play some samples + err = 0; + if (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)) { + err = AudioUsedModule->Thread(); + } + // underrun, check if new ring buffer is available + if (!err) { + int passthrough; + int sample_rate; + int channels; + int old_passthrough; + int old_sample_rate; + int old_channels; - // underrun, and no new ring buffer, goto sleep. - if (!atomic_read(&AudioRingFilled)) { - Debug(3,"audio: HandlerThread Underrun with no new data\n"); - break; - } + // underrun, and no new ring buffer, goto sleep. 
+ if (!atomic_read(&AudioRingFilled)) { + Debug(3, "audio: HandlerThread Underrun with no new data\n"); + break; + } - Debug(3, "audio: next ring buffer\n"); - old_passthrough = AudioRing[AudioRingRead].Passthrough; - old_sample_rate = AudioRing[AudioRingRead].HwSampleRate; - old_channels = AudioRing[AudioRingRead].HwChannels; + Debug(3, "audio: next ring buffer\n"); + old_passthrough = AudioRing[AudioRingRead].Passthrough; + old_sample_rate = AudioRing[AudioRingRead].HwSampleRate; + old_channels = AudioRing[AudioRingRead].HwChannels; - atomic_dec(&AudioRingFilled); - AudioRingRead = (AudioRingRead + 1) % AUDIO_RING_MAX; + atomic_dec(&AudioRingFilled); + AudioRingRead = (AudioRingRead + 1) % AUDIO_RING_MAX; - passthrough = AudioRing[AudioRingRead].Passthrough; - sample_rate = AudioRing[AudioRingRead].HwSampleRate; - channels = AudioRing[AudioRingRead].HwChannels; - Debug(3, "audio: thread channels %d frequency %dHz %s\n", - channels, sample_rate, passthrough ? "pass-through" : ""); - // audio config changed? - if (old_passthrough != passthrough - || old_sample_rate != sample_rate - || old_channels != channels) { - // FIXME: wait for buffer drain - if (AudioNextRing()) { - Debug(3,"audio: HandlerThread break on nextring"); - break; - } - } else { - AudioResetCompressor(); - AudioResetNormalizer(); - } - } - // FIXME: check AudioPaused ...Thread() - if (AudioPaused) { - Debug(3,"audio: HandlerThread break on paused"); - break; - } - } while (AudioRing[AudioRingRead].HwSampleRate); + passthrough = AudioRing[AudioRingRead].Passthrough; + sample_rate = AudioRing[AudioRingRead].HwSampleRate; + channels = AudioRing[AudioRingRead].HwChannels; + Debug(3, "audio: thread channels %d frequency %dHz %s\n", channels, sample_rate, + passthrough ? "pass-through" : ""); + // audio config changed? 
+ if (old_passthrough != passthrough || old_sample_rate != sample_rate || old_channels != channels) { + // FIXME: wait for buffer drain + if (AudioNextRing()) { + Debug(3, "audio: HandlerThread break on nextring"); + break; + } + } else { + AudioResetCompressor(); + AudioResetNormalizer(); + } + } + // FIXME: check AudioPaused ...Thread() + if (AudioPaused) { + Debug(3, "audio: HandlerThread break on paused"); + break; + } + } while (AudioRing[AudioRingRead].HwSampleRate); } return dummy; } @@ -2197,15 +2137,15 @@ static void AudioExitThread(void) Debug(3, "audio: %s\n", __FUNCTION__); if (AudioThread) { - AudioThreadStop = 1; - AudioRunning = 1; // wakeup thread, if needed - pthread_cond_signal(&AudioStartCond); - if (pthread_join(AudioThread, &retval) || retval != PTHREAD_CANCELED) { - Error(_("audio: can't cancel play thread\n")); - } - pthread_cond_destroy(&AudioStartCond); - pthread_mutex_destroy(&AudioMutex); - AudioThread = 0; + AudioThreadStop = 1; + AudioRunning = 1; // wakeup thread, if needed + pthread_cond_signal(&AudioStartCond); + if (pthread_join(AudioThread, &retval) || retval != PTHREAD_CANCELED) { + Error(_("audio: can't cancel play thread\n")); + } + pthread_cond_destroy(&AudioStartCond); + pthread_mutex_destroy(&AudioMutex); + AudioThread = 0; } } @@ -2227,22 +2167,25 @@ static const AudioModule *AudioModules[] = { &NoopModule, }; -void AudioDelayms(int delayms) { - int count; - unsigned char *p; +void AudioDelayms(int delayms) +{ + int count; + unsigned char *p; #ifdef DEBUG - printf("Try Delay Audio for %d ms Samplerate %d Channels %d bps %d\n", - delayms,AudioRing[AudioRingWrite].HwSampleRate,AudioRing[AudioRingWrite].HwChannels,AudioBytesProSample); + printf("Try Delay Audio for %d ms Samplerate %d Channels %d bps %d\n", delayms, + AudioRing[AudioRingWrite].HwSampleRate, AudioRing[AudioRingWrite].HwChannels, AudioBytesProSample); #endif - count = delayms * AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample / 1000; - - if (delayms < 5000 && delayms > 0) { // not more than 5seconds - p = calloc(1,count); - RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, p, count); - free(p); - } + count = + delayms * AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample / + 1000; + + if (delayms < 5000 && delayms > 0) { // not more than 5seconds + p = calloc(1, count); + RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, p, count); + free(p); + } } /** @@ -2262,110 +2205,98 @@ void AudioEnqueue(const void *samples, int count) tick = GetMsTicks(); if (tick - last_tick > 101) { - Debug(3, "audio: enqueue %4d %dms\n", count, tick - last_tick); + Debug(3, "audio: enqueue %4d %dms\n", count, tick - last_tick); } last_tick = tick; #endif if (!AudioRing[AudioRingWrite].HwSampleRate) { - Debug(3, "audio: enqueue not ready\n"); - return; // no setup yet + Debug(3, "audio: enqueue not ready\n"); + return; // no setup yet } // save packet size if (!AudioRing[AudioRingWrite].PacketSize) { - AudioRing[AudioRingWrite].PacketSize = count; - Debug(3, "audio: a/v packet size %d bytes\n", count); + AudioRing[AudioRingWrite].PacketSize = count; + Debug(3, "audio: a/v packet size %d bytes\n", count); } // audio sample modification allowed and needed? 
buffer = (void *)samples; - if (!AudioRing[AudioRingWrite].Passthrough && (AudioCompression - || AudioNormalize - || AudioRing[AudioRingWrite].InChannels != - AudioRing[AudioRingWrite].HwChannels)) { - int frames; + if (!AudioRing[AudioRingWrite].Passthrough && (AudioCompression || AudioNormalize + || AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels)) { + int frames; - // resample into ring-buffer is too complex in the case of a roundabout - // just use a temporary buffer - frames = - count / (AudioRing[AudioRingWrite].InChannels * - AudioBytesProSample); - buffer = - alloca(frames * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample); + // resample into ring-buffer is too complex in the case of a roundabout + // just use a temporary buffer + frames = count / (AudioRing[AudioRingWrite].InChannels * AudioBytesProSample); + buffer = alloca(frames * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); #ifdef USE_AUDIO_MIXER - // Convert / resample input to hardware format - AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames, - buffer, AudioRing[AudioRingWrite].HwChannels); + // Convert / resample input to hardware format + AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames, buffer, + AudioRing[AudioRingWrite].HwChannels); #else #ifdef DEBUG - if (AudioRing[AudioRingWrite].InChannels != - AudioRing[AudioRingWrite].HwChannels) { - Debug(3, "audio: internal failure channels mismatch\n"); - return; - } + if (AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels) { + Debug(3, "audio: internal failure channels mismatch\n"); + return; + } #endif - memcpy(buffer, samples, count); + memcpy(buffer, samples, count); #endif - count = - frames * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample; + count = frames * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; - if (AudioCompression) { // in place operation - AudioCompressor(buffer, count); - } - if (AudioNormalize) { // in place operation - AudioNormalizer(buffer, count); - } + if (AudioCompression) { // in place operation + AudioCompressor(buffer, count); + } + if (AudioNormalize) { // in place operation + AudioNormalizer(buffer, count); + } } n = RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, buffer, count); - if (n != (size_t) count) { - Error(_("audio: can't place %d samples in ring buffer\n"), count); - // too many bytes are lost - // FIXME: caller checks buffer full. - // FIXME: should skip more, longer skip, but less often? - // FIXME: round to channel + sample border + if (n != (size_t)count) { + Error(_("audio: can't place %d samples in ring buffer\n"), count); + // too many bytes are lost + // FIXME: caller checks buffer full. + // FIXME: should skip more, longer skip, but less often? + // FIXME: round to channel + sample border } - if (!AudioRunning) { // check, if we can start the thread - int skip; + if (!AudioRunning) { // check, if we can start the thread + int skip; - n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - skip = AudioSkip; - // FIXME: round to packet size + n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + skip = AudioSkip; + // FIXME: round to packet size - Debug(4, "audio: start? 
%4zdms skip %dms\n", (n * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), - (skip * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); + Debug(4, "audio: start? %4zdms skip %dms\n", (n * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), + (skip * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); - if (skip) { - if (n < (unsigned)skip) { - skip = n; - } - AudioSkip -= skip; - RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); - n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - } - // forced start or enough video + audio buffered - // for some exotic channels * 4 too small - if (AudioStartThreshold * 10 < n || (AudioVideoIsReady -// if ((AudioVideoIsReady - && AudioStartThreshold < n)) { - // restart play-back - // no lock needed, can wakeup next time - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - Debug(3,"Start on AudioEnque\n"); - } + if (skip) { + if (n < (unsigned)skip) { + skip = n; + } + AudioSkip -= skip; + RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); + n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + } + // forced start or enough video + audio buffered + // for some exotic channels * 4 too small + if (AudioStartThreshold * 10 < n || (AudioVideoIsReady +// if ((AudioVideoIsReady + && AudioStartThreshold < n)) { + // restart play-back + // no lock needed, can wakeup next time + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3, "Start on AudioEnque\n"); + } } // Update audio clock (stupid gcc developers thinks INT64_C is unsigned) if (AudioRing[AudioRingWrite].PTS != (int64_t) INT64_C(0x8000000000000000)) { - AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); + AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); } } @@ -2380,112 +2311,96 @@ void AudioVideoReady(int64_t pts) size_t used; if (pts == (int64_t) INT64_C(0x8000000000000000)) { - Debug(3, "audio: a/v start, no valid video\n"); - return; + Debug(3, "audio: a/v start, no valid video\n"); + return; } // no valid audio known - if (!AudioRing[AudioRingWrite].HwSampleRate - || !AudioRing[AudioRingWrite].HwChannels - || AudioRing[AudioRingWrite].PTS == - (int64_t) INT64_C(0x8000000000000000)) { - Debug(3, "audio: a/v start, no valid audio\n"); - AudioVideoIsReady = 1; - return; + if (!AudioRing[AudioRingWrite].HwSampleRate || !AudioRing[AudioRingWrite].HwChannels + || AudioRing[AudioRingWrite].PTS == (int64_t) INT64_C(0x8000000000000000)) { + Debug(3, "audio: a/v start, no valid audio\n"); + AudioVideoIsReady = 1; + return; } // Audio.PTS = next written sample time stamp used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); audio_pts = - AudioRing[AudioRingWrite].PTS - - (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); + AudioRing[AudioRingWrite].PTS - + (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample); - Debug(3, "audio: a/v sync buf(%d,%4zdms) %s | %s = %dms %s\n", - 
atomic_read(&AudioRingFilled), - (used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), - Timestamp2String(pts), Timestamp2String(audio_pts), - (int)(pts - audio_pts) / 90, AudioRunning ? "running" : "ready"); + Debug(3, "audio: a/v sync buf(%d,%4zdms) %s | %s = %dms %s\n", atomic_read(&AudioRingFilled), + (used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample), Timestamp2String(pts), Timestamp2String(audio_pts), (int)(pts - audio_pts) / 90, + AudioRunning ? "running" : "ready"); if (!AudioRunning) { - int skip; + int skip; - // buffer ~15 video frames - // FIXME: HDTV can use smaller video buffer - skip = - pts - 15 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay; + // buffer ~15 video frames + // FIXME: HDTV can use smaller video buffer + skip = pts - 15 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay; #ifdef DEBUG - fprintf(stderr, "%dms %dms %dms\n", (int)(pts - audio_pts) / 90, - VideoAudioDelay / 90, skip / 90); + fprintf(stderr, "%dms %dms %dms\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90); #endif - // guard against old PTS - if (skip > 0 && skip < 4000 * 90) { - skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate) / (1000 * 90)) - * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; - // FIXME: round to packet size - if ((unsigned)skip > used) { - AudioSkip = skip - used; - skip = used; - } - Debug(3, "audio: sync advance %dms %d/%zd\n", - (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample), skip, used); - RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); + // guard against old PTS + if (skip > 0 && skip < 4000 * 90) { + skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate) / (1000 * 90)) + * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; + // FIXME: round to packet size + if ((unsigned)skip > used) { + AudioSkip = skip - used; + skip = used; + } + Debug(3, "audio: sync advance %dms %d/%zd\n", + (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample), skip, used); + RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); - used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - } - else { - Debug(3,"No audio skip -> should skip %d\n",skip/90); - } - // FIXME: skip<0 we need bigger audio buffer + used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + } else { + Debug(3, "No audio skip -> should skip %d\n", skip / 90); + } + // FIXME: skip<0 we need bigger audio buffer - // enough video + audio buffered - if (AudioStartThreshold < used) { - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - Debug(3,"Start on AudioVideoReady\n"); - } + // enough video + audio buffered + if (AudioStartThreshold < used) { + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3, "Start on AudioVideoReady\n"); + } } AudioVideoIsReady = 1; #if 0 - if (AudioRing[AudioRingWrite].HwSampleRate - && AudioRing[AudioRingWrite].HwChannels) { - if (pts != (int64_t) INT64_C(0x8000000000000000) - && AudioRing[AudioRingWrite].PTS != - (int64_t) INT64_C(0x8000000000000000)) { - Debug(3, "audio: a/v %d %s\n", - (int)(pts - AudioRing[AudioRingWrite].PTS) / 90, - AudioRunning ? 
"running" : "stopped"); - } - Debug(3, "audio: start %4zdms %s|%s video ready\n", - (RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), - Timestamp2String(pts), - Timestamp2String(AudioRing[AudioRingWrite].PTS)); + if (AudioRing[AudioRingWrite].HwSampleRate && AudioRing[AudioRingWrite].HwChannels) { + if (pts != (int64_t) INT64_C(0x8000000000000000) + && AudioRing[AudioRingWrite].PTS != (int64_t) INT64_C(0x8000000000000000)) { + Debug(3, "audio: a/v %d %s\n", (int)(pts - AudioRing[AudioRingWrite].PTS) / 90, + AudioRunning ? "running" : "stopped"); + } + Debug(3, "audio: start %4zdms %s|%s video ready\n", + (RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), + Timestamp2String(pts), Timestamp2String(AudioRing[AudioRingWrite].PTS)); - if (!AudioRunning) { - size_t used; + if (!AudioRunning) { + size_t used; - used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - // enough video + audio buffered - if (AudioStartThreshold < used) { - // too much audio buffered, skip it - if (AudioStartThreshold < used) { - Debug(3, "audio: start %4zdms skip video ready\n", - ((used - AudioStartThreshold) * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * - AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample)); - RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, - used - AudioStartThreshold); - } - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - } - } + used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); + // enough video + audio buffered + if (AudioStartThreshold < used) { + // too much audio buffered, skip it + if (AudioStartThreshold < used) { + Debug(3, "audio: start %4zdms skip video ready\n", ((used - AudioStartThreshold) * 1000) + / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample)); + RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, used - AudioStartThreshold); + } + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + } + } } AudioVideoIsReady = 1; #endif @@ -2500,19 +2415,19 @@ void AudioFlushBuffers(void) int i; if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { - // wait for space in ring buffer, should never happen - for (i = 0; i < 24 * 2; ++i) { - if (atomic_read(&AudioRingFilled) < AUDIO_RING_MAX) { - break; - } - Debug(3, "audio: flush out of ring buffers\n"); - usleep(1 * 1000); // avoid hot polling - } - if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { - // FIXME: We can set the flush flag in the last wrote ring buffer - Error(_("audio: flush out of ring buffers\n")); - return; - } + // wait for space in ring buffer, should never happen + for (i = 0; i < 24 * 2; ++i) { + if (atomic_read(&AudioRingFilled) < AUDIO_RING_MAX) { + break; + } + Debug(3, "audio: flush out of ring buffers\n"); + usleep(1 * 1000); // avoid hot polling + } + if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { + // FIXME: We can set the flush flag in the last wrote ring buffer + Error(_("audio: flush out of ring buffers\n")); + return; + } } old = AudioRingWrite; @@ -2525,7 +2440,7 @@ void AudioFlushBuffers(void) AudioRing[AudioRingWrite].InChannels = AudioRing[old].InChannels; AudioRing[AudioRingWrite].PTS = INT64_C(0x8000000000000000); RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, - 
RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer)); + RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer)); Debug(3, "audio: reset video ready\n"); AudioVideoIsReady = 0; AudioSkip = 0; @@ -2534,16 +2449,16 @@ void AudioFlushBuffers(void) // FIXME: wait for flush complete needed? for (i = 0; i < 24 * 2; ++i) { - if (!AudioRunning) { // wakeup thread to flush buffers - AudioRunning = 1; - pthread_cond_signal(&AudioStartCond); - Debug(3,"Start on Flush\n"); - } - // FIXME: waiting on zero isn't correct, but currently works - if (!atomic_read(&AudioRingFilled)) { - break; - } - usleep(1 * 1000); // avoid hot polling + if (!AudioRunning) { // wakeup thread to flush buffers + AudioRunning = 1; + pthread_cond_signal(&AudioStartCond); + Debug(3, "Start on Flush\n"); + } + // FIXME: waiting on zero isn't correct, but currently works + if (!atomic_read(&AudioRingFilled)) { + break; + } + usleep(1 * 1000); // avoid hot polling } Debug(3, "audio: audio flush %dms\n", i); } @@ -2561,9 +2476,8 @@ void AudioPoller(void) */ int AudioFreeBytes(void) { - return AudioRing[AudioRingWrite].RingBuffer ? - RingBufferFreeBytes(AudioRing[AudioRingWrite].RingBuffer) - : INT32_MAX; + return AudioRing[AudioRingWrite].RingBuffer ? RingBufferFreeBytes(AudioRing[AudioRingWrite].RingBuffer) + : INT32_MAX; } /** @@ -2572,8 +2486,7 @@ int AudioFreeBytes(void) int AudioUsedBytes(void) { // FIXME: not correct, if multiple buffer are in use - return AudioRing[AudioRingWrite].RingBuffer ? - RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) : 0; + return AudioRing[AudioRingWrite].RingBuffer ? RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) : 0; } /** @@ -2586,20 +2499,20 @@ int64_t AudioGetDelay(void) int64_t pts; if (!AudioRunning) { - return 0L; // audio not running + return 0L; // audio not running } if (!AudioRing[AudioRingRead].HwSampleRate) { - return 0L; // audio not setup + return 0L; // audio not setup } if (atomic_read(&AudioRingFilled)) { - return 0L; // multiple buffers, invalid delay + return 0L; // multiple buffers, invalid delay } pts = AudioUsedModule->GetDelay(); pts += ((int64_t) RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) - * 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate * - AudioRing[AudioRingRead].HwChannels * AudioBytesProSample); - Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n", - RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer), pts / 90); + * 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels * + AudioBytesProSample); + Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer), + pts / 90); return pts; } @@ -2612,9 +2525,8 @@ int64_t AudioGetDelay(void) void AudioSetClock(int64_t pts) { if (AudioRing[AudioRingWrite].PTS != pts) { - Debug(4, "audio: set clock %s -> %s pts\n", - Timestamp2String(AudioRing[AudioRingWrite].PTS), - Timestamp2String(pts)); + Debug(4, "audio: set clock %s -> %s pts\n", Timestamp2String(AudioRing[AudioRingWrite].PTS), + Timestamp2String(pts)); } AudioRing[AudioRingWrite].PTS = pts; } @@ -2628,15 +2540,15 @@ int64_t AudioGetClock(void) { // (cast) needed for the evil gcc if (AudioRing[AudioRingRead].PTS != (int64_t) INT64_C(0x8000000000000000)) { - int64_t delay; + int64_t delay; - // delay zero, if no valid time stamp - if ((delay = AudioGetDelay())) { - if (AudioRing[AudioRingRead].Passthrough) { - return AudioRing[AudioRingRead].PTS + 0 * 90 - delay; - } - return AudioRing[AudioRingRead].PTS + 0 * 90 - delay; - } + // delay 
zero, if no valid time stamp + if ((delay = AudioGetDelay())) { + if (AudioRing[AudioRingRead].Passthrough) { + return AudioRing[AudioRingRead].PTS + 0 * 90 - delay; + } + return AudioRing[AudioRingRead].PTS + 0 * 90 - delay; + } } return INT64_C(0x8000000000000000); } @@ -2651,18 +2563,17 @@ void AudioSetVolume(int volume) AudioVolume = volume; AudioMute = !volume; // reduce loudness for stereo output - if (AudioStereoDescent && AudioRing[AudioRingRead].InChannels == 2 - && !AudioRing[AudioRingRead].Passthrough) { - volume -= AudioStereoDescent; - if (volume < 0) { - volume = 0; - } else if (volume > 1000) { - volume = 1000; - } + if (AudioStereoDescent && AudioRing[AudioRingRead].InChannels == 2 && !AudioRing[AudioRingRead].Passthrough) { + volume -= AudioStereoDescent; + if (volume < 0) { + volume = 0; + } else if (volume > 1000) { + volume = 1000; + } } AudioAmplifier = volume; if (!AudioSoftVolume) { - AudioUsedModule->SetVolume(volume); + AudioUsedModule->SetVolume(volume); } } @@ -2681,14 +2592,13 @@ void AudioSetVolume(int volume) */ int AudioSetup(int *freq, int *channels, int passthrough) { - Debug(3, "audio: setup channels %d frequency %dHz %s\n", *channels, *freq, - passthrough ? "pass-through" : ""); + Debug(3, "audio: setup channels %d frequency %dHz %s\n", *channels, *freq, passthrough ? "pass-through" : ""); // invalid parameter if (!freq || !channels || !*freq || !*channels) { - Debug(3, "audio: bad channels or frequency parameters\n"); - // FIXME: set flag invalid setup - return -1; + Debug(3, "audio: bad channels or frequency parameters\n"); + // FIXME: set flag invalid setup + return -1; } return AudioRingAdd(*freq, *channels, passthrough); } @@ -2699,12 +2609,12 @@ int AudioSetup(int *freq, int *channels, int passthrough) void AudioPlay(void) { if (!AudioPaused) { - Debug(3, "audio: not paused, check the code\n"); - return; + Debug(3, "audio: not paused, check the code\n"); + return; } Debug(3, "audio: resumed\n"); AudioPaused = 0; - AudioEnqueue(NULL, 0); // wakeup thread + AudioEnqueue(NULL, 0); // wakeup thread } /** @@ -2713,8 +2623,8 @@ void AudioPlay(void) void AudioPause(void) { if (AudioPaused) { - Debug(3, "audio: already paused, check the code\n"); - return; + Debug(3, "audio: already paused, check the code\n"); + return; } Debug(3, "audio: paused\n"); AudioPaused = 1; @@ -2731,7 +2641,7 @@ void AudioPause(void) void AudioSetBufferTime(int delay) { if (!delay) { - delay = 336; + delay = 336; } AudioBufferTime = delay; } @@ -2744,9 +2654,9 @@ void AudioSetBufferTime(int delay) void AudioSetSoftvol(int onoff) { if (onoff < 0) { - AudioSoftVolume ^= 1; + AudioSoftVolume ^= 1; } else { - AudioSoftVolume = onoff; + AudioSoftVolume = onoff; } } @@ -2759,9 +2669,9 @@ void AudioSetSoftvol(int onoff) void AudioSetNormalize(int onoff, int maxfac) { if (onoff < 0) { - AudioNormalize ^= 1; + AudioNormalize ^= 1; } else { - AudioNormalize = onoff; + AudioNormalize = onoff; } AudioMaxNormalize = maxfac; } @@ -2775,16 +2685,16 @@ void AudioSetNormalize(int onoff, int maxfac) void AudioSetCompression(int onoff, int maxfac) { if (onoff < 0) { - AudioCompression ^= 1; + AudioCompression ^= 1; } else { - AudioCompression = onoff; + AudioCompression = onoff; } AudioMaxCompression = maxfac; if (!AudioCompressionFactor) { - AudioCompressionFactor = 1000; + AudioCompressionFactor = 1000; } if (AudioCompressionFactor > AudioMaxCompression) { - AudioCompressionFactor = AudioMaxCompression; + AudioCompressionFactor = AudioMaxCompression; } } @@ -2796,7 +2706,7 @@ void 
AudioSetCompression(int onoff, int maxfac) void AudioSetStereoDescent(int delta) { AudioStereoDescent = delta; - AudioSetVolume(AudioVolume); // update channel delta + AudioSetVolume(AudioVolume); // update channel delta } /** @@ -2809,12 +2719,12 @@ void AudioSetStereoDescent(int delta) void AudioSetDevice(const char *device) { if (!AudioModuleName) { - AudioModuleName = "alsa"; // detect alsa/OSS - if (!device[0]) { - AudioModuleName = "noop"; - } else if (device[0] == '/') { - AudioModuleName = "oss"; - } + AudioModuleName = "alsa"; // detect alsa/OSS + if (!device[0]) { + AudioModuleName = "noop"; + } else if (device[0] == '/') { + AudioModuleName = "oss"; + } } AudioPCMDevice = device; } @@ -2829,12 +2739,12 @@ void AudioSetDevice(const char *device) void AudioSetPassthroughDevice(const char *device) { if (!AudioModuleName) { - AudioModuleName = "alsa"; // detect alsa/OSS - if (!device[0]) { - AudioModuleName = "noop"; - } else if (device[0] == '/') { - AudioModuleName = "oss"; - } + AudioModuleName = "alsa"; // detect alsa/OSS + if (!device[0]) { + AudioModuleName = "noop"; + } else if (device[0] == '/') { + AudioModuleName = "oss"; + } } AudioPassthroughDevice = device; } @@ -2859,9 +2769,9 @@ void AudioSetChannel(const char *channel) void AudioSetAutoAES(int onoff) { if (onoff < 0) { - AudioAppendAES ^= 1; + AudioAppendAES ^= 1; } else { - AudioAppendAES = onoff; + AudioAppendAES = onoff; } } @@ -2885,17 +2795,17 @@ void AudioInit(void) name = "alsa"; #endif if (AudioModuleName) { - name = AudioModuleName; + name = AudioModuleName; } // - // search selected audio module. + // search selected audio module. // for (u = 0; u < sizeof(AudioModules) / sizeof(*AudioModules); ++u) { - if (!strcasecmp(name, AudioModules[u]->Name)) { - AudioUsedModule = AudioModules[u]; - Info(_("audio: '%s' output module used\n"), AudioUsedModule->Name); - goto found; - } + if (!strcasecmp(name, AudioModules[u]->Name)) { + AudioUsedModule = AudioModules[u]; + Info(_("audio: '%s' output module used\n"), AudioUsedModule->Name); + goto found; + } } Error(_("audio: '%s' output module isn't supported\n"), name); AudioUsedModule = &NoopModule; @@ -2906,132 +2816,129 @@ void AudioInit(void) AudioRingInit(); AudioUsedModule->Init(); // - // Check which channels/rates/formats are supported - // FIXME: we force 44.1Khz and 48Khz must be supported equal - // FIXME: should use bitmap of channels supported in RatesInHw - // FIXME: use loop over sample-rates + // Check which channels/rates/formats are supported + // FIXME: we force 44.1Khz and 48Khz must be supported equal + // FIXME: should use bitmap of channels supported in RatesInHw + // FIXME: use loop over sample-rates freq = 44100; AudioRatesInHw[Audio44100] = 0; for (chan = 1; chan < 9; ++chan) { - int tchan; - int tfreq; + int tchan; + int tfreq; - tchan = chan; - tfreq = freq; - if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { - AudioChannelsInHw[chan] = 0; - } else { - AudioChannelsInHw[chan] = chan; - AudioRatesInHw[Audio44100] |= (1 << chan); - } + tchan = chan; + tfreq = freq; + if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { + AudioChannelsInHw[chan] = 0; + } else { + AudioChannelsInHw[chan] = chan; + AudioRatesInHw[Audio44100] |= (1 << chan); + } } freq = 48000; AudioRatesInHw[Audio48000] = 0; for (chan = 1; chan < 9; ++chan) { - int tchan; - int tfreq; + int tchan; + int tfreq; - if (!AudioChannelsInHw[chan]) { - continue; - } - tchan = chan; - tfreq = freq; - if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { - //AudioChannelsInHw[chan] = 0; - } else { 
- AudioChannelsInHw[chan] = chan; - AudioRatesInHw[Audio48000] |= (1 << chan); - } + if (!AudioChannelsInHw[chan]) { + continue; + } + tchan = chan; + tfreq = freq; + if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { + //AudioChannelsInHw[chan] = 0; + } else { + AudioChannelsInHw[chan] = chan; + AudioRatesInHw[Audio48000] |= (1 << chan); + } } freq = 192000; AudioRatesInHw[Audio192000] = 0; for (chan = 1; chan < 9; ++chan) { - int tchan; - int tfreq; + int tchan; + int tfreq; - if (!AudioChannelsInHw[chan]) { - continue; - } - tchan = chan; - tfreq = freq; - if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { - //AudioChannelsInHw[chan] = 0; - } else { - AudioChannelsInHw[chan] = chan; - AudioRatesInHw[Audio192000] |= (1 << chan); - } + if (!AudioChannelsInHw[chan]) { + continue; + } + tchan = chan; + tfreq = freq; + if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { + //AudioChannelsInHw[chan] = 0; + } else { + AudioChannelsInHw[chan] = chan; + AudioRatesInHw[Audio192000] |= (1 << chan); + } } - // build channel support and conversion table + // build channel support and conversion table for (u = 0; u < AudioRatesMax; ++u) { - for (chan = 1; chan < 9; ++chan) { - AudioChannelMatrix[u][chan] = 0; - if (!AudioRatesInHw[u]) { // rate unsupported - continue; - } - if (AudioChannelsInHw[chan]) { - AudioChannelMatrix[u][chan] = chan; - } else { - switch (chan) { - case 1: - if (AudioChannelsInHw[2]) { - AudioChannelMatrix[u][chan] = 2; - } - break; - case 2: - case 3: - if (AudioChannelsInHw[4]) { - AudioChannelMatrix[u][chan] = 4; - break; - } - case 4: - if (AudioChannelsInHw[5]) { - AudioChannelMatrix[u][chan] = 5; - break; - } - case 5: - if (AudioChannelsInHw[6]) { - AudioChannelMatrix[u][chan] = 6; - break; - } - case 6: - if (AudioChannelsInHw[7]) { - AudioChannelMatrix[u][chan] = 7; - break; - } - case 7: - if (AudioChannelsInHw[8]) { - AudioChannelMatrix[u][chan] = 8; - break; - } - case 8: - if (AudioChannelsInHw[6]) { - AudioChannelMatrix[u][chan] = 6; - break; - } - if (AudioChannelsInHw[2]) { - AudioChannelMatrix[u][chan] = 2; - break; - } - if (AudioChannelsInHw[1]) { - AudioChannelMatrix[u][chan] = 1; - break; - } - break; - } - } - } + for (chan = 1; chan < 9; ++chan) { + AudioChannelMatrix[u][chan] = 0; + if (!AudioRatesInHw[u]) { // rate unsupported + continue; + } + if (AudioChannelsInHw[chan]) { + AudioChannelMatrix[u][chan] = chan; + } else { + switch (chan) { + case 1: + if (AudioChannelsInHw[2]) { + AudioChannelMatrix[u][chan] = 2; + } + break; + case 2: + case 3: + if (AudioChannelsInHw[4]) { + AudioChannelMatrix[u][chan] = 4; + break; + } + case 4: + if (AudioChannelsInHw[5]) { + AudioChannelMatrix[u][chan] = 5; + break; + } + case 5: + if (AudioChannelsInHw[6]) { + AudioChannelMatrix[u][chan] = 6; + break; + } + case 6: + if (AudioChannelsInHw[7]) { + AudioChannelMatrix[u][chan] = 7; + break; + } + case 7: + if (AudioChannelsInHw[8]) { + AudioChannelMatrix[u][chan] = 8; + break; + } + case 8: + if (AudioChannelsInHw[6]) { + AudioChannelMatrix[u][chan] = 6; + break; + } + if (AudioChannelsInHw[2]) { + AudioChannelMatrix[u][chan] = 2; + break; + } + if (AudioChannelsInHw[1]) { + AudioChannelMatrix[u][chan] = 1; + break; + } + break; + } + } + } } for (u = 0; u < AudioRatesMax; ++u) { - Info(_("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"), - AudioRatesTable[u], AudioChannelMatrix[u][1], - AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], - AudioChannelMatrix[u][4], AudioChannelMatrix[u][5], - AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], - 
AudioChannelMatrix[u][8]); + Info(_("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"), AudioRatesTable[u], + AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4], + AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]); } #ifdef USE_AUDIO_THREAD - if (AudioUsedModule->Thread) { // supports threads - AudioInitThread(); + if (AudioUsedModule->Thread) { // supports threads + AudioInitThread(); } #endif AudioDoingInit = 0; @@ -3047,8 +2954,8 @@ void AudioExit(void) Debug(3, "audio: %s\n", __FUNCTION__); #ifdef USE_AUDIO_THREAD - if (AudioUsedModule->Thread) { // supports threads - AudioExitThread(); + if (AudioUsedModule->Thread) { // supports threads + AudioExitThread(); } #endif module = AudioUsedModule; @@ -3062,34 +2969,34 @@ void AudioExit(void) #ifdef AUDIO_TEST //---------------------------------------------------------------------------- -// Test +// Test //---------------------------------------------------------------------------- void AudioTest(void) { for (;;) { - unsigned u; - uint8_t buffer[16 * 1024]; // some random data - int i; + unsigned u; + uint8_t buffer[16 * 1024]; // some random data + int i; - for (u = 0; u < sizeof(buffer); u++) { - buffer[u] = random() & 0xffff; - } + for (u = 0; u < sizeof(buffer); u++) { + buffer[u] = random() & 0xffff; + } - Debug(3, "audio/test: loop\n"); - for (i = 0; i < 100; ++i) { - while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { - AlsaEnqueue(buffer, sizeof(buffer)); - } - usleep(20 * 1000); - } - break; + Debug(3, "audio/test: loop\n"); + for (i = 0; i < 100; ++i) { + while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { + AlsaEnqueue(buffer, sizeof(buffer)); + } + usleep(20 * 1000); + } + break; } } #include -int SysLogLevel; ///< show additional debug informations +int SysLogLevel; ///< show additional debug informations /** ** Print version. @@ -3098,10 +3005,9 @@ static void PrintVersion(void) { printf("audio_test: audio tester Version " VERSION #ifdef GIT_REV - "(GIT-" GIT_REV ")" + "(GIT-" GIT_REV ")" #endif - ",\n\t(c) 2009 - 2013 by Johns\n" - "\tLicense AGPLv3: GNU Affero General Public License version 3\n"); + ",\n\t(c) 2009 - 2013 by Johns\n" "\tLicense AGPLv3: GNU Affero General Public License version 3\n"); } /** @@ -3109,10 +3015,9 @@ static void PrintVersion(void) */ static void PrintUsage(void) { - printf("Usage: audio_test [-?dhv]\n" - "\t-d\tenable debug, more -d increase the verbosity\n" - "\t-? -h\tdisplay this message\n" "\t-v\tdisplay version information\n" - "Only idiots print usage on stderr!\n"); + printf("Usage: audio_test [-?dhv]\n" "\t-d\tenable debug, more -d increase the verbosity\n" + "\t-? 
-h\tdisplay this message\n" "\t-v\tdisplay version information\n" + "Only idiots print usage on stderr!\n"); } /** @@ -3128,65 +3033,65 @@ int main(int argc, char *const argv[]) SysLogLevel = 0; // - // Parse command line arguments + // Parse command line arguments // for (;;) { - switch (getopt(argc, argv, "hv?-c:d")) { - case 'd': // enabled debug - ++SysLogLevel; - continue; + switch (getopt(argc, argv, "hv?-c:d")) { + case 'd': // enabled debug + ++SysLogLevel; + continue; - case EOF: - break; - case 'v': // print version - PrintVersion(); - return 0; - case '?': - case 'h': // help usage - PrintVersion(); - PrintUsage(); - return 0; - case '-': - PrintVersion(); - PrintUsage(); - fprintf(stderr, "\nWe need no long options\n"); - return -1; - case ':': - PrintVersion(); - fprintf(stderr, "Missing argument for option '%c'\n", optopt); - return -1; - default: - PrintVersion(); - fprintf(stderr, "Unknown option '%c'\n", optopt); - return -1; - } - break; + case EOF: + break; + case 'v': // print version + PrintVersion(); + return 0; + case '?': + case 'h': // help usage + PrintVersion(); + PrintUsage(); + return 0; + case '-': + PrintVersion(); + PrintUsage(); + fprintf(stderr, "\nWe need no long options\n"); + return -1; + case ':': + PrintVersion(); + fprintf(stderr, "Missing argument for option '%c'\n", optopt); + return -1; + default: + PrintVersion(); + fprintf(stderr, "Unknown option '%c'\n", optopt); + return -1; + } + break; } if (optind < argc) { - PrintVersion(); - while (optind < argc) { - fprintf(stderr, "Unhandled argument '%s'\n", argv[optind++]); - } - return -1; + PrintVersion(); + while (optind < argc) { + fprintf(stderr, "Unhandled argument '%s'\n", argv[optind++]); + } + return -1; } // - // main loop + // main loop // AudioInit(); for (;;) { - unsigned u; - uint8_t buffer[16 * 1024]; // some random data + unsigned u; + uint8_t buffer[16 * 1024]; // some random data - for (u = 0; u < sizeof(buffer); u++) { - buffer[u] = random() & 0xffff; - } + for (u = 0; u < sizeof(buffer); u++) { + buffer[u] = random() & 0xffff; + } - Debug(3, "audio/test: loop\n"); - for (;;) { - while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { - AlsaEnqueue(buffer, sizeof(buffer)); - } - } + Debug(3, "audio/test: loop\n"); + for (;;) { + while (RingBufferFreeBytes(AlsaRingBuffer) > sizeof(buffer)) { + AlsaEnqueue(buffer, sizeof(buffer)); + } + } } AudioExit(); diff --git a/audio.h b/audio.h index 14af010..46a8d90 100644 --- a/audio.h +++ b/audio.h @@ -1,67 +1,67 @@ /// -/// @file audio.h @brief Audio module headerfile +/// @file audio.h @brief Audio module headerfile /// -/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: da055758af70cfdb9ab1989d5fcfb218c5d6a366 $ +/// $Id: da055758af70cfdb9ab1989d5fcfb218c5d6a366 $ ////////////////////////////////////////////////////////////////////////////// /// @addtogroup Audio /// @{ //---------------------------------------------------------------------------- -// Prototypes +// Prototypes //---------------------------------------------------------------------------- -extern void AudioEnqueue(const void *, int); ///< buffer audio samples -extern void AudioFlushBuffers(void); ///< flush audio buffers -extern void AudioPoller(void); ///< poll audio events/handling -extern int AudioFreeBytes(void); ///< free bytes in audio output -extern int AudioUsedBytes(void); ///< used bytes in audio output -extern int64_t AudioGetDelay(void); ///< get current audio delay -extern void AudioSetClock(int64_t); ///< set audio clock base -extern int64_t AudioGetClock(); ///< get current audio clock -extern void AudioSetVolume(int); ///< set volume -extern int AudioSetup(int *, int *, int); ///< setup audio output +extern void AudioEnqueue(const void *, int); ///< buffer audio samples +extern void AudioFlushBuffers(void); ///< flush audio buffers +extern void AudioPoller(void); ///< poll audio events/handling +extern int AudioFreeBytes(void); ///< free bytes in audio output +extern int AudioUsedBytes(void); ///< used bytes in audio output +extern int64_t AudioGetDelay(void); ///< get current audio delay +extern void AudioSetClock(int64_t); ///< set audio clock base +extern int64_t AudioGetClock(); ///< get current audio clock +extern void AudioSetVolume(int); ///< set volume +extern int AudioSetup(int *, int *, int); ///< setup audio output -extern void AudioPlay(void); ///< play audio -extern void AudioPause(void); ///< pause audio +extern void AudioPlay(void); ///< play audio +extern void AudioPause(void); ///< pause audio -extern void AudioSetBufferTime(int); ///< set audio buffer time -extern void AudioSetSoftvol(int); ///< enable/disable softvol -extern void AudioSetNormalize(int, int); ///< set normalize parameters -extern void AudioSetCompression(int, int); ///< set compression parameters -extern void AudioSetStereoDescent(int); ///< set stereo loudness descent +extern void AudioSetBufferTime(int); ///< set audio buffer time +extern void AudioSetSoftvol(int); ///< enable/disable softvol +extern void AudioSetNormalize(int, int); ///< set normalize parameters +extern void AudioSetCompression(int, int); ///< set compression parameters +extern void AudioSetStereoDescent(int); ///< set stereo loudness descent -extern void AudioSetDevice(const char *); ///< set PCM audio device +extern void AudioSetDevice(const char *); ///< set PCM audio device /// set pass-through device extern void AudioSetPassthroughDevice(const char *); -extern void AudioSetChannel(const char *); ///< set mixer channel -extern void AudioSetAutoAES(int); ///< set automatic AES flag handling -extern void AudioInit(void); ///< setup audio module -extern void AudioExit(void); ///< cleanup and exit audio module +extern void AudioSetChannel(const char *); ///< set mixer channel +extern void AudioSetAutoAES(int); ///< set automatic AES flag handling +extern void AudioInit(void); ///< setup 
audio module +extern void AudioExit(void); ///< cleanup and exit audio module //---------------------------------------------------------------------------- -// Variables +// Variables //---------------------------------------------------------------------------- -extern char AudioAlsaDriverBroken; ///< disable broken driver message -extern char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix -extern char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix +extern char AudioAlsaDriverBroken; ///< disable broken driver message +extern char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix +extern char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix /// @} diff --git a/codec.c b/codec.c index 32b0d38..0addf9f 100644 --- a/codec.c +++ b/codec.c @@ -1,34 +1,34 @@ /// -/// @file codec.c @brief Codec functions +/// @file codec.c @brief Codec functions /// -/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: d285eb28485bea02cd205fc8be47320dfe0376cf $ +/// $Id: d285eb28485bea02cd205fc8be47320dfe0376cf $ ////////////////////////////////////////////////////////////////////////////// /// -/// @defgroup Codec The codec module. +/// @defgroup Codec The codec module. /// -/// This module contains all decoder and codec functions. -/// It is uses ffmpeg (http://ffmpeg.org) as backend. +/// This module contains all decoder and codec functions. +/// It is uses ffmpeg (http://ffmpeg.org) as backend. /// -/// It may work with libav (http://libav.org), but the tests show -/// many bugs and incompatiblity in it. Don't use this shit. +/// It may work with libav (http://libav.org), but the tests show +/// many bugs and incompatiblity in it. Don't use this shit. 
/// /// compile with pass-through support (stable, AC-3, E-AC-3 only) @@ -56,8 +56,8 @@ #include #include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #include #include @@ -86,46 +86,47 @@ #include "codec.h" //---------------------------------------------------------------------------- -// Global +// Global //---------------------------------------------------------------------------- /// - /// ffmpeg lock mutex + /// ffmpeg lock mutex /// - /// new ffmpeg dislikes simultanous open/close - /// this breaks our code, until this is fixed use lock. + /// new ffmpeg dislikes simultanous open/close + /// this breaks our code, until this is fixed use lock. /// static pthread_mutex_t CodecLockMutex; /// Flag prefer fast channel switch char CodecUsePossibleDefectFrames; AVBufferRef *hw_device_ctx; + //---------------------------------------------------------------------------- -// Video +// Video //---------------------------------------------------------------------------- #if 0 /// -/// Video decoder typedef. +/// Video decoder typedef. /// //typedef struct _video_decoder_ Decoder; #endif #if 0 /// -/// Video decoder structure. +/// Video decoder structure. /// struct _video_decoder_ { - VideoHwDecoder *HwDecoder; ///< video hardware decoder + VideoHwDecoder *HwDecoder; ///< video hardware decoder - int GetFormatDone; ///< flag get format called! - AVCodec *VideoCodec; ///< video codec - AVCodecContext *VideoCtx; ///< video codec context - AVFrame *Frame; ///< decoded video frame + int GetFormatDone; ///< flag get format called! + AVCodec *VideoCodec; ///< video codec + AVCodecContext *VideoCtx; ///< video codec context + AVFrame *Frame; ///< decoded video frame }; #endif //---------------------------------------------------------------------------- -// Call-backs +// Call-backs //---------------------------------------------------------------------------- /** @@ -137,21 +138,20 @@ struct _video_decoder_ ** valid format, the formats are ordered by ** quality. 
*/ -static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, - const enum AVPixelFormat *fmt) +static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enum AVPixelFormat *fmt) { VideoDecoder *decoder; - enum AVPixelFormat fmt1; + enum AVPixelFormat fmt1; + decoder = video_ctx->opaque; // bug in ffmpeg 1.1.1, called with zero width or height if (!video_ctx->width || !video_ctx->height) { - Error("codec/video: ffmpeg/libav buggy: width or height zero\n"); + Error("codec/video: ffmpeg/libav buggy: width or height zero\n"); } - -// decoder->GetFormatDone = 1; + + // decoder->GetFormatDone = 1; return Video_get_format(decoder->HwDecoder, video_ctx, fmt); - } @@ -171,21 +171,20 @@ static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int fl decoder = video_ctx->opaque; - if (!decoder->GetFormatDone) { // get_format missing - enum AVPixelFormat fmts[2]; + if (!decoder->GetFormatDone) { // get_format missing + enum AVPixelFormat fmts[2]; -// fprintf(stderr, "codec: buggy libav, use ffmpeg\n"); -// Warning(_("codec: buggy libav, use ffmpeg\n")); - fmts[0] = video_ctx->pix_fmt; - fmts[1] = AV_PIX_FMT_NONE; - Codec_get_format(video_ctx, fmts); +// fprintf(stderr, "codec: buggy libav, use ffmpeg\n"); +// Warning(_("codec: buggy libav, use ffmpeg\n")); + fmts[0] = video_ctx->pix_fmt; + fmts[1] = AV_PIX_FMT_NONE; + Codec_get_format(video_ctx, fmts); } #if 0 - if (decoder->hwaccel_get_buffer && (AV_PIX_FMT_VDPAU == decoder->hwaccel_pix_fmt || - AV_PIX_FMT_CUDA == decoder->hwaccel_pix_fmt || - AV_PIX_FMT_VAAPI == decoder->hwaccel_pix_fmt)) { - //Debug(3,"hwaccel get_buffer\n"); - return decoder->hwaccel_get_buffer(video_ctx, frame, flags); + if (decoder->hwaccel_get_buffer && (AV_PIX_FMT_VDPAU == decoder->hwaccel_pix_fmt + || AV_PIX_FMT_CUDA == decoder->hwaccel_pix_fmt || AV_PIX_FMT_VAAPI == decoder->hwaccel_pix_fmt)) { + //Debug(3,"hwaccel get_buffer\n"); + return decoder->hwaccel_get_buffer(video_ctx, frame, flags); } #endif //Debug(3, "codec: fallback to default get_buffer\n"); @@ -193,7 +192,7 @@ static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int fl } //---------------------------------------------------------------------------- -// Test +// Test //---------------------------------------------------------------------------- /** @@ -208,7 +207,7 @@ VideoDecoder *CodecVideoNewDecoder(VideoHwDecoder * hw_decoder) VideoDecoder *decoder; if (!(decoder = calloc(1, sizeof(*decoder)))) { - Fatal(_("codec: can't allocate vodeo decoder\n")); + Fatal(_("codec: can't allocate vodeo decoder\n")); } decoder->HwDecoder = hw_decoder; @@ -225,7 +224,6 @@ void CodecVideoDelDecoder(VideoDecoder * decoder) free(decoder); } - /** ** Open video decoder. 
** @@ -236,197 +234,191 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) { AVCodec *video_codec; const char *name; - int ret,deint=2; + int ret, deint = 2; Debug(3, "***************codec: Video Open using video codec ID %#06x (%s)\n", codec_id, - avcodec_get_name(codec_id)); + avcodec_get_name(codec_id)); if (decoder->VideoCtx) { - Error(_("codec: missing close\n")); + Error(_("codec: missing close\n")); } - + name = "NULL"; #ifdef CUVID if (!strcasecmp(VideoGetDriverName(), "cuvid")) { - switch (codec_id) { - case AV_CODEC_ID_MPEG2VIDEO: - name = "mpeg2_cuvid"; - break; - case AV_CODEC_ID_H264: - name = "h264_cuvid"; - break; - case AV_CODEC_ID_HEVC: - name = "hevc_cuvid"; - break; - } + switch (codec_id) { + case AV_CODEC_ID_MPEG2VIDEO: + name = "mpeg2_cuvid"; + break; + case AV_CODEC_ID_H264: + name = "h264_cuvid"; + break; + case AV_CODEC_ID_HEVC: + name = "hevc_cuvid"; + break; + } } -#endif +#endif if (name && (video_codec = avcodec_find_decoder_by_name(name))) { - Debug(3, "codec: decoder found\n"); - } else if ((video_codec = avcodec_find_decoder(codec_id))==NULL) { - Debug(3,"Decoder %s not supported %p\n",name,video_codec); - Fatal(_(" No decoder found")); - } + Debug(3, "codec: decoder found\n"); + } else if ((video_codec = avcodec_find_decoder(codec_id)) == NULL) { + Debug(3, "Decoder %s not supported %p\n", name, video_codec); + Fatal(_(" No decoder found")); + } decoder->VideoCodec = video_codec; - - Debug(3, "codec: video '%s'\n", decoder->VideoCodec->long_name); - + + Debug(3, "codec: video '%s'\n", decoder->VideoCodec->long_name); + if (!(decoder->VideoCtx = avcodec_alloc_context3(video_codec))) { - Fatal(_("codec: can't allocate video codec context\n")); + Fatal(_("codec: can't allocate video codec context\n")); } - if (!HwDeviceContext) { - Fatal("codec: no hw device context to be used"); + if (!HwDeviceContext) { + Fatal("codec: no hw device context to be used"); } decoder->VideoCtx->hw_device_ctx = av_buffer_ref(HwDeviceContext); - // FIXME: for software decoder use all cpus, otherwise 1 decoder->VideoCtx->thread_count = 1; - - decoder->VideoCtx->pkt_timebase.num = 1; - decoder->VideoCtx->pkt_timebase.den = 90000; - decoder->VideoCtx->framerate.num = 50; - decoder->VideoCtx->framerate.den = 1; - - + + decoder->VideoCtx->pkt_timebase.num = 1; + decoder->VideoCtx->pkt_timebase.den = 90000; + decoder->VideoCtx->framerate.num = 50; + decoder->VideoCtx->framerate.den = 1; + pthread_mutex_lock(&CodecLockMutex); // open codec #ifdef YADIF - deint = 2; + deint = 2; #endif #ifdef VAAPI - decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1 + decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1 if (video_codec->capabilities & (AV_CODEC_CAP_AUTO_THREADS)) { - Debug(3,"codec: auto threads enabled"); + Debug(3, "codec: auto threads enabled"); decoder->VideoCtx->thread_count = 0; } if (video_codec->capabilities & AV_CODEC_CAP_TRUNCATED) { - Debug(3,"codec: supports truncated packets"); + Debug(3, "codec: supports truncated packets"); //decoder->VideoCtx->flags |= CODEC_FLAG_TRUNCATED; } // FIXME: own memory management for video frames. 
if (video_codec->capabilities & AV_CODEC_CAP_DR1) { - Debug(3,"codec: can use own buffer management"); + Debug(3, "codec: can use own buffer management"); } if (video_codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) { - Debug(3,"codec: supports frame threads"); + Debug(3, "codec: supports frame threads"); decoder->VideoCtx->thread_count = 0; - // decoder->VideoCtx->thread_type |= FF_THREAD_FRAME; + // decoder->VideoCtx->thread_type |= FF_THREAD_FRAME; } if (video_codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) { - Debug(3,"codec: supports slice threads"); + Debug(3, "codec: supports slice threads"); decoder->VideoCtx->thread_count = 0; - // decoder->VideoCtx->thread_type |= FF_THREAD_SLICE; + // decoder->VideoCtx->thread_type |= FF_THREAD_SLICE; } - if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0)<0) - Fatal(_("VAAPI Refcounts invalid\n")); - decoder->VideoCtx->thread_safe_callbacks = 0; + if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0) + Fatal(_("VAAPI Refcounts invalid\n")); + decoder->VideoCtx->thread_safe_callbacks = 0; #endif - - - + #ifdef CUVID - if (strcmp(decoder->VideoCodec->long_name,"Nvidia CUVID MPEG2VIDEO decoder") == 0) { // deinterlace for mpeg2 is somehow broken - if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint ,0) < 0) { // adaptive - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't set option deint to video codec!\n")); - } + if (strcmp(decoder->VideoCodec->long_name, "Nvidia CUVID MPEG2VIDEO decoder") == 0) { // deinterlace for mpeg2 is somehow broken + if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint, 0) < 0) { // adaptive + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't set option deint to video codec!\n")); + } #if 1 - if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 9 ,0) < 0) { - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't set option surfces to video codec!\n")); - } + if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 9, 0) < 0) { + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't set option surfces to video codec!\n")); + } #endif - if (av_opt_set(decoder->VideoCtx->priv_data, "drop_second_field", "false" ,0) < 0) { - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't set option drop 2.field to video codec!\n")); - } - } - else if (strstr(decoder->VideoCodec->long_name,"Nvidia CUVID") != NULL) { - if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint ,0) < 0) { // adaptive - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't set option deint to video codec!\n")); - } + if (av_opt_set(decoder->VideoCtx->priv_data, "drop_second_field", "false", 0) < 0) { + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't set option drop 2.field to video codec!\n")); + } + } else if (strstr(decoder->VideoCodec->long_name, "Nvidia CUVID") != NULL) { + if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint, 0) < 0) { // adaptive + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't set option deint to video codec!\n")); + } #if 1 - if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 13 ,0) < 0) { - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't set option surfces to video codec!\n")); - } + if (av_opt_set_int(decoder->VideoCtx->priv_data, "surfaces", 13, 0) < 0) { + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't set option surfces to video codec!\n")); + } #endif - if (av_opt_set(decoder->VideoCtx->priv_data, 
"drop_second_field", "false" ,0) < 0) { - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't set option drop 2.field to video codec!\n")); - } - } + if (av_opt_set(decoder->VideoCtx->priv_data, "drop_second_field", "false", 0) < 0) { + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't set option drop 2.field to video codec!\n")); + } + } #endif if ((ret = avcodec_open2(decoder->VideoCtx, video_codec, NULL)) < 0) { - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't open video codec!\n")); + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't open video codec!\n")); } - Debug(3," Codec open %d\n",ret); + Debug(3, " Codec open %d\n", ret); pthread_mutex_unlock(&CodecLockMutex); - decoder->VideoCtx->opaque = decoder; // our structure - + decoder->VideoCtx->opaque = decoder; // our structure + //decoder->VideoCtx->debug = FF_DEBUG_STARTCODE; //decoder->VideoCtx->err_recognition |= AV_EF_EXPLODE; -// av_log_set_level(AV_LOG_DEBUG); - av_log_set_level(0); - - decoder->VideoCtx->get_format = Codec_get_format; - decoder->VideoCtx->get_buffer2 = Codec_get_buffer2; -// decoder->VideoCtx->active_thread_type = 0; - decoder->VideoCtx->draw_horiz_band = NULL; - decoder->VideoCtx->hwaccel_context = VideoGetHwAccelContext(decoder->HwDecoder); +// av_log_set_level(AV_LOG_DEBUG); + av_log_set_level(0); + decoder->VideoCtx->get_format = Codec_get_format; + decoder->VideoCtx->get_buffer2 = Codec_get_buffer2; +// decoder->VideoCtx->active_thread_type = 0; + decoder->VideoCtx->draw_horiz_band = NULL; + decoder->VideoCtx->hwaccel_context = VideoGetHwAccelContext(decoder->HwDecoder); // - // Prepare frame buffer for decoder + // Prepare frame buffer for decoder // #if 0 if (!(decoder->Frame = av_frame_alloc())) { - Fatal(_("codec: can't allocate video decoder frame buffer\n")); + Fatal(_("codec: can't allocate video decoder frame buffer\n")); } #endif - + // reset buggy ffmpeg/libav flag decoder->GetFormatDone = 0; #ifdef YADIF - decoder->filter = 0; + decoder->filter = 0; #endif } - /** ** Close video decoder. 
** ** @param video_decoder private video decoder */ -void CodecVideoClose(VideoDecoder *video_decoder) +void CodecVideoClose(VideoDecoder * video_decoder) { - AVFrame *frame; - // FIXME: play buffered data -// av_frame_free(&video_decoder->Frame); // callee does checks + AVFrame *frame; - Debug(3,"CodecVideoClose\n"); + // FIXME: play buffered data +// av_frame_free(&video_decoder->Frame); // callee does checks + + Debug(3, "CodecVideoClose\n"); if (video_decoder->VideoCtx) { - pthread_mutex_lock(&CodecLockMutex); -#if 1 - frame = av_frame_alloc(); - avcodec_send_packet(video_decoder->VideoCtx, NULL); - while (avcodec_receive_frame(video_decoder->VideoCtx,frame) >= 0); - av_frame_free(&frame); + pthread_mutex_lock(&CodecLockMutex); +#if 1 + frame = av_frame_alloc(); + avcodec_send_packet(video_decoder->VideoCtx, NULL); + while (avcodec_receive_frame(video_decoder->VideoCtx, frame) >= 0) ; + av_frame_free(&frame); #endif - avcodec_close(video_decoder->VideoCtx); - av_freep(&video_decoder->VideoCtx); - pthread_mutex_unlock(&CodecLockMutex); + avcodec_close(video_decoder->VideoCtx); + av_freep(&video_decoder->VideoCtx); + pthread_mutex_unlock(&CodecLockMutex); } - + } #if 0 @@ -450,17 +442,16 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame) pts = frame->pkt_pts; if (pts == (int64_t) AV_NOPTS_VALUE) { - printf("*"); + printf("*"); } ms_delay = (1000 * video_ctx->time_base.num) / video_ctx->time_base.den; ms_delay += frame->repeat_pict * ms_delay / 2; - printf("codec: PTS %s%s %" PRId64 " %d %d/%d %d/%d %dms\n", - frame->repeat_pict ? "r" : " ", frame->interlaced_frame ? "I" : " ", - pts, (int)(pts - last_pts) / 90, video_ctx->time_base.num, - video_ctx->time_base.den, video_ctx->framerate.num, video_ctx->framerate.den, ms_delay); + printf("codec: PTS %s%s %" PRId64 " %d %d/%d %d/%d %dms\n", frame->repeat_pict ? "r" : " ", + frame->interlaced_frame ? 
"I" : " ", pts, (int)(pts - last_pts) / 90, video_ctx->time_base.num, + video_ctx->time_base.den, video_ctx->framerate.num, video_ctx->framerate.den, ms_delay); if (pts != (int64_t) AV_NOPTS_VALUE) { - last_pts = pts; + last_pts = pts; } } @@ -473,9 +464,10 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame) ** @param avpkt video packet */ extern int CuvidTestSurfaces(); + #ifdef YADIF -extern int init_filters(AVCodecContext * dec_ctx,void * decoder,AVFrame *frame); -extern int push_filters(AVCodecContext * dec_ctx,void * decoder,AVFrame *frame); +extern int init_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame); +extern int push_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame); #endif #ifdef VAAPI void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) @@ -483,132 +475,126 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) AVCodecContext *video_ctx = decoder->VideoCtx; if (video_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { - int ret; - AVPacket pkt[1]; - AVFrame *frame; + int ret; + AVPacket pkt[1]; + AVFrame *frame; - *pkt = *avpkt; // use copy - ret = avcodec_send_packet(video_ctx, pkt); - if (ret < 0) { - Debug(4,"codec: sending video packet failed"); - return; - } - frame = av_frame_alloc(); - ret = avcodec_receive_frame(video_ctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { - Debug(4,"codec: receiving video frame failed"); - av_frame_free(&frame); - return; - } - if (ret >= 0) { - if (decoder->filter ) { - if (decoder->filter == 1) { - if (init_filters(video_ctx,decoder->HwDecoder,frame) < 0) { - Debug(3,"video: Init of VAAPI deint Filter failed\n"); - decoder->filter = 0; - } - else { - Debug(3,"Init VAAPI deint ok\n"); - decoder->filter = 2; - } - } - if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag - ret = push_filters(video_ctx,decoder->HwDecoder,frame); - return; - } - } - VideoRenderFrame(decoder->HwDecoder, video_ctx, frame); - } - else { - av_frame_free(&frame); - } + *pkt = *avpkt; // use copy + ret = avcodec_send_packet(video_ctx, pkt); + if (ret < 0) { + Debug(4, "codec: sending video packet failed"); + return; + } + frame = av_frame_alloc(); + ret = avcodec_receive_frame(video_ctx, frame); + if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { + Debug(4, "codec: receiving video frame failed"); + av_frame_free(&frame); + return; + } + if (ret >= 0) { + if (decoder->filter) { + if (decoder->filter == 1) { + if (init_filters(video_ctx, decoder->HwDecoder, frame) < 0) { + Debug(3, "video: Init of VAAPI deint Filter failed\n"); + decoder->filter = 0; + } else { + Debug(3, "Init VAAPI deint ok\n"); + decoder->filter = 2; + } + } + if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag + ret = push_filters(video_ctx, decoder->HwDecoder, frame); + return; + } + } + VideoRenderFrame(decoder->HwDecoder, video_ctx, frame); + } else { + av_frame_free(&frame); + } } } #endif -#ifdef CUVID +#ifdef CUVID void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) { AVCodecContext *video_ctx; - AVFrame *frame - -; - int ret,ret1; + AVFrame *frame; + int ret, ret1; int got_frame; int consumed = 0; - static uint64_t first_time = 0; + static uint64_t first_time = 0; const AVPacket *pkt; - -next_part: + + next_part: video_ctx = decoder->VideoCtx; - pkt = avpkt; // use copy + pkt = avpkt; // use copy got_frame = 0; - -// printf("decode 
packet %d\n",(GetusTicks()-first_time)/1000000); - ret1 = avcodec_send_packet(video_ctx, pkt); - -// first_time = GetusTicks(); - - if (ret1 >= 0) { - consumed = 1; - } - if (!CuvidTestSurfaces()) - usleep(1000); +// printf("decode packet %d\n",(GetusTicks()-first_time)/1000000); + ret1 = avcodec_send_packet(video_ctx, pkt); + +// first_time = GetusTicks(); + + if (ret1 >= 0) { + consumed = 1; + } + + if (!CuvidTestSurfaces()) + usleep(1000); //printf("send packet to decode %s\n",consumed?"ok":"Full"); - - if ((ret1 == AVERROR(EAGAIN) || ret1 == AVERROR_EOF || ret1 >= 0) && CuvidTestSurfaces()) { - ret = 0; - while ((ret >= 0) && CuvidTestSurfaces()) { // get frames until empty snd Surfaces avail. - frame = av_frame_alloc(); - ret = avcodec_receive_frame(video_ctx, frame); // get new frame - if (ret >= 0) { // one is avail. - got_frame = 1; - } - else { - got_frame = 0; - } -// printf("got %s packet from decoder\n",got_frame?"1":"no"); - if (got_frame) { // frame completed -#ifdef YADIF - if (decoder->filter ) { - if (decoder->filter == 1) { - if (init_filters(video_ctx,decoder->HwDecoder,frame) < 0) { - Fatal(_("video: Init of YADIF Filter failed\n")); - decoder->filter = 0; - } - else { - Debug(3,"Init YADIF ok\n"); - decoder->filter = 2; - } - } - if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag - ret = push_filters(video_ctx,decoder->HwDecoder,frame); -// av_frame_unref(frame); - continue; - } - } -#endif - //DisplayPts(video_ctx, frame); - VideoRenderFrame(decoder->HwDecoder, video_ctx, frame); -// av_frame_unref(frame); - } else { - av_frame_free(&frame); -// printf("codec: got no frame %d send %d\n",ret,ret1); - } - } - if (!CuvidTestSurfaces()) { - usleep(1000); - } - } else { -// consumed = 1; - } - - if (!consumed) { - goto next_part; // try again to stuff decoder - } + + if ((ret1 == AVERROR(EAGAIN) || ret1 == AVERROR_EOF || ret1 >= 0) && CuvidTestSurfaces()) { + ret = 0; + while ((ret >= 0) && CuvidTestSurfaces()) { // get frames until empty snd Surfaces avail. + frame = av_frame_alloc(); + ret = avcodec_receive_frame(video_ctx, frame); // get new frame + if (ret >= 0) { // one is avail. 
+ got_frame = 1; + } else { + got_frame = 0; + } +// printf("got %s packet from decoder\n",got_frame?"1":"no"); + if (got_frame) { // frame completed +#ifdef YADIF + if (decoder->filter) { + if (decoder->filter == 1) { + if (init_filters(video_ctx, decoder->HwDecoder, frame) < 0) { + Fatal(_("video: Init of YADIF Filter failed\n")); + decoder->filter = 0; + } else { + Debug(3, "Init YADIF ok\n"); + decoder->filter = 2; + } + } + if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag + ret = push_filters(video_ctx, decoder->HwDecoder, frame); +// av_frame_unref(frame); + continue; + } + } +#endif + //DisplayPts(video_ctx, frame); + VideoRenderFrame(decoder->HwDecoder, video_ctx, frame); +// av_frame_unref(frame); + } else { + av_frame_free(&frame); +// printf("codec: got no frame %d send %d\n",ret,ret1); + } + } + if (!CuvidTestSurfaces()) { + usleep(1000); + } + } else { +// consumed = 1; + } + + if (!consumed) { + goto next_part; // try again to stuff decoder + } } #endif @@ -621,89 +607,89 @@ next_part: void CodecVideoFlushBuffers(VideoDecoder * decoder) { if (decoder->VideoCtx) { - avcodec_flush_buffers(decoder->VideoCtx); + avcodec_flush_buffers(decoder->VideoCtx); } } //---------------------------------------------------------------------------- -// Audio +// Audio //---------------------------------------------------------------------------- #if 0 /// -/// Audio decoder typedef. +/// Audio decoder typedef. /// typedef struct _audio_decoder_ AudioDecoder; #endif /// -/// Audio decoder structure. +/// Audio decoder structure. /// struct _audio_decoder_ { - AVCodec *AudioCodec; ///< audio codec - AVCodecContext *AudioCtx; ///< audio codec context + AVCodec *AudioCodec; ///< audio codec + AVCodecContext *AudioCtx; ///< audio codec context - char Passthrough; ///< current pass-through flags - int SampleRate; ///< current stream sample rate - int Channels; ///< current stream channels + char Passthrough; ///< current pass-through flags + int SampleRate; ///< current stream sample rate + int Channels; ///< current stream channels - int HwSampleRate; ///< hw sample rate - int HwChannels; ///< hw channels + int HwSampleRate; ///< hw sample rate + int HwChannels; ///< hw channels - AVFrame *Frame; ///< decoded audio frame buffer + AVFrame *Frame; ///< decoded audio frame buffer #if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE) - ReSampleContext *ReSample; ///< old resampling context + ReSampleContext *ReSample; ///< old resampling context #endif #ifdef USE_SWRESAMPLE #if LIBSWRESAMPLE_VERSION_INT < AV_VERSION_INT(0, 15, 100) - struct SwrContext *Resample; ///< ffmpeg software resample context + struct SwrContext *Resample; ///< ffmpeg software resample context #else - SwrContext *Resample; ///< ffmpeg software resample context + SwrContext *Resample; ///< ffmpeg software resample context #endif #endif #ifdef USE_AVRESAMPLE - AVAudioResampleContext *Resample; ///< libav software resample context + AVAudioResampleContext *Resample; ///< libav software resample context #endif - uint16_t Spdif[24576 / 2]; ///< SPDIF output buffer - int SpdifIndex; ///< index into SPDIF output buffer - int SpdifCount; ///< SPDIF repeat counter + uint16_t Spdif[24576 / 2]; ///< SPDIF output buffer + int SpdifIndex; ///< index into SPDIF output buffer + int SpdifCount; ///< SPDIF repeat counter - int64_t LastDelay; ///< last delay - struct timespec LastTime; ///< last time - int64_t LastPTS; ///< last PTS + int64_t LastDelay; ///< last delay + 
struct timespec LastTime; ///< last time + int64_t LastPTS; ///< last PTS - int Drift; ///< accumulated audio drift - int DriftCorr; ///< audio drift correction value - int DriftFrac; ///< audio drift fraction for ac3 + int Drift; ///< accumulated audio drift + int DriftCorr; ///< audio drift correction value + int DriftFrac; ///< audio drift fraction for ac3 #if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE) - struct AVResampleContext *AvResample; ///< second audio resample context -#define MAX_CHANNELS 8 ///< max number of channels supported - int16_t *Buffer[MAX_CHANNELS]; ///< deinterleave sample buffers - int BufferSize; ///< size of sample buffer - int16_t *Remain[MAX_CHANNELS]; ///< filter remaining samples - int RemainSize; ///< size of remain buffer - int RemainCount; ///< number of remaining samples + struct AVResampleContext *AvResample; ///< second audio resample context +#define MAX_CHANNELS 8 ///< max number of channels supported + int16_t *Buffer[MAX_CHANNELS]; ///< deinterleave sample buffers + int BufferSize; ///< size of sample buffer + int16_t *Remain[MAX_CHANNELS]; ///< filter remaining samples + int RemainSize; ///< size of remain buffer + int RemainCount; ///< number of remaining samples #endif }; /// -/// IEC Data type enumeration. +/// IEC Data type enumeration. /// enum IEC61937 { - IEC61937_AC3 = 0x01, ///< AC-3 data + IEC61937_AC3 = 0x01, ///< AC-3 data // FIXME: more data types - IEC61937_EAC3 = 0x15, ///< E-AC-3 data + IEC61937_EAC3 = 0x15, ///< E-AC-3 data }; #ifdef USE_AUDIO_DRIFT_CORRECTION -#define CORRECT_PCM 1 ///< do PCM audio-drift correction -#define CORRECT_AC3 2 ///< do AC-3 audio-drift correction -static char CodecAudioDrift; ///< flag: enable audio-drift correction +#define CORRECT_PCM 1 ///< do PCM audio-drift correction +#define CORRECT_AC3 2 ///< do AC-3 audio-drift correction +static char CodecAudioDrift; ///< flag: enable audio-drift correction #else static const int CodecAudioDrift = 0; #endif @@ -715,7 +701,7 @@ static char CodecPassthrough; #else static const int CodecPassthrough = 0; #endif -static char CodecDownmix; ///< enable AC-3 decoder downmix +static char CodecDownmix; ///< enable AC-3 decoder downmix /** ** Allocate a new audio decoder context. 
@@ -727,10 +713,10 @@ AudioDecoder *CodecAudioNewDecoder(void) AudioDecoder *audio_decoder; if (!(audio_decoder = calloc(1, sizeof(*audio_decoder)))) { - Fatal(_("codec: can't allocate audio decoder\n")); + Fatal(_("codec: can't allocate audio decoder\n")); } if (!(audio_decoder->Frame = av_frame_alloc())) { - Fatal(_("codec: can't allocate audio decoder frame buffer\n")); + Fatal(_("codec: can't allocate audio decoder frame buffer\n")); } return audio_decoder; @@ -743,7 +729,7 @@ AudioDecoder *CodecAudioNewDecoder(void) */ void CodecAudioDelDecoder(AudioDecoder * decoder) { - av_frame_free(&decoder->Frame); // callee does checks + av_frame_free(&decoder->Frame); // callee does checks free(decoder); } @@ -757,43 +743,40 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id) { AVCodec *audio_codec; - Debug(3, "codec: using audio codec ID %#06x (%s)\n", codec_id, - avcodec_get_name(codec_id)); + Debug(3, "codec: using audio codec ID %#06x (%s)\n", codec_id, avcodec_get_name(codec_id)); if (!(audio_codec = avcodec_find_decoder(codec_id))) { // if (!(audio_codec = avcodec_find_decoder(codec_id))) { - Fatal(_("codec: codec ID %#06x not found\n"), codec_id); - // FIXME: errors aren't fatal + Fatal(_("codec: codec ID %#06x not found\n"), codec_id); + // FIXME: errors aren't fatal } audio_decoder->AudioCodec = audio_codec; if (!(audio_decoder->AudioCtx = avcodec_alloc_context3(audio_codec))) { - Fatal(_("codec: can't allocate audio codec context\n")); + Fatal(_("codec: can't allocate audio codec context\n")); } if (CodecDownmix) { - audio_decoder->AudioCtx->request_channel_layout = - AV_CH_LAYOUT_STEREO_DOWNMIX; + audio_decoder->AudioCtx->request_channel_layout = AV_CH_LAYOUT_STEREO_DOWNMIX; } pthread_mutex_lock(&CodecLockMutex); // open codec if (1) { - AVDictionary *av_dict; + AVDictionary *av_dict; - av_dict = NULL; - // FIXME: import settings - //av_dict_set(&av_dict, "dmix_mode", "0", 0); - //av_dict_set(&av_dict, "ltrt_cmixlev", "1.414", 0); - //av_dict_set(&av_dict, "loro_cmixlev", "1.414", 0); - if (avcodec_open2(audio_decoder->AudioCtx, audio_codec, &av_dict) < 0) { - pthread_mutex_unlock(&CodecLockMutex); - Fatal(_("codec: can't open audio codec\n")); - } - av_dict_free(&av_dict); + av_dict = NULL; + // FIXME: import settings + //av_dict_set(&av_dict, "dmix_mode", "0", 0); + //av_dict_set(&av_dict, "ltrt_cmixlev", "1.414", 0); + //av_dict_set(&av_dict, "loro_cmixlev", "1.414", 0); + if (avcodec_open2(audio_decoder->AudioCtx, audio_codec, &av_dict) < 0) { + pthread_mutex_unlock(&CodecLockMutex); + Fatal(_("codec: can't open audio codec\n")); + } + av_dict_free(&av_dict); } pthread_mutex_unlock(&CodecLockMutex); Debug(3, "codec: audio '%s'\n", audio_decoder->AudioCodec->long_name); - audio_decoder->SampleRate = 0; audio_decoder->Channels = 0; audio_decoder->HwSampleRate = 0; @@ -811,40 +794,40 @@ void CodecAudioClose(AudioDecoder * audio_decoder) // FIXME: output any buffered data #if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE) if (audio_decoder->AvResample) { - int ch; + int ch; - av_resample_close(audio_decoder->AvResample); - audio_decoder->AvResample = NULL; - audio_decoder->RemainCount = 0; - audio_decoder->BufferSize = 0; - audio_decoder->RemainSize = 0; - for (ch = 0; ch < MAX_CHANNELS; ++ch) { - free(audio_decoder->Buffer[ch]); - audio_decoder->Buffer[ch] = NULL; - free(audio_decoder->Remain[ch]); - audio_decoder->Remain[ch] = NULL; - } + av_resample_close(audio_decoder->AvResample); + audio_decoder->AvResample = NULL; + audio_decoder->RemainCount = 0; + 
audio_decoder->BufferSize = 0; + audio_decoder->RemainSize = 0; + for (ch = 0; ch < MAX_CHANNELS; ++ch) { + free(audio_decoder->Buffer[ch]); + audio_decoder->Buffer[ch] = NULL; + free(audio_decoder->Remain[ch]); + audio_decoder->Remain[ch] = NULL; + } } if (audio_decoder->ReSample) { - audio_resample_close(audio_decoder->ReSample); - audio_decoder->ReSample = NULL; + audio_resample_close(audio_decoder->ReSample); + audio_decoder->ReSample = NULL; } #endif #ifdef USE_SWRESAMPLE if (audio_decoder->Resample) { - swr_free(&audio_decoder->Resample); + swr_free(&audio_decoder->Resample); } #endif #ifdef USE_AVRESAMPLE if (audio_decoder->Resample) { - avresample_free(&audio_decoder->Resample); + avresample_free(&audio_decoder->Resample); } #endif if (audio_decoder->AudioCtx) { - pthread_mutex_lock(&CodecLockMutex); - avcodec_close(audio_decoder->AudioCtx); - av_freep(&audio_decoder->AudioCtx); - pthread_mutex_unlock(&CodecLockMutex); + pthread_mutex_lock(&CodecLockMutex); + avcodec_close(audio_decoder->AudioCtx); + av_freep(&audio_decoder->AudioCtx); + pthread_mutex_unlock(&CodecLockMutex); } } @@ -882,8 +865,8 @@ void CodecSetAudioPassthrough(int mask) void CodecSetAudioDownmix(int onoff) { if (onoff == -1) { - CodecDownmix ^= 1; - return; + CodecDownmix ^= 1; + return; } CodecDownmix = onoff; } @@ -908,43 +891,43 @@ static void CodecReorderAudioFrame(int16_t * buf, int size, int channels) int lfe; switch (channels) { - case 5: - size /= 2; - for (i = 0; i < size; i += 5) { - c = buf[i + 2]; - ls = buf[i + 3]; - rs = buf[i + 4]; - buf[i + 2] = ls; - buf[i + 3] = rs; - buf[i + 4] = c; - } - break; - case 6: - size /= 2; - for (i = 0; i < size; i += 6) { - c = buf[i + 2]; - lfe = buf[i + 3]; - ls = buf[i + 4]; - rs = buf[i + 5]; - buf[i + 2] = ls; - buf[i + 3] = rs; - buf[i + 4] = c; - buf[i + 5] = lfe; - } - break; - case 8: - size /= 2; - for (i = 0; i < size; i += 8) { - c = buf[i + 2]; - lfe = buf[i + 3]; - ls = buf[i + 4]; - rs = buf[i + 5]; - buf[i + 2] = ls; - buf[i + 3] = rs; - buf[i + 4] = c; - buf[i + 5] = lfe; - } - break; + case 5: + size /= 2; + for (i = 0; i < size; i += 5) { + c = buf[i + 2]; + ls = buf[i + 3]; + rs = buf[i + 4]; + buf[i + 2] = ls; + buf[i + 3] = rs; + buf[i + 4] = c; + } + break; + case 6: + size /= 2; + for (i = 0; i < size; i += 6) { + c = buf[i + 2]; + lfe = buf[i + 3]; + ls = buf[i + 4]; + rs = buf[i + 5]; + buf[i + 2] = ls; + buf[i + 3] = rs; + buf[i + 4] = c; + buf[i + 5] = lfe; + } + break; + case 8: + size /= 2; + for (i = 0; i < size; i += 8) { + c = buf[i + 2]; + lfe = buf[i + 3]; + ls = buf[i + 4]; + rs = buf[i + 5]; + buf[i + 2] = ls; + buf[i + 3] = rs; + buf[i + 4] = c; + buf[i + 5] = lfe; + } + break; } } @@ -954,20 +937,17 @@ static void CodecReorderAudioFrame(int16_t * buf, int size, int channels) ** @param audio_decoder audio decoder data ** @param[out] passthrough pass-through output */ -static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, - int *passthrough) +static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough) { const AVCodecContext *audio_ctx; int err; audio_ctx = audio_decoder->AudioCtx; Debug(3, "codec/audio: format change %s %dHz *%d channels%s%s%s%s%s\n", - av_get_sample_fmt_name(audio_ctx->sample_fmt), audio_ctx->sample_rate, - audio_ctx->channels, CodecPassthrough & CodecPCM ? " PCM" : "", - CodecPassthrough & CodecMPA ? " MPA" : "", - CodecPassthrough & CodecAC3 ? " AC-3" : "", - CodecPassthrough & CodecEAC3 ? " E-AC-3" : "", - CodecPassthrough ? 
" pass-through" : ""); + av_get_sample_fmt_name(audio_ctx->sample_fmt), audio_ctx->sample_rate, audio_ctx->channels, + CodecPassthrough & CodecPCM ? " PCM" : "", CodecPassthrough & CodecMPA ? " MPA" : "", + CodecPassthrough & CodecAC3 ? " AC-3" : "", CodecPassthrough & CodecEAC3 ? " E-AC-3" : "", + CodecPassthrough ? " pass-through" : ""); *passthrough = 0; audio_decoder->SampleRate = audio_ctx->sample_rate; @@ -978,41 +958,35 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, // SPDIF/HDMI pass-through if ((CodecPassthrough & CodecAC3 && audio_ctx->codec_id == AV_CODEC_ID_AC3) - || (CodecPassthrough & CodecEAC3 - && audio_ctx->codec_id == AV_CODEC_ID_EAC3)) { - if (audio_ctx->codec_id == AV_CODEC_ID_EAC3) { - // E-AC-3 over HDMI some receivers need HBR - audio_decoder->HwSampleRate *= 4; - } - audio_decoder->HwChannels = 2; - audio_decoder->SpdifIndex = 0; // reset buffer - audio_decoder->SpdifCount = 0; - *passthrough = 1; + || (CodecPassthrough & CodecEAC3 && audio_ctx->codec_id == AV_CODEC_ID_EAC3)) { + if (audio_ctx->codec_id == AV_CODEC_ID_EAC3) { + // E-AC-3 over HDMI some receivers need HBR + audio_decoder->HwSampleRate *= 4; + } + audio_decoder->HwChannels = 2; + audio_decoder->SpdifIndex = 0; // reset buffer + audio_decoder->SpdifCount = 0; + *passthrough = 1; } // channels/sample-rate not support? - if ((err = - AudioSetup(&audio_decoder->HwSampleRate, - &audio_decoder->HwChannels, *passthrough))) { + if ((err = AudioSetup(&audio_decoder->HwSampleRate, &audio_decoder->HwChannels, *passthrough))) { - // try E-AC-3 none HBR - audio_decoder->HwSampleRate /= 4; - if (audio_ctx->codec_id != AV_CODEC_ID_EAC3 - || (err = - AudioSetup(&audio_decoder->HwSampleRate, - &audio_decoder->HwChannels, *passthrough))) { + // try E-AC-3 none HBR + audio_decoder->HwSampleRate /= 4; + if (audio_ctx->codec_id != AV_CODEC_ID_EAC3 + || (err = AudioSetup(&audio_decoder->HwSampleRate, &audio_decoder->HwChannels, *passthrough))) { - Debug(3, "codec/audio: audio setup error\n"); - // FIXME: handle errors - audio_decoder->HwChannels = 0; - audio_decoder->HwSampleRate = 0; - return err; - } + Debug(3, "codec/audio: audio setup error\n"); + // FIXME: handle errors + audio_decoder->HwChannels = 0; + audio_decoder->HwSampleRate = 0; + return err; + } } - Debug(3, "codec/audio: resample %s %dHz *%d -> %s %dHz *%d\n", - av_get_sample_fmt_name(audio_ctx->sample_fmt), audio_ctx->sample_rate, - audio_ctx->channels, av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), - audio_decoder->HwSampleRate, audio_decoder->HwChannels); + Debug(3, "codec/audio: resample %s %dHz *%d -> %s %dHz *%d\n", av_get_sample_fmt_name(audio_ctx->sample_fmt), + audio_ctx->sample_rate, audio_ctx->channels, av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), + audio_decoder->HwSampleRate, audio_decoder->HwChannels); return 0; } @@ -1023,8 +997,7 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, ** @param audio_decoder audio decoder data ** @param avpkt undecoded audio packet */ -static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, - const AVPacket * avpkt) +static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPacket * avpkt) { #ifdef USE_PASSTHROUGH const AVCodecContext *audio_ctx; @@ -1032,104 +1005,100 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, audio_ctx = audio_decoder->AudioCtx; // SPDIF/HDMI passthrough if (CodecPassthrough & CodecAC3 && audio_ctx->codec_id == AV_CODEC_ID_AC3) { - uint16_t *spdif; - int spdif_sz; + uint16_t *spdif; + int spdif_sz; - spdif 
= audio_decoder->Spdif; - spdif_sz = 6144; + spdif = audio_decoder->Spdif; + spdif_sz = 6144; #ifdef USE_AC3_DRIFT_CORRECTION - // FIXME: this works with some TVs/AVReceivers - // FIXME: write burst size drift correction, which should work with all - if (CodecAudioDrift & CORRECT_AC3) { - int x; + // FIXME: this works with some TVs/AVReceivers + // FIXME: write burst size drift correction, which should work with all + if (CodecAudioDrift & CORRECT_AC3) { + int x; - x = (audio_decoder->DriftFrac + - (audio_decoder->DriftCorr * spdif_sz)) / (10 * - audio_decoder->HwSampleRate * 100); - audio_decoder->DriftFrac = - (audio_decoder->DriftFrac + - (audio_decoder->DriftCorr * spdif_sz)) % (10 * - audio_decoder->HwSampleRate * 100); - // round to word border - x *= audio_decoder->HwChannels * 4; - if (x < -64) { // limit correction - x = -64; - } else if (x > 64) { - x = 64; - } - spdif_sz += x; - } + x = (audio_decoder->DriftFrac + + (audio_decoder->DriftCorr * spdif_sz)) / (10 * audio_decoder->HwSampleRate * 100); + audio_decoder->DriftFrac = + (audio_decoder->DriftFrac + + (audio_decoder->DriftCorr * spdif_sz)) % (10 * audio_decoder->HwSampleRate * 100); + // round to word border + x *= audio_decoder->HwChannels * 4; + if (x < -64) { // limit correction + x = -64; + } else if (x > 64) { + x = 64; + } + spdif_sz += x; + } #endif - // build SPDIF header and append A52 audio to it - // avpkt is the original data - if (spdif_sz < avpkt->size + 8) { - Error(_("codec/audio: decoded data smaller than encoded\n")); - return -1; - } - spdif[0] = htole16(0xF872); // iec 61937 sync word - spdif[1] = htole16(0x4E1F); - spdif[2] = htole16(IEC61937_AC3 | (avpkt->data[5] & 0x07) << 8); - spdif[3] = htole16(avpkt->size * 8); - // copy original data for output - // FIXME: not 100% sure, if endian is correct on not intel hardware - swab(avpkt->data, spdif + 4, avpkt->size); - // FIXME: don't need to clear always - memset(spdif + 4 + avpkt->size / 2, 0, spdif_sz - 8 - avpkt->size); - // don't play with the ac-3 samples - AudioEnqueue(spdif, spdif_sz); - return 1; + // build SPDIF header and append A52 audio to it + // avpkt is the original data + if (spdif_sz < avpkt->size + 8) { + Error(_("codec/audio: decoded data smaller than encoded\n")); + return -1; + } + spdif[0] = htole16(0xF872); // iec 61937 sync word + spdif[1] = htole16(0x4E1F); + spdif[2] = htole16(IEC61937_AC3 | (avpkt->data[5] & 0x07) << 8); + spdif[3] = htole16(avpkt->size * 8); + // copy original data for output + // FIXME: not 100% sure, if endian is correct on not intel hardware + swab(avpkt->data, spdif + 4, avpkt->size); + // FIXME: don't need to clear always + memset(spdif + 4 + avpkt->size / 2, 0, spdif_sz - 8 - avpkt->size); + // don't play with the ac-3 samples + AudioEnqueue(spdif, spdif_sz); + return 1; } - if (CodecPassthrough & CodecEAC3 - && audio_ctx->codec_id == AV_CODEC_ID_EAC3) { - uint16_t *spdif; - int spdif_sz; - int repeat; + if (CodecPassthrough & CodecEAC3 && audio_ctx->codec_id == AV_CODEC_ID_EAC3) { + uint16_t *spdif; + int spdif_sz; + int repeat; - // build SPDIF header and append A52 audio to it - // avpkt is the original data - spdif = audio_decoder->Spdif; - spdif_sz = 24576; // 4 * 6144 - if (audio_decoder->HwSampleRate == 48000) { - spdif_sz = 6144; - } - if (spdif_sz < audio_decoder->SpdifIndex + avpkt->size + 8) { - Error(_("codec/audio: decoded data smaller than encoded\n")); - return -1; - } - // check if we must pack multiple packets - repeat = 1; - if ((avpkt->data[4] & 0xc0) != 0xc0) { // fscod - static const 
uint8_t eac3_repeat[4] = { 6, 3, 2, 1 }; + // build SPDIF header and append A52 audio to it + // avpkt is the original data + spdif = audio_decoder->Spdif; + spdif_sz = 24576; // 4 * 6144 + if (audio_decoder->HwSampleRate == 48000) { + spdif_sz = 6144; + } + if (spdif_sz < audio_decoder->SpdifIndex + avpkt->size + 8) { + Error(_("codec/audio: decoded data smaller than encoded\n")); + return -1; + } + // check if we must pack multiple packets + repeat = 1; + if ((avpkt->data[4] & 0xc0) != 0xc0) { // fscod + static const uint8_t eac3_repeat[4] = { 6, 3, 2, 1 }; - // fscod2 - repeat = eac3_repeat[(avpkt->data[4] & 0x30) >> 4]; - } - // fprintf(stderr, "repeat %d %d\n", repeat, avpkt->size); + // fscod2 + repeat = eac3_repeat[(avpkt->data[4] & 0x30) >> 4]; + } + // fprintf(stderr, "repeat %d %d\n", repeat, avpkt->size); - // copy original data for output - // pack upto repeat EAC-3 pakets into one IEC 61937 burst - // FIXME: not 100% sure, if endian is correct on not intel hardware - swab(avpkt->data, spdif + 4 + audio_decoder->SpdifIndex, avpkt->size); - audio_decoder->SpdifIndex += avpkt->size; - if (++audio_decoder->SpdifCount < repeat) { - return 1; - } + // copy original data for output + // pack upto repeat EAC-3 pakets into one IEC 61937 burst + // FIXME: not 100% sure, if endian is correct on not intel hardware + swab(avpkt->data, spdif + 4 + audio_decoder->SpdifIndex, avpkt->size); + audio_decoder->SpdifIndex += avpkt->size; + if (++audio_decoder->SpdifCount < repeat) { + return 1; + } - spdif[0] = htole16(0xF872); // iec 61937 sync word - spdif[1] = htole16(0x4E1F); - spdif[2] = htole16(IEC61937_EAC3); - spdif[3] = htole16(audio_decoder->SpdifIndex * 8); - memset(spdif + 4 + audio_decoder->SpdifIndex / 2, 0, - spdif_sz - 8 - audio_decoder->SpdifIndex); + spdif[0] = htole16(0xF872); // iec 61937 sync word + spdif[1] = htole16(0x4E1F); + spdif[2] = htole16(IEC61937_EAC3); + spdif[3] = htole16(audio_decoder->SpdifIndex * 8); + memset(spdif + 4 + audio_decoder->SpdifIndex / 2, 0, spdif_sz - 8 - audio_decoder->SpdifIndex); - // don't play with the eac-3 samples - AudioEnqueue(spdif, spdif_sz); + // don't play with the eac-3 samples + AudioEnqueue(spdif, spdif_sz); - audio_decoder->SpdifIndex = 0; - audio_decoder->SpdifCount = 0; - return 1; + audio_decoder->SpdifIndex = 0; + audio_decoder->SpdifCount = 0; + return 1; } #endif return 0; @@ -1156,32 +1125,28 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) delay = AudioGetDelay(); if (!delay) { - return; + return; } clock_gettime(CLOCK_MONOTONIC, &nowtime); if (!audio_decoder->LastDelay) { - audio_decoder->LastTime = nowtime; - audio_decoder->LastPTS = pts; - audio_decoder->LastDelay = delay; - audio_decoder->Drift = 0; - audio_decoder->DriftFrac = 0; - Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", - delay / 90); - return; + audio_decoder->LastTime = nowtime; + audio_decoder->LastPTS = pts; + audio_decoder->LastDelay = delay; + audio_decoder->Drift = 0; + audio_decoder->DriftFrac = 0; + Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", delay / 90); + return; } // collect over some time pts_diff = pts - audio_decoder->LastPTS; if (pts_diff < 10 * 1000 * 90) { - return; + return; } tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec) - * 1000 * 1000 * 1000 + (nowtime.tv_nsec - - audio_decoder->LastTime.tv_nsec); + * 1000 * 1000 * 1000 + (nowtime.tv_nsec - audio_decoder->LastTime.tv_nsec); - drift = - (tim_diff * 90) / (1000 * 1000) - pts_diff + delay - - audio_decoder->LastDelay; + 
drift = (tim_diff * 90) / (1000 * 1000) - pts_diff + delay - audio_decoder->LastDelay; // adjust rounding error nowtime.tv_nsec -= nowtime.tv_nsec % (1000 * 1000 / 90); @@ -1190,59 +1155,54 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) audio_decoder->LastDelay = delay; if (0) { - Debug(3, - "codec/audio: interval P:%5" PRId64 "ms T:%5" PRId64 "ms D:%4" - PRId64 "ms %f %d\n", pts_diff / 90, tim_diff / (1000 * 1000), - delay / 90, drift / 90.0, audio_decoder->DriftCorr); + Debug(3, "codec/audio: interval P:%5" PRId64 "ms T:%5" PRId64 "ms D:%4" PRId64 "ms %f %d\n", pts_diff / 90, + tim_diff / (1000 * 1000), delay / 90, drift / 90.0, audio_decoder->DriftCorr); } // underruns and av_resample have the same time :((( if (abs(drift) > 10 * 90) { - // drift too big, pts changed? - Debug(3, "codec/audio: drift(%6d) %3dms reset\n", - audio_decoder->DriftCorr, drift / 90); - audio_decoder->LastDelay = 0; + // drift too big, pts changed? + Debug(3, "codec/audio: drift(%6d) %3dms reset\n", audio_decoder->DriftCorr, drift / 90); + audio_decoder->LastDelay = 0; #ifdef DEBUG - corr = 0; // keep gcc happy + corr = 0; // keep gcc happy #endif } else { - drift += audio_decoder->Drift; - audio_decoder->Drift = drift; - corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); - // SPDIF/HDMI passthrough - if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) - && (!(CodecPassthrough & CodecEAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { - audio_decoder->DriftCorr = -corr; - } + drift += audio_decoder->Drift; + audio_decoder->Drift = drift; + corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); + // SPDIF/HDMI passthrough + if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) + || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) + && (!(CodecPassthrough & CodecEAC3) + || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { + audio_decoder->DriftCorr = -corr; + } - if (audio_decoder->DriftCorr < -20000) { // limit correction - audio_decoder->DriftCorr = -20000; - } else if (audio_decoder->DriftCorr > 20000) { - audio_decoder->DriftCorr = 20000; - } + if (audio_decoder->DriftCorr < -20000) { // limit correction + audio_decoder->DriftCorr = -20000; + } else if (audio_decoder->DriftCorr > 20000) { + audio_decoder->DriftCorr = 20000; + } } // FIXME: this works with libav 0.8, and only with >10ms with ffmpeg 0.10 if (audio_decoder->AvResample && audio_decoder->DriftCorr) { - int distance; + int distance; - // try workaround for buggy ffmpeg 0.10 - if (abs(audio_decoder->DriftCorr) < 2000) { - distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); - } else { - distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000); - } - av_resample_compensate(audio_decoder->AvResample, - audio_decoder->DriftCorr / 10, distance); + // try workaround for buggy ffmpeg 0.10 + if (abs(audio_decoder->DriftCorr) < 2000) { + distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); + } else { + distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000); + } + av_resample_compensate(audio_decoder->AvResample, audio_decoder->DriftCorr / 10, distance); } if (1) { - static int c; + static int c; - if (!(c++ % 10)) { - Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", - audio_decoder->DriftCorr, drift * 1000 / 90, corr); - } + if (!(c++ % 10)) { + Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", audio_decoder->DriftCorr, drift * 1000 / 
90, corr); + } } } @@ -1260,64 +1220,59 @@ static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder) int err; if (audio_decoder->ReSample) { - audio_resample_close(audio_decoder->ReSample); - audio_decoder->ReSample = NULL; + audio_resample_close(audio_decoder->ReSample); + audio_decoder->ReSample = NULL; } if (audio_decoder->AvResample) { - av_resample_close(audio_decoder->AvResample); - audio_decoder->AvResample = NULL; - audio_decoder->RemainCount = 0; + av_resample_close(audio_decoder->AvResample); + audio_decoder->AvResample = NULL; + audio_decoder->RemainCount = 0; } audio_ctx = audio_decoder->AudioCtx; if ((err = CodecAudioUpdateHelper(audio_decoder, &passthrough))) { - Debug(3, "codec/audio: resample %dHz *%d -> %dHz *%d err %d\n", - audio_ctx->sample_rate, audio_ctx->channels, - audio_decoder->HwSampleRate, audio_decoder->HwChannels,err); + Debug(3, "codec/audio: resample %dHz *%d -> %dHz *%d err %d\n", audio_ctx->sample_rate, audio_ctx->channels, + audio_decoder->HwSampleRate, audio_decoder->HwChannels, err); - if (err == 1) { - audio_decoder->ReSample = - av_audio_resample_init(audio_decoder->HwChannels, - audio_ctx->channels, audio_decoder->HwSampleRate, - audio_ctx->sample_rate, audio_ctx->sample_fmt, - audio_ctx->sample_fmt, 16, 10, 0, 0.8); - // libav-0.8_pre didn't support 6 -> 2 channels - if (!audio_decoder->ReSample) { - Error(_("codec/audio: resample setup error\n")); - audio_decoder->HwChannels = 0; - audio_decoder->HwSampleRate = 0; - } - return; - } - Debug(3, "codec/audio: audio setup error\n"); - // FIXME: handle errors - audio_decoder->HwChannels = 0; - audio_decoder->HwSampleRate = 0; - return; + if (err == 1) { + audio_decoder->ReSample = + av_audio_resample_init(audio_decoder->HwChannels, audio_ctx->channels, audio_decoder->HwSampleRate, + audio_ctx->sample_rate, audio_ctx->sample_fmt, audio_ctx->sample_fmt, 16, 10, 0, 0.8); + // libav-0.8_pre didn't support 6 -> 2 channels + if (!audio_decoder->ReSample) { + Error(_("codec/audio: resample setup error\n")); + audio_decoder->HwChannels = 0; + audio_decoder->HwSampleRate = 0; + } + return; + } + Debug(3, "codec/audio: audio setup error\n"); + // FIXME: handle errors + audio_decoder->HwChannels = 0; + audio_decoder->HwSampleRate = 0; + return; } - if (passthrough) { // pass-through no conversion allowed - return; + if (passthrough) { // pass-through no conversion allowed + return; } // prepare audio drift resample #ifdef USE_AUDIO_DRIFT_CORRECTION if (CodecAudioDrift & CORRECT_PCM) { - if (audio_decoder->AvResample) { - Error(_("codec/audio: overwrite resample\n")); - } - audio_decoder->AvResample = - av_resample_init(audio_decoder->HwSampleRate, - audio_decoder->HwSampleRate, 16, 10, 0, 0.8); - if (!audio_decoder->AvResample) { - Error(_("codec/audio: AvResample setup error\n")); - } else { - // reset drift to some default value - audio_decoder->DriftCorr /= 2; - audio_decoder->DriftFrac = 0; - av_resample_compensate(audio_decoder->AvResample, - audio_decoder->DriftCorr / 10, - 10 * audio_decoder->HwSampleRate); - } + if (audio_decoder->AvResample) { + Error(_("codec/audio: overwrite resample\n")); + } + audio_decoder->AvResample = + av_resample_init(audio_decoder->HwSampleRate, audio_decoder->HwSampleRate, 16, 10, 0, 0.8); + if (!audio_decoder->AvResample) { + Error(_("codec/audio: AvResample setup error\n")); + } else { + // reset drift to some default value + audio_decoder->DriftCorr /= 2; + audio_decoder->DriftFrac = 0; + av_resample_compensate(audio_decoder->AvResample, audio_decoder->DriftCorr / 10, 
+ 10 * audio_decoder->HwSampleRate); + } } #endif } @@ -1333,89 +1288,80 @@ void CodecAudioEnqueue(AudioDecoder * audio_decoder, int16_t * data, int count) { #ifdef USE_AUDIO_DRIFT_CORRECTION if ((CodecAudioDrift & CORRECT_PCM) && audio_decoder->AvResample) { - int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + - AV_INPUT_BUFFER_PADDING_SIZE] __attribute__ ((aligned(16))); - int16_t buftmp[MAX_CHANNELS][(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4]; - int consumed; - int i; - int n; - int ch; - int bytes_n; + int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + AV_INPUT_BUFFER_PADDING_SIZE] + __attribute__((aligned(16))); + int16_t buftmp[MAX_CHANNELS][(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4]; + int consumed; + int i; + int n; + int ch; + int bytes_n; - bytes_n = count / audio_decoder->HwChannels; - // resize sample buffer, if needed - if (audio_decoder->RemainCount + bytes_n > audio_decoder->BufferSize) { - audio_decoder->BufferSize = audio_decoder->RemainCount + bytes_n; - for (ch = 0; ch < MAX_CHANNELS; ++ch) { - audio_decoder->Buffer[ch] = - realloc(audio_decoder->Buffer[ch], - audio_decoder->BufferSize); - } - } - // copy remaining bytes into sample buffer - for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { - memcpy(audio_decoder->Buffer[ch], audio_decoder->Remain[ch], - audio_decoder->RemainCount); - } - // deinterleave samples into sample buffer - for (i = 0; i < bytes_n / 2; i++) { - for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { - audio_decoder->Buffer[ch][audio_decoder->RemainCount / 2 + i] - = data[i * audio_decoder->HwChannels + ch]; - } - } + bytes_n = count / audio_decoder->HwChannels; + // resize sample buffer, if needed + if (audio_decoder->RemainCount + bytes_n > audio_decoder->BufferSize) { + audio_decoder->BufferSize = audio_decoder->RemainCount + bytes_n; + for (ch = 0; ch < MAX_CHANNELS; ++ch) { + audio_decoder->Buffer[ch] = realloc(audio_decoder->Buffer[ch], audio_decoder->BufferSize); + } + } + // copy remaining bytes into sample buffer + for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { + memcpy(audio_decoder->Buffer[ch], audio_decoder->Remain[ch], audio_decoder->RemainCount); + } + // deinterleave samples into sample buffer + for (i = 0; i < bytes_n / 2; i++) { + for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { + audio_decoder->Buffer[ch][audio_decoder->RemainCount / 2 + i] + = data[i * audio_decoder->HwChannels + ch]; + } + } - bytes_n += audio_decoder->RemainSize; - n = 0; // keep gcc lucky - // resample the sample buffer into tmp buffer - for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { - n = av_resample(audio_decoder->AvResample, buftmp[ch], - audio_decoder->Buffer[ch], &consumed, bytes_n / 2, - sizeof(buftmp[ch]) / 2, ch == audio_decoder->HwChannels - 1); - // fixme remaining channels - if (bytes_n - consumed * 2 > audio_decoder->RemainSize) { - audio_decoder->RemainSize = bytes_n - consumed * 2; - } - audio_decoder->Remain[ch] = - realloc(audio_decoder->Remain[ch], audio_decoder->RemainSize); - memcpy(audio_decoder->Remain[ch], - audio_decoder->Buffer[ch] + consumed, - audio_decoder->RemainSize); - audio_decoder->RemainCount = audio_decoder->RemainSize; - } + bytes_n += audio_decoder->RemainSize; + n = 0; // keep gcc lucky + // resample the sample buffer into tmp buffer + for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { + n = av_resample(audio_decoder->AvResample, buftmp[ch], audio_decoder->Buffer[ch], &consumed, bytes_n / 2, + sizeof(buftmp[ch]) / 2, ch == audio_decoder->HwChannels - 1); + // fixme remaining channels + if (bytes_n - 
consumed * 2 > audio_decoder->RemainSize) { + audio_decoder->RemainSize = bytes_n - consumed * 2; + } + audio_decoder->Remain[ch] = realloc(audio_decoder->Remain[ch], audio_decoder->RemainSize); + memcpy(audio_decoder->Remain[ch], audio_decoder->Buffer[ch] + consumed, audio_decoder->RemainSize); + audio_decoder->RemainCount = audio_decoder->RemainSize; + } - // interleave samples from sample buffer - for (i = 0; i < n; i++) { - for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { - buf[i * audio_decoder->HwChannels + ch] = buftmp[ch][i]; - } - } - n *= 2; + // interleave samples from sample buffer + for (i = 0; i < n; i++) { + for (ch = 0; ch < audio_decoder->HwChannels; ++ch) { + buf[i * audio_decoder->HwChannels + ch] = buftmp[ch][i]; + } + } + n *= 2; - n *= audio_decoder->HwChannels; - if (!(audio_decoder->Passthrough & CodecPCM)) { - CodecReorderAudioFrame(buf, n, audio_decoder->HwChannels); - } - AudioEnqueue(buf, n); - return; + n *= audio_decoder->HwChannels; + if (!(audio_decoder->Passthrough & CodecPCM)) { + CodecReorderAudioFrame(buf, n, audio_decoder->HwChannels); + } + AudioEnqueue(buf, n); + return; } #endif if (!(audio_decoder->Passthrough & CodecPCM)) { - CodecReorderAudioFrame(data, count, audio_decoder->HwChannels); + CodecReorderAudioFrame(data, count, audio_decoder->HwChannels); } AudioEnqueue(data, count); } -int myavcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, - int *frame_size_ptr, - AVPacket *avpkt) +int myavcodec_decode_audio3(AVCodecContext * avctx, int16_t * samples, int *frame_size_ptr, AVPacket * avpkt) { AVFrame *frame = av_frame_alloc(); int ret, got_frame = 0; - + if (!frame) return AVERROR(ENOMEM); -#if 0 +#if 0 ret = avcodec_decode_audio4(avctx, frame, &got_frame, avpkt); #else // SUGGESTION @@ -1424,7 +1370,7 @@ int myavcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, // into separate routines or separate threads. // Also now that it always consumes a whole buffer some code // in the caller may be able to be optimized. 
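As a point of reference, here is a minimal sketch of the decoupled send/receive pattern the comment above alludes to. It is illustrative only: decode_audio_packet() and use_frame() are placeholder names, error handling is trimmed, and it is not part of this patch — only the FFmpeg calls themselves (avcodec_send_packet(), avcodec_receive_frame()) are taken from the surrounding code.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Sketch (not part of the patch): feed one packet, then drain every frame the
 * decoder can produce from it. A real caller would retry the send after
 * draining when avcodec_send_packet() returns AVERROR(EAGAIN). */
static int decode_audio_packet(AVCodecContext *ctx, const AVPacket *pkt,
                               void (*use_frame)(const AVFrame *))
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0 && ret != AVERROR(EAGAIN)) {
        av_frame_free(&frame);
        return ret;                     // real decode error
    }
    while ((ret = avcodec_receive_frame(ctx, frame)) == 0) {
        use_frame(frame);               // e.g. resample and enqueue the samples
        av_frame_unref(frame);
    }
    av_frame_free(&frame);
    // EAGAIN/EOF only mean "no more frames right now", not an error
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}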
- ret = avcodec_receive_frame(avctx,frame); + ret = avcodec_receive_frame(avctx, frame); if (ret == 0) got_frame = 1; if (ret == AVERROR(EAGAIN)) @@ -1433,38 +1379,36 @@ int myavcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, ret = avcodec_send_packet(avctx, avpkt); if (ret == AVERROR(EAGAIN)) ret = 0; - else if (ret < 0) - { -// Debug(3, "codec/audio: audio decode error: %1 (%2)\n",av_make_error_string(error, sizeof(error), ret),got_frame); + else if (ret < 0) { +// Debug(3, "codec/audio: audio decode error: %1 (%2)\n",av_make_error_string(error, sizeof(error), ret),got_frame); return ret; - } - else + } else ret = avpkt->size; #endif - if (ret >= 0 && got_frame) { - int i,ch; - int planar = av_sample_fmt_is_planar(avctx->sample_fmt); - int data_size = av_get_bytes_per_sample(avctx->sample_fmt); - if (data_size < 0) { - /* This should not occur, checking just for paranoia */ - fprintf(stderr, "Failed to calculate data size\n"); - exit(1); - } - for (i=0; inb_samples; i++) { - for (ch=0; ch < avctx->channels; ch++) { - memcpy(samples,frame->extended_data[ch]+data_size*i,data_size); - samples = (char *) samples + data_size; - } - } - //Debug(3,"data_size %d nb_samples %d sample_fmt %d channels %d planar %d\n",data_size,frame->nb_samples,avctx->sample_fmt,avctx->channels,planar); - *frame_size_ptr = data_size * avctx->channels * frame->nb_samples; + if (ret >= 0 && got_frame) { + int i, ch; + int planar = av_sample_fmt_is_planar(avctx->sample_fmt); + int data_size = av_get_bytes_per_sample(avctx->sample_fmt); + + if (data_size < 0) { + /* This should not occur, checking just for paranoia */ + fprintf(stderr, "Failed to calculate data size\n"); + exit(1); + } + for (i = 0; i < frame->nb_samples; i++) { + for (ch = 0; ch < avctx->channels; ch++) { + memcpy(samples, frame->extended_data[ch] + data_size * i, data_size); + samples = (char *)samples + data_size; + } + } + //Debug(3,"data_size %d nb_samples %d sample_fmt %d channels %d planar %d\n",data_size,frame->nb_samples,avctx->sample_fmt,avctx->channels,planar); + *frame_size_ptr = data_size * avctx->channels * frame->nb_samples; } else { *frame_size_ptr = 0; } av_frame_free(&frame); return ret; - } - +} /** ** Decode an audio packet. 
@@ -1476,8 +1420,7 @@ int myavcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, */ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) { - int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + - AV_INPUT_BUFFER_PADDING_SIZE] __attribute__ ((aligned(16))); + int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + AV_INPUT_BUFFER_PADDING_SIZE] __attribute__((aligned(16))); int buf_sz; int l; AVCodecContext *audio_ctx; @@ -1488,59 +1431,54 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) buf_sz = sizeof(buf); l = myavcodec_decode_audio3(audio_ctx, buf, &buf_sz, (AVPacket *) avpkt); if (avpkt->size != l) { - if (l == AVERROR(EAGAIN)) { - Error(_("codec: latm\n")); - return; - } - if (l < 0) { // no audio frame could be decompressed - Error(_("codec: error audio data\n")); - return; - } - Error(_("codec: error more than one frame data\n")); + if (l == AVERROR(EAGAIN)) { + Error(_("codec: latm\n")); + return; + } + if (l < 0) { // no audio frame could be decompressed + Error(_("codec: error audio data\n")); + return; + } + Error(_("codec: error more than one frame data\n")); } // update audio clock if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) { - CodecAudioSetClock(audio_decoder, avpkt->pts); + CodecAudioSetClock(audio_decoder, avpkt->pts); } // FIXME: must first play remainings bytes, than change and play new. - if (audio_decoder->Passthrough != CodecPassthrough - || audio_decoder->SampleRate != audio_ctx->sample_rate - || audio_decoder->Channels != audio_ctx->channels) { - CodecAudioUpdateFormat(audio_decoder); + if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate + || audio_decoder->Channels != audio_ctx->channels) { + CodecAudioUpdateFormat(audio_decoder); } if (audio_decoder->HwSampleRate && audio_decoder->HwChannels) { - // need to resample audio - if (audio_decoder->ReSample) { - int16_t outbuf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + - AV_INPUT_BUFFER_PADDING_SIZE] - __attribute__ ((aligned(16))); - int outlen; + // need to resample audio + if (audio_decoder->ReSample) { + int16_t outbuf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + AV_INPUT_BUFFER_PADDING_SIZE] + __attribute__((aligned(16))); + int outlen; - // FIXME: libav-0.7.2 crash here - outlen = audio_resample(audio_decoder->ReSample, outbuf, buf, buf_sz); + // FIXME: libav-0.7.2 crash here + outlen = audio_resample(audio_decoder->ReSample, outbuf, buf, buf_sz); #ifdef DEBUG - if (outlen != buf_sz) { - Debug(3, "codec/audio: possible fixed ffmpeg\n"); - } + if (outlen != buf_sz) { + Debug(3, "codec/audio: possible fixed ffmpeg\n"); + } #endif - if (outlen) { - // outlen seems to be wrong in ffmpeg-0.9 - outlen /= audio_decoder->Channels * - av_get_bytes_per_sample(audio_ctx->sample_fmt); - outlen *= - audio_decoder->HwChannels * - av_get_bytes_per_sample(audio_ctx->sample_fmt); - Debug(4, "codec/audio: %d -> %d\n", buf_sz, outlen); - CodecAudioEnqueue(audio_decoder, outbuf, outlen); - } - } else { - if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { - return; - } + if (outlen) { + // outlen seems to be wrong in ffmpeg-0.9 + outlen /= audio_decoder->Channels * av_get_bytes_per_sample(audio_ctx->sample_fmt); + outlen *= audio_decoder->HwChannels * av_get_bytes_per_sample(audio_ctx->sample_fmt); + Debug(4, "codec/audio: %d -> %d\n", buf_sz, outlen); + CodecAudioEnqueue(audio_decoder, outbuf, outlen); + } + } else { + if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { + return; + } - CodecAudioEnqueue(audio_decoder, 
buf, buf_sz); - } + CodecAudioEnqueue(audio_decoder, buf, buf_sz); + } } } @@ -1568,32 +1506,28 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) delay = AudioGetDelay(); if (!delay) { - return; + return; } clock_gettime(CLOCK_MONOTONIC, &nowtime); if (!audio_decoder->LastDelay) { - audio_decoder->LastTime = nowtime; - audio_decoder->LastPTS = pts; - audio_decoder->LastDelay = delay; - audio_decoder->Drift = 0; - audio_decoder->DriftFrac = 0; - Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", - delay / 90); - return; + audio_decoder->LastTime = nowtime; + audio_decoder->LastPTS = pts; + audio_decoder->LastDelay = delay; + audio_decoder->Drift = 0; + audio_decoder->DriftFrac = 0; + Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", delay / 90); + return; } // collect over some time pts_diff = pts - audio_decoder->LastPTS; if (pts_diff < 10 * 1000 * 90) { - return; + return; } tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec) - * 1000 * 1000 * 1000 + (nowtime.tv_nsec - - audio_decoder->LastTime.tv_nsec); + * 1000 * 1000 * 1000 + (nowtime.tv_nsec - audio_decoder->LastTime.tv_nsec); - drift = - (tim_diff * 90) / (1000 * 1000) - pts_diff + delay - - audio_decoder->LastDelay; + drift = (tim_diff * 90) / (1000 * 1000) - pts_diff + delay - audio_decoder->LastDelay; // adjust rounding error nowtime.tv_nsec -= nowtime.tv_nsec % (1000 * 1000 / 90); @@ -1602,74 +1536,68 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) audio_decoder->LastDelay = delay; if (0) { - Debug(3, - "codec/audio: interval P:%5" PRId64 "ms T:%5" PRId64 "ms D:%4" - PRId64 "ms %f %d\n", pts_diff / 90, tim_diff / (1000 * 1000), - delay / 90, drift / 90.0, audio_decoder->DriftCorr); + Debug(3, "codec/audio: interval P:%5" PRId64 "ms T:%5" PRId64 "ms D:%4" PRId64 "ms %f %d\n", pts_diff / 90, + tim_diff / (1000 * 1000), delay / 90, drift / 90.0, audio_decoder->DriftCorr); } // underruns and av_resample have the same time :((( if (abs(drift) > 10 * 90) { - // drift too big, pts changed? - Debug(3, "codec/audio: drift(%6d) %3dms reset\n", - audio_decoder->DriftCorr, drift / 90); - audio_decoder->LastDelay = 0; + // drift too big, pts changed? 
+ Debug(3, "codec/audio: drift(%6d) %3dms reset\n", audio_decoder->DriftCorr, drift / 90); + audio_decoder->LastDelay = 0; #ifdef DEBUG - corr = 0; // keep gcc happy + corr = 0; // keep gcc happy #endif } else { - drift += audio_decoder->Drift; - audio_decoder->Drift = drift; - corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); - // SPDIF/HDMI passthrough - if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) - && (!(CodecPassthrough & CodecEAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { - audio_decoder->DriftCorr = -corr; - } + drift += audio_decoder->Drift; + audio_decoder->Drift = drift; + corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); + // SPDIF/HDMI passthrough + if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) + || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) + && (!(CodecPassthrough & CodecEAC3) + || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { + audio_decoder->DriftCorr = -corr; + } - if (audio_decoder->DriftCorr < -20000) { // limit correction - audio_decoder->DriftCorr = -20000; - } else if (audio_decoder->DriftCorr > 20000) { - audio_decoder->DriftCorr = 20000; - } + if (audio_decoder->DriftCorr < -20000) { // limit correction + audio_decoder->DriftCorr = -20000; + } else if (audio_decoder->DriftCorr > 20000) { + audio_decoder->DriftCorr = 20000; + } } #ifdef USE_SWRESAMPLE if (audio_decoder->Resample && audio_decoder->DriftCorr) { - int distance; + int distance; - // try workaround for buggy ffmpeg 0.10 - if (abs(audio_decoder->DriftCorr) < 2000) { - distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); - } else { - distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000); - } - if (swr_set_compensation(audio_decoder->Resample, - audio_decoder->DriftCorr / 10, distance)) { - Debug(3, "codec/audio: swr_set_compensation failed\n"); - } + // try workaround for buggy ffmpeg 0.10 + if (abs(audio_decoder->DriftCorr) < 2000) { + distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); + } else { + distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000); + } + if (swr_set_compensation(audio_decoder->Resample, audio_decoder->DriftCorr / 10, distance)) { + Debug(3, "codec/audio: swr_set_compensation failed\n"); + } } #endif #ifdef USE_AVRESAMPLE if (audio_decoder->Resample && audio_decoder->DriftCorr) { - int distance; + int distance; - distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); - if (avresample_set_compensation(audio_decoder->Resample, - audio_decoder->DriftCorr / 10, distance)) { - Debug(3, "codec/audio: swr_set_compensation failed\n"); - } + distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); + if (avresample_set_compensation(audio_decoder->Resample, audio_decoder->DriftCorr / 10, distance)) { + Debug(3, "codec/audio: swr_set_compensation failed\n"); + } } #endif if (1) { - static int c; + static int c; - if (!(c++ % 10)) { - Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", - audio_decoder->DriftCorr, drift * 1000 / 90, corr); - } + if (!(c++ % 10)) { + Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", audio_decoder->DriftCorr, drift * 1000 / 90, corr); + } } #else AudioSetClock(pts); @@ -1687,60 +1615,52 @@ static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder) const AVCodecContext *audio_ctx; if (CodecAudioUpdateHelper(audio_decoder, &passthrough)) { - // FIXME: handle swresample format conversions. 
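The swr_alloc_set_opts()/swr_init() setup that follows a little further down boils down to the sketch below: convert whatever the decoder delivers into interleaved S16 at the hardware sample rate. setup_resampler() is a placeholder name, channel-layout handling is simplified, and error paths are reduced to the minimum, so read it as an illustration of the libswresample API rather than as this plugin's code.

#include <libswresample/swresample.h>
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Sketch (not part of the patch): build an SwrContext that maps the decoder's
 * native sample format/rate onto interleaved S16 at the hardware rate. */
static SwrContext *setup_resampler(const AVCodecContext *audio_ctx, int hw_sample_rate)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
        audio_ctx->channel_layout, AV_SAMPLE_FMT_S16, hw_sample_rate,   /* output */
        audio_ctx->channel_layout, audio_ctx->sample_fmt,
        audio_ctx->sample_rate, 0, NULL);                               /* input */

    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return NULL;
    }
    return swr;
}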
- return; + // FIXME: handle swresample format conversions. + return; } - if (passthrough) { // pass-through no conversion allowed - return; + if (passthrough) { // pass-through no conversion allowed + return; } audio_ctx = audio_decoder->AudioCtx; #ifdef DEBUG - if (audio_ctx->sample_fmt == AV_SAMPLE_FMT_S16 - && audio_ctx->sample_rate == audio_decoder->HwSampleRate - && !CodecAudioDrift) { - // FIXME: use Resample only, when it is needed! - fprintf(stderr, "no resample needed\n"); + if (audio_ctx->sample_fmt == AV_SAMPLE_FMT_S16 && audio_ctx->sample_rate == audio_decoder->HwSampleRate + && !CodecAudioDrift) { + // FIXME: use Resample only, when it is needed! + fprintf(stderr, "no resample needed\n"); } #endif #ifdef USE_SWRESAMPLE audio_decoder->Resample = - swr_alloc_set_opts(audio_decoder->Resample, audio_ctx->channel_layout, - AV_SAMPLE_FMT_S16, audio_decoder->HwSampleRate, - audio_ctx->channel_layout, audio_ctx->sample_fmt, - audio_ctx->sample_rate, 0, NULL); + swr_alloc_set_opts(audio_decoder->Resample, audio_ctx->channel_layout, AV_SAMPLE_FMT_S16, + audio_decoder->HwSampleRate, audio_ctx->channel_layout, audio_ctx->sample_fmt, audio_ctx->sample_rate, 0, + NULL); if (audio_decoder->Resample) { - swr_init(audio_decoder->Resample); + swr_init(audio_decoder->Resample); } else { - Error(_("codec/audio: can't setup resample\n")); + Error(_("codec/audio: can't setup resample\n")); } #endif #ifdef USE_AVRESAMPLE if (!(audio_decoder->Resample = avresample_alloc_context())) { - Error(_("codec/audio: can't setup resample\n")); - return; + Error(_("codec/audio: can't setup resample\n")); + return; } - av_opt_set_int(audio_decoder->Resample, "in_channel_layout", - audio_ctx->channel_layout, 0); - av_opt_set_int(audio_decoder->Resample, "in_sample_fmt", - audio_ctx->sample_fmt, 0); - av_opt_set_int(audio_decoder->Resample, "in_sample_rate", - audio_ctx->sample_rate, 0); - av_opt_set_int(audio_decoder->Resample, "out_channel_layout", - audio_ctx->channel_layout, 0); - av_opt_set_int(audio_decoder->Resample, "out_sample_fmt", - AV_SAMPLE_FMT_S16, 0); - av_opt_set_int(audio_decoder->Resample, "out_sample_rate", - audio_decoder->HwSampleRate, 0); + av_opt_set_int(audio_decoder->Resample, "in_channel_layout", audio_ctx->channel_layout, 0); + av_opt_set_int(audio_decoder->Resample, "in_sample_fmt", audio_ctx->sample_fmt, 0); + av_opt_set_int(audio_decoder->Resample, "in_sample_rate", audio_ctx->sample_rate, 0); + av_opt_set_int(audio_decoder->Resample, "out_channel_layout", audio_ctx->channel_layout, 0); + av_opt_set_int(audio_decoder->Resample, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int(audio_decoder->Resample, "out_sample_rate", audio_decoder->HwSampleRate, 0); if (avresample_open(audio_decoder->Resample)) { - avresample_free(&audio_decoder->Resample); - audio_decoder->Resample = NULL; - Error(_("codec/audio: can't open resample\n")); - return; + avresample_free(&audio_decoder->Resample); + audio_decoder->Resample = NULL; + Error(_("codec/audio: can't open resample\n")); + return; } #endif } @@ -1761,56 +1681,57 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) AVCodecContext *audio_ctx = audio_decoder->AudioCtx; if (audio_ctx->codec_type == AVMEDIA_TYPE_AUDIO) { - int ret; - AVPacket pkt[1]; - AVFrame *frame = audio_decoder->Frame; + int ret; + AVPacket pkt[1]; + AVFrame *frame = audio_decoder->Frame; - av_frame_unref(frame); - *pkt = *avpkt; // use copy - ret = avcodec_send_packet(audio_ctx, pkt); - if (ret < 0) { - Debug(3, "codec: sending audio packet 
failed"); - return; - } - ret = avcodec_receive_frame(audio_ctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { - Debug(3, "codec: receiving audio frame failed"); - return; - } + av_frame_unref(frame); + *pkt = *avpkt; // use copy + ret = avcodec_send_packet(audio_ctx, pkt); + if (ret < 0) { + Debug(3, "codec: sending audio packet failed"); + return; + } + ret = avcodec_receive_frame(audio_ctx, frame); + if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { + Debug(3, "codec: receiving audio frame failed"); + return; + } - if (ret >= 0) { - // update audio clock - if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) { - CodecAudioSetClock(audio_decoder, avpkt->pts); - } - // format change - if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate - || audio_decoder->Channels != audio_ctx->channels) { - CodecAudioUpdateFormat(audio_decoder); - } - if (!audio_decoder->HwSampleRate || !audio_decoder->HwChannels) { - return; // unsupported sample format - } - if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { - return; - } - if (audio_decoder->Resample) { - uint8_t outbuf[8192 * 2 * 8]; - uint8_t *out[1]; + if (ret >= 0) { + // update audio clock + if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) { + CodecAudioSetClock(audio_decoder, avpkt->pts); + } + // format change + if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate + || audio_decoder->Channels != audio_ctx->channels) { + CodecAudioUpdateFormat(audio_decoder); + } + if (!audio_decoder->HwSampleRate || !audio_decoder->HwChannels) { + return; // unsupported sample format + } + if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { + return; + } + if (audio_decoder->Resample) { + uint8_t outbuf[8192 * 2 * 8]; + uint8_t *out[1]; - out[0] = outbuf; - ret = swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels), - (const uint8_t **)frame->extended_data, frame->nb_samples); - if (ret > 0) { - if (!(audio_decoder->Passthrough & CodecPCM)) { - CodecReorderAudioFrame((int16_t *) outbuf, ret * 2 * audio_decoder->HwChannels, - audio_decoder->HwChannels); - } - AudioEnqueue(outbuf, ret * 2 * audio_decoder->HwChannels); - } - return; - } - } + out[0] = outbuf; + ret = + swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels), + (const uint8_t **)frame->extended_data, frame->nb_samples); + if (ret > 0) { + if (!(audio_decoder->Passthrough & CodecPCM)) { + CodecReorderAudioFrame((int16_t *) outbuf, ret * 2 * audio_decoder->HwChannels, + audio_decoder->HwChannels); + } + AudioEnqueue(outbuf, ret * 2 * audio_decoder->HwChannels); + } + return; + } + } } } @@ -1827,16 +1748,16 @@ void CodecAudioFlushBuffers(AudioDecoder * decoder) } //---------------------------------------------------------------------------- -// Codec +// Codec //---------------------------------------------------------------------------- /** ** Empty log callback */ -static void CodecNoopCallback( __attribute__ ((unused)) - void *ptr, __attribute__ ((unused)) - int level, __attribute__ ((unused)) - const char *fmt, __attribute__ ((unused)) va_list vl) +static void CodecNoopCallback( __attribute__((unused)) + void *ptr, __attribute__((unused)) + int level, __attribute__((unused)) + const char *fmt, __attribute__((unused)) va_list vl) { } @@ -1852,7 +1773,7 @@ void CodecInit(void) #else (void)CodecNoopCallback; #endif - avcodec_register_all(); // register all formats and codecs + 
avcodec_register_all(); // register all formats and codecs } /** diff --git a/codec.h b/codec.h index 6396968..26f5cbc 100644 --- a/codec.h +++ b/codec.h @@ -1,97 +1,97 @@ /// -/// @file codec.h @brief Codec module headerfile +/// @file codec.h @brief Codec module headerfile /// -/// Copyright (c) 2009 - 2013, 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2013, 2015 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: bdb4d18dbe371e497d039e45faa7c134b019860a $ +/// $Id: bdb4d18dbe371e497d039e45faa7c134b019860a $ ////////////////////////////////////////////////////////////////////////////// /// @addtogroup Codec /// @{ //---------------------------------------------------------------------------- -// Defines +// Defines //---------------------------------------------------------------------------- -#define CodecPCM 0x01 ///< PCM bit mask -#define CodecMPA 0x02 ///< MPA bit mask (planned) -#define CodecAC3 0x04 ///< AC-3 bit mask -#define CodecEAC3 0x08 ///< E-AC-3 bit mask -#define CodecDTS 0x10 ///< DTS bit mask (planned) +#define CodecPCM 0x01 ///< PCM bit mask +#define CodecMPA 0x02 ///< MPA bit mask (planned) +#define CodecAC3 0x04 ///< AC-3 bit mask +#define CodecEAC3 0x08 ///< E-AC-3 bit mask +#define CodecDTS 0x10 ///< DTS bit mask (planned) #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 -enum HWAccelID { - HWACCEL_NONE = 0, - HWACCEL_AUTO, - HWACCEL_VDPAU, - HWACCEL_DXVA2, - HWACCEL_VDA, - HWACCEL_VIDEOTOOLBOX, - HWACCEL_QSV, - HWACCEL_VAAPI, - HWACCEL_CUVID, +enum HWAccelID +{ + HWACCEL_NONE = 0, + HWACCEL_AUTO, + HWACCEL_VDPAU, + HWACCEL_DXVA2, + HWACCEL_VDA, + HWACCEL_VIDEOTOOLBOX, + HWACCEL_QSV, + HWACCEL_VAAPI, + HWACCEL_CUVID, }; - + extern AVBufferRef *hw_device_ctx; + /// -/// Video decoder structure. +/// Video decoder structure. /// struct _video_decoder_ { - VideoHwDecoder *HwDecoder; ///< video hardware decoder - - int GetFormatDone; ///< flag get format called! - AVCodec *VideoCodec; ///< video codec - AVCodecContext *VideoCtx; ///< video codec context - //#ifdef FFMPEG_WORKAROUND_ARTIFACTS - int FirstKeyFrame; ///< flag first frame - //#endif -// AVFrame *Frame; ///< decoded video frame + VideoHwDecoder *HwDecoder; ///< video hardware decoder - int filter; // flag for deint filter + int GetFormatDone; ///< flag get format called! 
+ AVCodec *VideoCodec; ///< video codec + AVCodecContext *VideoCtx; ///< video codec context + //#ifdef FFMPEG_WORKAROUND_ARTIFACTS + int FirstKeyFrame; ///< flag first frame + //#endif +// AVFrame *Frame; ///< decoded video frame - /* hwaccel options */ - enum HWAccelID hwaccel_id; - char *hwaccel_device; - enum AVPixelFormat hwaccel_output_format; - - /* hwaccel context */ - enum HWAccelID active_hwaccel_id; - void *hwaccel_ctx; - void (*hwaccel_uninit)(AVCodecContext *s); - int (*hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags); - int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame); - enum AVPixelFormat hwaccel_pix_fmt; - enum AVPixelFormat hwaccel_retrieved_pix_fmt; - AVBufferRef *hw_frames_ctx; + int filter; // flag for deint filter - void *hwdec_priv; - // For HDR side-data caching - double cached_hdr_peak; - // From VO - struct mp_hwdec_devices *hwdec_devs; + /* hwaccel options */ + enum HWAccelID hwaccel_id; + char *hwaccel_device; + enum AVPixelFormat hwaccel_output_format; + /* hwaccel context */ + enum HWAccelID active_hwaccel_id; + void *hwaccel_ctx; + void (*hwaccel_uninit)(AVCodecContext * s); + int (*hwaccel_get_buffer)(AVCodecContext * s, AVFrame * frame, int flags); + int (*hwaccel_retrieve_data)(AVCodecContext * s, AVFrame * frame); + enum AVPixelFormat hwaccel_pix_fmt; + enum AVPixelFormat hwaccel_retrieved_pix_fmt; + AVBufferRef *hw_frames_ctx; + + void *hwdec_priv; + // For HDR side-data caching + double cached_hdr_peak; + // From VO + struct mp_hwdec_devices *hwdec_devs; - }; //---------------------------------------------------------------------------- -// Typedefs +// Typedefs //---------------------------------------------------------------------------- /// Video decoder typedef. @@ -111,14 +111,14 @@ extern const char *X11DisplayName; extern AVBufferRef *HwDeviceContext; //---------------------------------------------------------------------------- -// Variables +// Variables //---------------------------------------------------------------------------- /// Flag prefer fast xhannel switch extern char CodecUsePossibleDefectFrames; //---------------------------------------------------------------------------- -// Prototypes +// Prototypes //---------------------------------------------------------------------------- /// Allocate a new video decoder context. diff --git a/common.h b/common.h index 287eb82..381bdc5 100644 --- a/common.h +++ b/common.h @@ -1,3 +1,4 @@ + /* * This file is part of mpv. * @@ -8,7 +9,7 @@ * * mpv is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public @@ -52,22 +53,23 @@ struct GL; typedef struct GL GL; -enum { - MPGL_CAP_ROW_LENGTH = (1 << 4), // GL_[UN]PACK_ROW_LENGTH - MPGL_CAP_FB = (1 << 5), - MPGL_CAP_VAO = (1 << 6), - MPGL_CAP_TEX_RG = (1 << 10), // GL_ARB_texture_rg / GL 3.x - MPGL_CAP_VDPAU = (1 << 11), // GL_NV_vdpau_interop - MPGL_CAP_APPLE_RGB_422 = (1 << 12), // GL_APPLE_rgb_422 - MPGL_CAP_1D_TEX = (1 << 14), - MPGL_CAP_3D_TEX = (1 << 15), - MPGL_CAP_DEBUG = (1 << 16), - MPGL_CAP_DXINTEROP = (1 << 17), // WGL_NV_DX_interop - MPGL_CAP_EXT16 = (1 << 18), // GL_EXT_texture_norm16 - MPGL_CAP_ARB_FLOAT = (1 << 19), // GL_ARB_texture_float - MPGL_CAP_EXT_CR_HFLOAT = (1 << 20), // GL_EXT_color_buffer_half_float +enum +{ + MPGL_CAP_ROW_LENGTH = (1 << 4), // GL_[UN]PACK_ROW_LENGTH + MPGL_CAP_FB = (1 << 5), + MPGL_CAP_VAO = (1 << 6), + MPGL_CAP_TEX_RG = (1 << 10), // GL_ARB_texture_rg / GL 3.x + MPGL_CAP_VDPAU = (1 << 11), // GL_NV_vdpau_interop + MPGL_CAP_APPLE_RGB_422 = (1 << 12), // GL_APPLE_rgb_422 + MPGL_CAP_1D_TEX = (1 << 14), + MPGL_CAP_3D_TEX = (1 << 15), + MPGL_CAP_DEBUG = (1 << 16), + MPGL_CAP_DXINTEROP = (1 << 17), // WGL_NV_DX_interop + MPGL_CAP_EXT16 = (1 << 18), // GL_EXT_texture_norm16 + MPGL_CAP_ARB_FLOAT = (1 << 19), // GL_ARB_texture_float + MPGL_CAP_EXT_CR_HFLOAT = (1 << 20), // GL_EXT_color_buffer_half_float - MPGL_CAP_SW = (1 << 30), // indirect or sw renderer + MPGL_CAP_SW = (1 << 30), // indirect or sw renderer }; // E.g. 310 means 3.1 @@ -78,167 +80,146 @@ enum { #define MPGL_VER_P(ver) MPGL_VER_GET_MAJOR(ver), MPGL_VER_GET_MINOR(ver) -void mpgl_load_functions(GL *gl, void *(*getProcAddress)(const GLubyte *), - const char *ext2, struct mp_log *log); -void mpgl_load_functions2(GL *gl, void *(*get_fn)(void *ctx, const char *n), - void *fn_ctx, const char *ext2, struct mp_log *log); +void mpgl_load_functions(GL * gl, void *(*getProcAddress)(const GLubyte *), const char *ext2, struct mp_log *log); +void mpgl_load_functions2(GL * gl, void *(*get_fn)(void *ctx, const char *n), void *fn_ctx, const char *ext2, + struct mp_log *log); -typedef void (GLAPIENTRY *MP_GLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum, - GLsizei, const GLchar *,const void *); +typedef void (GLAPIENTRY * MP_GLDEBUGPROC) (GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar *, const void *); //function pointers loaded from the OpenGL library -struct GL { - int version; // MPGL_VER() mangled (e.g. 210 for 2.1) - int es; // es version (e.g. 300), 0 for desktop GL - int glsl_version; // e.g. 130 for GLSL 1.30 - char *extensions; // Equivalent to GL_EXTENSIONS - int mpgl_caps; // Bitfield of MPGL_CAP_* constants - bool debug_context; // use of e.g. GLX_CONTEXT_DEBUG_BIT_ARB - GLuint main_fb; // framebuffer to render to (normally 0) +struct GL +{ + int version; // MPGL_VER() mangled (e.g. 210 for 2.1) + int es; // es version (e.g. 300), 0 for desktop GL + int glsl_version; // e.g. 130 for GLSL 1.30 + char *extensions; // Equivalent to GL_EXTENSIONS + int mpgl_caps; // Bitfield of MPGL_CAP_* constants + bool debug_context; // use of e.g. 
GLX_CONTEXT_DEBUG_BIT_ARB + GLuint main_fb; // framebuffer to render to (normally 0) - void (GLAPIENTRY *Viewport)(GLint, GLint, GLsizei, GLsizei); - void (GLAPIENTRY *Clear)(GLbitfield); - void (GLAPIENTRY *GenTextures)(GLsizei, GLuint *); - void (GLAPIENTRY *DeleteTextures)(GLsizei, const GLuint *); - void (GLAPIENTRY *ClearColor)(GLclampf, GLclampf, GLclampf, GLclampf); - void (GLAPIENTRY *Enable)(GLenum); - void (GLAPIENTRY *Disable)(GLenum); - const GLubyte *(GLAPIENTRY * GetString)(GLenum); - void (GLAPIENTRY *BlendFuncSeparate)(GLenum, GLenum, GLenum, GLenum); - void (GLAPIENTRY *Flush)(void); - void (GLAPIENTRY *Finish)(void); - void (GLAPIENTRY *PixelStorei)(GLenum, GLint); - void (GLAPIENTRY *TexImage1D)(GLenum, GLint, GLint, GLsizei, GLint, - GLenum, GLenum, const GLvoid *); - void (GLAPIENTRY *TexImage2D)(GLenum, GLint, GLint, GLsizei, GLsizei, - GLint, GLenum, GLenum, const GLvoid *); - void (GLAPIENTRY *TexSubImage2D)(GLenum, GLint, GLint, GLint, - GLsizei, GLsizei, GLenum, GLenum, - const GLvoid *); - void (GLAPIENTRY *TexParameteri)(GLenum, GLenum, GLint); - void (GLAPIENTRY *GetIntegerv)(GLenum, GLint *); - void (GLAPIENTRY *ReadPixels)(GLint, GLint, GLsizei, GLsizei, GLenum, - GLenum, GLvoid *); - void (GLAPIENTRY *ReadBuffer)(GLenum); - void (GLAPIENTRY *DrawArrays)(GLenum, GLint, GLsizei); - GLenum (GLAPIENTRY *GetError)(void); - void (GLAPIENTRY *GetTexLevelParameteriv)(GLenum, GLint, GLenum, GLint *); - void (GLAPIENTRY *Scissor)(GLint, GLint, GLsizei, GLsizei); + void (GLAPIENTRY * Viewport) (GLint, GLint, GLsizei, GLsizei); + void (GLAPIENTRY * Clear) (GLbitfield); + void (GLAPIENTRY * GenTextures) (GLsizei, GLuint *); + void (GLAPIENTRY * DeleteTextures) (GLsizei, const GLuint *); + void (GLAPIENTRY * ClearColor) (GLclampf, GLclampf, GLclampf, GLclampf); + void (GLAPIENTRY * Enable) (GLenum); + void (GLAPIENTRY * Disable) (GLenum); + const GLubyte *(GLAPIENTRY * GetString) (GLenum); + void (GLAPIENTRY * BlendFuncSeparate) (GLenum, GLenum, GLenum, GLenum); + void (GLAPIENTRY * Flush) (void); + void (GLAPIENTRY * Finish) (void); + void (GLAPIENTRY * PixelStorei) (GLenum, GLint); + void (GLAPIENTRY * TexImage1D) (GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, const GLvoid *); + void (GLAPIENTRY * TexImage2D) (GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, const GLvoid *); + void (GLAPIENTRY * TexSubImage2D) (GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, const GLvoid *); + void (GLAPIENTRY * TexParameteri) (GLenum, GLenum, GLint); + void (GLAPIENTRY * GetIntegerv) (GLenum, GLint *); + void (GLAPIENTRY * ReadPixels) (GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLvoid *); + void (GLAPIENTRY * ReadBuffer) (GLenum); + void (GLAPIENTRY * DrawArrays) (GLenum, GLint, GLsizei); + GLenum(GLAPIENTRY * GetError) (void); + void (GLAPIENTRY * GetTexLevelParameteriv) (GLenum, GLint, GLenum, GLint *); + void (GLAPIENTRY * Scissor) (GLint, GLint, GLsizei, GLsizei); - void (GLAPIENTRY *GenBuffers)(GLsizei, GLuint *); - void (GLAPIENTRY *DeleteBuffers)(GLsizei, const GLuint *); - void (GLAPIENTRY *BindBuffer)(GLenum, GLuint); - void (GLAPIENTRY *BindBufferBase)(GLenum, GLuint, GLuint); - GLvoid * (GLAPIENTRY *MapBufferRange)(GLenum, GLintptr, GLsizeiptr, - GLbitfield); - GLboolean (GLAPIENTRY *UnmapBuffer)(GLenum); - void (GLAPIENTRY *BufferData)(GLenum, intptr_t, const GLvoid *, GLenum); - void (GLAPIENTRY *ActiveTexture)(GLenum); - void (GLAPIENTRY *BindTexture)(GLenum, GLuint); - int (GLAPIENTRY *SwapInterval)(int); - void (GLAPIENTRY 
*TexImage3D)(GLenum, GLint, GLenum, GLsizei, GLsizei, - GLsizei, GLint, GLenum, GLenum, - const GLvoid *); + void (GLAPIENTRY * GenBuffers) (GLsizei, GLuint *); + void (GLAPIENTRY * DeleteBuffers) (GLsizei, const GLuint *); + void (GLAPIENTRY * BindBuffer) (GLenum, GLuint); + void (GLAPIENTRY * BindBufferBase) (GLenum, GLuint, GLuint); + GLvoid *(GLAPIENTRY * MapBufferRange) (GLenum, GLintptr, GLsizeiptr, GLbitfield); + GLboolean(GLAPIENTRY * UnmapBuffer) (GLenum); + void (GLAPIENTRY * BufferData) (GLenum, intptr_t, const GLvoid *, GLenum); + void (GLAPIENTRY * ActiveTexture) (GLenum); + void (GLAPIENTRY * BindTexture) (GLenum, GLuint); + int (GLAPIENTRY * SwapInterval) (int); + void (GLAPIENTRY * TexImage3D) (GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, + const GLvoid *); - void (GLAPIENTRY *GenVertexArrays)(GLsizei, GLuint *); - void (GLAPIENTRY *BindVertexArray)(GLuint); - GLint (GLAPIENTRY *GetAttribLocation)(GLuint, const GLchar *); - void (GLAPIENTRY *EnableVertexAttribArray)(GLuint); - void (GLAPIENTRY *DisableVertexAttribArray)(GLuint); - void (GLAPIENTRY *VertexAttribPointer)(GLuint, GLint, GLenum, GLboolean, - GLsizei, const GLvoid *); - void (GLAPIENTRY *DeleteVertexArrays)(GLsizei, const GLuint *); - void (GLAPIENTRY *UseProgram)(GLuint); - GLint (GLAPIENTRY *GetUniformLocation)(GLuint, const GLchar *); - void (GLAPIENTRY *CompileShader)(GLuint); - GLuint (GLAPIENTRY *CreateProgram)(void); - GLuint (GLAPIENTRY *CreateShader)(GLenum); - void (GLAPIENTRY *ShaderSource)(GLuint, GLsizei, const GLchar **, - const GLint *); - void (GLAPIENTRY *LinkProgram)(GLuint); - void (GLAPIENTRY *AttachShader)(GLuint, GLuint); - void (GLAPIENTRY *DeleteShader)(GLuint); - void (GLAPIENTRY *DeleteProgram)(GLuint); - void (GLAPIENTRY *GetShaderInfoLog)(GLuint, GLsizei, GLsizei *, GLchar *); - void (GLAPIENTRY *GetShaderiv)(GLuint, GLenum, GLint *); - void (GLAPIENTRY *GetProgramInfoLog)(GLuint, GLsizei, GLsizei *, GLchar *); - void (GLAPIENTRY *GetProgramiv)(GLenum, GLenum, GLint *); - const GLubyte* (GLAPIENTRY *GetStringi)(GLenum, GLuint); - void (GLAPIENTRY *BindAttribLocation)(GLuint, GLuint, const GLchar *); - void (GLAPIENTRY *BindFramebuffer)(GLenum, GLuint); - void (GLAPIENTRY *GenFramebuffers)(GLsizei, GLuint *); - void (GLAPIENTRY *DeleteFramebuffers)(GLsizei, const GLuint *); - GLenum (GLAPIENTRY *CheckFramebufferStatus)(GLenum); - void (GLAPIENTRY *FramebufferTexture2D)(GLenum, GLenum, GLenum, GLuint, - GLint); - void (GLAPIENTRY *BlitFramebuffer)(GLint, GLint, GLint, GLint, GLint, GLint, - GLint, GLint, GLbitfield, GLenum); - void (GLAPIENTRY *GetFramebufferAttachmentParameteriv)(GLenum, GLenum, - GLenum, GLint *); + void (GLAPIENTRY * GenVertexArrays) (GLsizei, GLuint *); + void (GLAPIENTRY * BindVertexArray) (GLuint); + GLint(GLAPIENTRY * GetAttribLocation) (GLuint, const GLchar *); + void (GLAPIENTRY * EnableVertexAttribArray) (GLuint); + void (GLAPIENTRY * DisableVertexAttribArray) (GLuint); + void (GLAPIENTRY * VertexAttribPointer) (GLuint, GLint, GLenum, GLboolean, GLsizei, const GLvoid *); + void (GLAPIENTRY * DeleteVertexArrays) (GLsizei, const GLuint *); + void (GLAPIENTRY * UseProgram) (GLuint); + GLint(GLAPIENTRY * GetUniformLocation) (GLuint, const GLchar *); + void (GLAPIENTRY * CompileShader) (GLuint); + GLuint(GLAPIENTRY * CreateProgram) (void); + GLuint(GLAPIENTRY * CreateShader) (GLenum); + void (GLAPIENTRY * ShaderSource) (GLuint, GLsizei, const GLchar **, const GLint *); + void (GLAPIENTRY * LinkProgram) (GLuint); + void (GLAPIENTRY * 
AttachShader) (GLuint, GLuint); + void (GLAPIENTRY * DeleteShader) (GLuint); + void (GLAPIENTRY * DeleteProgram) (GLuint); + void (GLAPIENTRY * GetShaderInfoLog) (GLuint, GLsizei, GLsizei *, GLchar *); + void (GLAPIENTRY * GetShaderiv) (GLuint, GLenum, GLint *); + void (GLAPIENTRY * GetProgramInfoLog) (GLuint, GLsizei, GLsizei *, GLchar *); + void (GLAPIENTRY * GetProgramiv) (GLenum, GLenum, GLint *); + const GLubyte *(GLAPIENTRY * GetStringi) (GLenum, GLuint); + void (GLAPIENTRY * BindAttribLocation) (GLuint, GLuint, const GLchar *); + void (GLAPIENTRY * BindFramebuffer) (GLenum, GLuint); + void (GLAPIENTRY * GenFramebuffers) (GLsizei, GLuint *); + void (GLAPIENTRY * DeleteFramebuffers) (GLsizei, const GLuint *); + GLenum(GLAPIENTRY * CheckFramebufferStatus) (GLenum); + void (GLAPIENTRY * FramebufferTexture2D) (GLenum, GLenum, GLenum, GLuint, GLint); + void (GLAPIENTRY * BlitFramebuffer) (GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum); + void (GLAPIENTRY * GetFramebufferAttachmentParameteriv) (GLenum, GLenum, GLenum, GLint *); - void (GLAPIENTRY *Uniform1f)(GLint, GLfloat); - void (GLAPIENTRY *Uniform2f)(GLint, GLfloat, GLfloat); - void (GLAPIENTRY *Uniform3f)(GLint, GLfloat, GLfloat, GLfloat); - void (GLAPIENTRY *Uniform4f)(GLint, GLfloat, GLfloat, GLfloat, GLfloat); - void (GLAPIENTRY *Uniform1i)(GLint, GLint); - void (GLAPIENTRY *UniformMatrix2fv)(GLint, GLsizei, GLboolean, - const GLfloat *); - void (GLAPIENTRY *UniformMatrix3fv)(GLint, GLsizei, GLboolean, - const GLfloat *); + void (GLAPIENTRY * Uniform1f) (GLint, GLfloat); + void (GLAPIENTRY * Uniform2f) (GLint, GLfloat, GLfloat); + void (GLAPIENTRY * Uniform3f) (GLint, GLfloat, GLfloat, GLfloat); + void (GLAPIENTRY * Uniform4f) (GLint, GLfloat, GLfloat, GLfloat, GLfloat); + void (GLAPIENTRY * Uniform1i) (GLint, GLint); + void (GLAPIENTRY * UniformMatrix2fv) (GLint, GLsizei, GLboolean, const GLfloat *); + void (GLAPIENTRY * UniformMatrix3fv) (GLint, GLsizei, GLboolean, const GLfloat *); - void (GLAPIENTRY *InvalidateFramebuffer)(GLenum, GLsizei, const GLenum *); + void (GLAPIENTRY * InvalidateFramebuffer) (GLenum, GLsizei, const GLenum *); - GLsync (GLAPIENTRY *FenceSync)(GLenum, GLbitfield); - GLenum (GLAPIENTRY *ClientWaitSync)(GLsync, GLbitfield, GLuint64); - void (GLAPIENTRY *DeleteSync)(GLsync sync); + GLsync(GLAPIENTRY * FenceSync) (GLenum, GLbitfield); + GLenum(GLAPIENTRY * ClientWaitSync) (GLsync, GLbitfield, GLuint64); + void (GLAPIENTRY * DeleteSync) (GLsync sync); - void (GLAPIENTRY *GenQueries)(GLsizei, GLuint *); - void (GLAPIENTRY *DeleteQueries)(GLsizei, const GLuint *); - void (GLAPIENTRY *BeginQuery)(GLenum, GLuint); - void (GLAPIENTRY *EndQuery)(GLenum); - void (GLAPIENTRY *QueryCounter)(GLuint, GLenum); - GLboolean (GLAPIENTRY *IsQuery)(GLuint); - void (GLAPIENTRY *GetQueryObjectiv)(GLuint, GLenum, GLint *); - void (GLAPIENTRY *GetQueryObjecti64v)(GLuint, GLenum, GLint64 *); - void (GLAPIENTRY *GetQueryObjectuiv)(GLuint, GLenum, GLuint *); - void (GLAPIENTRY *GetQueryObjectui64v)(GLuint, GLenum, GLuint64 *); + void (GLAPIENTRY * GenQueries) (GLsizei, GLuint *); + void (GLAPIENTRY * DeleteQueries) (GLsizei, const GLuint *); + void (GLAPIENTRY * BeginQuery) (GLenum, GLuint); + void (GLAPIENTRY * EndQuery) (GLenum); + void (GLAPIENTRY * QueryCounter) (GLuint, GLenum); + GLboolean(GLAPIENTRY * IsQuery) (GLuint); + void (GLAPIENTRY * GetQueryObjectiv) (GLuint, GLenum, GLint *); + void (GLAPIENTRY * GetQueryObjecti64v) (GLuint, GLenum, GLint64 *); + void (GLAPIENTRY * GetQueryObjectuiv) 
(GLuint, GLenum, GLuint *); + void (GLAPIENTRY * GetQueryObjectui64v) (GLuint, GLenum, GLuint64 *); - void (GLAPIENTRY *VDPAUInitNV)(const GLvoid *, const GLvoid *); - void (GLAPIENTRY *VDPAUFiniNV)(void); - GLvdpauSurfaceNV (GLAPIENTRY *VDPAURegisterOutputSurfaceNV) - (GLvoid *, GLenum, GLsizei, const GLuint *); - GLvdpauSurfaceNV (GLAPIENTRY *VDPAURegisterVideoSurfaceNV) - (GLvoid *, GLenum, GLsizei, const GLuint *); - void (GLAPIENTRY *VDPAUUnregisterSurfaceNV)(GLvdpauSurfaceNV); - void (GLAPIENTRY *VDPAUSurfaceAccessNV)(GLvdpauSurfaceNV, GLenum); - void (GLAPIENTRY *VDPAUMapSurfacesNV)(GLsizei, const GLvdpauSurfaceNV *); - void (GLAPIENTRY *VDPAUUnmapSurfacesNV)(GLsizei, const GLvdpauSurfaceNV *); + void (GLAPIENTRY * VDPAUInitNV) (const GLvoid *, const GLvoid *); + void (GLAPIENTRY * VDPAUFiniNV) (void); + GLvdpauSurfaceNV(GLAPIENTRY * VDPAURegisterOutputSurfaceNV) + (GLvoid *, GLenum, GLsizei, const GLuint *); + GLvdpauSurfaceNV(GLAPIENTRY * VDPAURegisterVideoSurfaceNV) + (GLvoid *, GLenum, GLsizei, const GLuint *); + void (GLAPIENTRY * VDPAUUnregisterSurfaceNV) (GLvdpauSurfaceNV); + void (GLAPIENTRY * VDPAUSurfaceAccessNV) (GLvdpauSurfaceNV, GLenum); + void (GLAPIENTRY * VDPAUMapSurfacesNV) (GLsizei, const GLvdpauSurfaceNV *); + void (GLAPIENTRY * VDPAUUnmapSurfacesNV) (GLsizei, const GLvdpauSurfaceNV *); #if HAVE_GL_WIN32 // The HANDLE type might not be present on non-Win32 - BOOL (GLAPIENTRY *DXSetResourceShareHandleNV)(void *dxObject, - HANDLE shareHandle); - HANDLE (GLAPIENTRY *DXOpenDeviceNV)(void *dxDevice); - BOOL (GLAPIENTRY *DXCloseDeviceNV)(HANDLE hDevice); - HANDLE (GLAPIENTRY *DXRegisterObjectNV)(HANDLE hDevice, void *dxObject, - GLuint name, GLenum type, GLenum access); - BOOL (GLAPIENTRY *DXUnregisterObjectNV)(HANDLE hDevice, HANDLE hObject); - BOOL (GLAPIENTRY *DXLockObjectsNV)(HANDLE hDevice, GLint count, - HANDLE *hObjects); - BOOL (GLAPIENTRY *DXUnlockObjectsNV)(HANDLE hDevice, GLint count, - HANDLE *hObjects); + BOOL(GLAPIENTRY * DXSetResourceShareHandleNV) (void *dxObject, HANDLE shareHandle); + HANDLE(GLAPIENTRY * DXOpenDeviceNV) (void *dxDevice); + BOOL(GLAPIENTRY * DXCloseDeviceNV) (HANDLE hDevice); + HANDLE(GLAPIENTRY * DXRegisterObjectNV) (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access); + BOOL(GLAPIENTRY * DXUnregisterObjectNV) (HANDLE hDevice, HANDLE hObject); + BOOL(GLAPIENTRY * DXLockObjectsNV) (HANDLE hDevice, GLint count, HANDLE * hObjects); + BOOL(GLAPIENTRY * DXUnlockObjectsNV) (HANDLE hDevice, GLint count, HANDLE * hObjects); #endif - GLint (GLAPIENTRY *GetVideoSync)(GLuint *); - GLint (GLAPIENTRY *WaitVideoSync)(GLint, GLint, unsigned int *); + GLint(GLAPIENTRY * GetVideoSync) (GLuint *); + GLint(GLAPIENTRY * WaitVideoSync) (GLint, GLint, unsigned int *); - void (GLAPIENTRY *GetTranslatedShaderSourceANGLE)(GLuint, GLsizei, - GLsizei*, GLchar* source); + void (GLAPIENTRY * GetTranslatedShaderSourceANGLE) (GLuint, GLsizei, GLsizei *, GLchar * source); - void (GLAPIENTRY *DebugMessageCallback)(MP_GLDEBUGPROC callback, - const void *userParam); + void (GLAPIENTRY * DebugMessageCallback) (MP_GLDEBUGPROC callback, const void *userParam); - void *(GLAPIENTRY *MPGetNativeDisplay)(const char *name); + void *(GLAPIENTRY * MPGetNativeDisplay) (const char *name); }; #endif /* MPLAYER_GL_COMMON_H */ diff --git a/config.h b/config.h index 7f016e8..5d300dd 100644 --- a/config.h +++ b/config.h @@ -1,3 +1,4 @@ + /* * This file is part of libplacebo. 
* @@ -8,7 +9,7 @@ * * libplacebo is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public @@ -39,5 +40,4 @@ #define PL_HAVE_SHADERC 0 #define PL_HAVE_VULKAN 1 - #endif // LIBPLACEBO_CONTEXT_H_ diff --git a/drvapi_error_string.h b/drvapi_error_string.h index 3c07b4f..485f9be 100644 --- a/drvapi_error_string.h +++ b/drvapi_error_string.h @@ -1,3 +1,4 @@ + /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * @@ -16,88 +17,91 @@ #include #include -#ifdef __cuda_cuda_h__ // check to see if CUDA_H is included above +#ifdef __cuda_cuda_h__ // check to see if CUDA_H is included above // Error Code string definitions here typedef struct { char const *error_string; - unsigned int error_id; + unsigned int error_id; } s_CudaErrorStr; /** * Error codes */ -s_CudaErrorStr sCudaDrvErrorString[] = -{ +s_CudaErrorStr sCudaDrvErrorString[] = { + /** * The API call returned with no errors. In the case of query calls, this * can also mean that the operation being queried is complete (see * ::cuEventQuery() and ::cuStreamQuery()). */ - { "CUDA_SUCCESS", 0 }, + {"CUDA_SUCCESS", 0}, /** * This indicates that one or more of the parameters passed to the API call * is not within an acceptable range of values. */ - { "CUDA_ERROR_INVALID_VALUE", 1 }, + {"CUDA_ERROR_INVALID_VALUE", 1}, /** * The API call failed because it was unable to allocate enough memory to * perform the requested operation. */ - { "CUDA_ERROR_OUT_OF_MEMORY", 2 }, + {"CUDA_ERROR_OUT_OF_MEMORY", 2}, /** * This indicates that the CUDA driver has not been initialized with * ::cuInit() or that initialization has failed. */ - { "CUDA_ERROR_NOT_INITIALIZED", 3 }, + {"CUDA_ERROR_NOT_INITIALIZED", 3}, /** * This indicates that the CUDA driver is in the process of shutting down. */ - { "CUDA_ERROR_DEINITIALIZED", 4 }, + {"CUDA_ERROR_DEINITIALIZED", 4}, /** * This indicates profiling APIs are called while application is running * in visual profiler mode. */ - { "CUDA_ERROR_PROFILER_DISABLED", 5 }, + {"CUDA_ERROR_PROFILER_DISABLED", 5}, + /** * This indicates profiling has not been initialized for this context. * Call cuProfilerInitialize() to resolve this. */ - { "CUDA_ERROR_PROFILER_NOT_INITIALIZED", 6 }, + {"CUDA_ERROR_PROFILER_NOT_INITIALIZED", 6}, + /** * This indicates profiler has already been started and probably * cuProfilerStart() is incorrectly called. */ - { "CUDA_ERROR_PROFILER_ALREADY_STARTED", 7 }, + {"CUDA_ERROR_PROFILER_ALREADY_STARTED", 7}, + /** * This indicates profiler has already been stopped and probably * cuProfilerStop() is incorrectly called. */ - { "CUDA_ERROR_PROFILER_ALREADY_STOPPED", 8 }, + {"CUDA_ERROR_PROFILER_ALREADY_STOPPED", 8}, + /** * This indicates that no CUDA-capable devices were detected by the installed * CUDA driver. */ - { "CUDA_ERROR_NO_DEVICE (no CUDA-capable devices were detected)", 100 }, + {"CUDA_ERROR_NO_DEVICE (no CUDA-capable devices were detected)", 100}, /** * This indicates that the device ordinal supplied by the user does not * correspond to a valid CUDA device. 
*/ - { "CUDA_ERROR_INVALID_DEVICE (device specified is not a valid CUDA device)", 101 }, - + {"CUDA_ERROR_INVALID_DEVICE (device specified is not a valid CUDA device)", 101}, /** * This indicates that the device kernel image is invalid. This can also * indicate an invalid CUDA module. */ - { "CUDA_ERROR_INVALID_IMAGE", 200 }, + {"CUDA_ERROR_INVALID_IMAGE", 200}, /** * This most frequently indicates that there is no context bound to the @@ -107,7 +111,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * mixes different API versions (i.e. 3010 context with 3020 API calls). * See ::cuCtxGetApiVersion() for more details. */ - { "CUDA_ERROR_INVALID_CONTEXT", 201 }, + {"CUDA_ERROR_INVALID_CONTEXT", 201}, /** * This indicated that the context being supplied as a parameter to the @@ -116,28 +120,28 @@ s_CudaErrorStr sCudaDrvErrorString[] = * This error return is deprecated as of CUDA 3.2. It is no longer an * error to attempt to push the active context via ::cuCtxPushCurrent(). */ - { "CUDA_ERROR_CONTEXT_ALREADY_CURRENT", 202 }, + {"CUDA_ERROR_CONTEXT_ALREADY_CURRENT", 202}, /** * This indicates that a map or register operation has failed. */ - { "CUDA_ERROR_MAP_FAILED", 205 }, + {"CUDA_ERROR_MAP_FAILED", 205}, /** * This indicates that an unmap or unregister operation has failed. */ - { "CUDA_ERROR_UNMAP_FAILED", 206 }, + {"CUDA_ERROR_UNMAP_FAILED", 206}, /** * This indicates that the specified array is currently mapped and thus * cannot be destroyed. */ - { "CUDA_ERROR_ARRAY_IS_MAPPED", 207 }, + {"CUDA_ERROR_ARRAY_IS_MAPPED", 207}, /** * This indicates that the resource is already mapped. */ - { "CUDA_ERROR_ALREADY_MAPPED", 208 }, + {"CUDA_ERROR_ALREADY_MAPPED", 208}, /** * This indicates that there is no kernel image available that is suitable @@ -145,115 +149,112 @@ s_CudaErrorStr sCudaDrvErrorString[] = * options for a particular CUDA source file that do not include the * corresponding device configuration. */ - { "CUDA_ERROR_NO_BINARY_FOR_GPU", 209 }, + {"CUDA_ERROR_NO_BINARY_FOR_GPU", 209}, /** * This indicates that a resource has already been acquired. */ - { "CUDA_ERROR_ALREADY_ACQUIRED", 210 }, + {"CUDA_ERROR_ALREADY_ACQUIRED", 210}, /** * This indicates that a resource is not mapped. */ - { "CUDA_ERROR_NOT_MAPPED", 211 }, + {"CUDA_ERROR_NOT_MAPPED", 211}, /** * This indicates that a mapped resource is not available for access as an * array. */ - { "CUDA_ERROR_NOT_MAPPED_AS_ARRAY", 212 }, + {"CUDA_ERROR_NOT_MAPPED_AS_ARRAY", 212}, /** * This indicates that a mapped resource is not available for access as a * pointer. */ - { "CUDA_ERROR_NOT_MAPPED_AS_POINTER", 213 }, + {"CUDA_ERROR_NOT_MAPPED_AS_POINTER", 213}, /** * This indicates that an uncorrectable ECC error was detected during * execution. */ - { "CUDA_ERROR_ECC_UNCORRECTABLE", 214 }, + {"CUDA_ERROR_ECC_UNCORRECTABLE", 214}, /** * This indicates that the ::CUlimit passed to the API call is not * supported by the active device. */ - { "CUDA_ERROR_UNSUPPORTED_LIMIT", 215 }, + {"CUDA_ERROR_UNSUPPORTED_LIMIT", 215}, /** * This indicates that the ::CUcontext passed to the API call can * only be bound to a single CPU thread at a time but is already * bound to a CPU thread. */ - { "CUDA_ERROR_CONTEXT_ALREADY_IN_USE", 216 }, + {"CUDA_ERROR_CONTEXT_ALREADY_IN_USE", 216}, /** * This indicates that peer access is not supported across the given * devices. */ - { "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", 217 }, + {"CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", 217}, /** * This indicates that a PTX JIT compilation failed. 
*/ - { "CUDA_ERROR_INVALID_PTX", 218 }, + {"CUDA_ERROR_INVALID_PTX", 218}, /** * This indicates an error with OpenGL or DirectX context. */ - { "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", 219 }, + {"CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", 219}, /** * This indicates that an uncorrectable NVLink error was detected during the * execution. */ - { "CUDA_ERROR_NVLINK_UNCORRECTABLE", 220 }, + {"CUDA_ERROR_NVLINK_UNCORRECTABLE", 220}, /** * This indicates that the PTX JIT compiler library was not found. */ - { "CUDA_ERROR_JIT_COMPILER_NOT_FOUND", 221 }, + {"CUDA_ERROR_JIT_COMPILER_NOT_FOUND", 221}, /** * This indicates that the device kernel source is invalid. */ - { "CUDA_ERROR_INVALID_SOURCE", 300 }, + {"CUDA_ERROR_INVALID_SOURCE", 300}, /** * This indicates that the file specified was not found. */ - { "CUDA_ERROR_FILE_NOT_FOUND", 301 }, + {"CUDA_ERROR_FILE_NOT_FOUND", 301}, /** * This indicates that a link to a shared object failed to resolve. */ - { "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", 302 }, + {"CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", 302}, /** * This indicates that initialization of a shared object failed. */ - { "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", 303 }, + {"CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", 303}, /** * This indicates that an OS call failed. */ - { "CUDA_ERROR_OPERATING_SYSTEM", 304 }, - + {"CUDA_ERROR_OPERATING_SYSTEM", 304}, /** * This indicates that a resource handle passed to the API call was not * valid. Resource handles are opaque types like ::CUstream and ::CUevent. */ - { "CUDA_ERROR_INVALID_HANDLE", 400 }, - + {"CUDA_ERROR_INVALID_HANDLE", 400}, /** * This indicates that a named symbol was not found. Examples of symbols * are global/constant variable names, texture names }, and surface names. */ - { "CUDA_ERROR_NOT_FOUND", 500 }, - + {"CUDA_ERROR_NOT_FOUND", 500}, /** * This indicates that asynchronous operations issued previously have not @@ -261,8 +262,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * differently than ::CUDA_SUCCESS (which indicates completion). Calls that * may return this value include ::cuEventQuery() and ::cuStreamQuery(). */ - { "CUDA_ERROR_NOT_READY", 600 }, - + {"CUDA_ERROR_NOT_READY", 600}, /** * While executing a kernel, the device encountered a @@ -271,7 +271,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * will return the same error. To continue using CUDA, the process must be terminated * and relaunched. */ - { "CUDA_ERROR_ILLEGAL_ADDRESS", 700 }, + {"CUDA_ERROR_ILLEGAL_ADDRESS", 700}, /** * This indicates that a launch did not occur because it did not have @@ -282,7 +282,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * when a 32-bit int is expected) is equivalent to passing too many * arguments and can also result in this error. */ - { "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", 701 }, + {"CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", 701}, /** * This indicates that the device kernel took too long to execute. This can @@ -293,40 +293,40 @@ s_CudaErrorStr sCudaDrvErrorString[] = * this context are invalid and must be reconstructed if the program is to * continue using CUDA. */ - { "CUDA_ERROR_LAUNCH_TIMEOUT", 702 }, + {"CUDA_ERROR_LAUNCH_TIMEOUT", 702}, /** * This error indicates a kernel launch that uses an incompatible texturing * mode. */ - { "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", 703 }, + {"CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", 703}, /** * This error indicates that a call to ::cuCtxEnablePeerAccess() is * trying to re-enable peer access to a context which has already * had peer access to it enabled. 
*/ - { "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", 704 }, + {"CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", 704}, /** * This error indicates that ::cuCtxDisablePeerAccess() is * trying to disable peer access which has not been enabled yet * via ::cuCtxEnablePeerAccess(). */ - { "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", 705 }, + {"CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", 705}, /** * This error indicates that the primary context for the specified device * has already been initialized. */ - { "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", 708 }, + {"CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", 708}, /** * This error indicates that the context current to the calling thread * has been destroyed using ::cuCtxDestroy }, or is a primary context which * has not yet been initialized. */ - { "CUDA_ERROR_CONTEXT_IS_DESTROYED", 709 }, + {"CUDA_ERROR_CONTEXT_IS_DESTROYED", 709}, /** * A device-side assert triggered during kernel execution. The context @@ -334,26 +334,26 @@ s_CudaErrorStr sCudaDrvErrorString[] = * memory allocations from this context are invalid and must be * reconstructed if the program is to continue using CUDA. */ - { "CUDA_ERROR_ASSERT", 710 }, + {"CUDA_ERROR_ASSERT", 710}, /** * This error indicates that the hardware resources required to enable * peer access have been exhausted for one or more of the devices * passed to ::cuCtxEnablePeerAccess(). */ - { "CUDA_ERROR_TOO_MANY_PEERS", 711 }, + {"CUDA_ERROR_TOO_MANY_PEERS", 711}, /** * This error indicates that the memory range passed to ::cuMemHostRegister() * has already been registered. */ - { "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", 712 }, + {"CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", 712}, /** * This error indicates that the pointer passed to ::cuMemHostUnregister() * does not correspond to any currently registered memory region. */ - { "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", 713 }, + {"CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", 713}, /** * While executing a kernel, the device encountered a stack error. @@ -362,7 +362,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * will return the same error. To continue using CUDA, the process must be terminated * and relaunched. */ - { "CUDA_ERROR_HARDWARE_STACK_ERROR", 714 }, + {"CUDA_ERROR_HARDWARE_STACK_ERROR", 714}, /** * While executing a kernel, the device encountered an illegal instruction. @@ -370,7 +370,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * will return the same error. To continue using CUDA, the process must be terminated * and relaunched. */ - { "CUDA_ERROR_ILLEGAL_INSTRUCTION", 715 }, + {"CUDA_ERROR_ILLEGAL_INSTRUCTION", 715}, /** * While executing a kernel, the device encountered a load or store instruction @@ -379,7 +379,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * will return the same error. To continue using CUDA, the process must be terminated * and relaunched. */ - { "CUDA_ERROR_MISALIGNED_ADDRESS", 716 }, + {"CUDA_ERROR_MISALIGNED_ADDRESS", 716}, /** * While executing a kernel, the device encountered an instruction @@ -390,7 +390,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * will return the same error. To continue using CUDA, the process must be terminated * and relaunched. */ - { "CUDA_ERROR_INVALID_ADDRESS_SPACE", 717 }, + {"CUDA_ERROR_INVALID_ADDRESS_SPACE", 717}, /** * While executing a kernel, the device program counter wrapped its address space. @@ -398,7 +398,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * will return the same error. To continue using CUDA, the process must be terminated * and relaunched. 
*/ - { "CUDA_ERROR_INVALID_PC", 718 }, + {"CUDA_ERROR_INVALID_PC", 718}, /** * An exception occurred on the device while executing a kernel. Common @@ -408,7 +408,7 @@ s_CudaErrorStr sCudaDrvErrorString[] = * memory allocations from this context are invalid and must be * reconstructed if the program is to continue using CUDA. */ - { "CUDA_ERROR_LAUNCH_FAILED", 719 }, + {"CUDA_ERROR_LAUNCH_FAILED", 719}, /** * This error indicates that the number of blocks launched per grid for a kernel that was @@ -417,26 +417,24 @@ s_CudaErrorStr sCudaDrvErrorString[] = * or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. */ - { "CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE", 720 }, - + {"CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE", 720}, /** * This error indicates that the attempted operation is not permitted. */ - { "CUDA_ERROR_NOT_PERMITTED", 800 }, + {"CUDA_ERROR_NOT_PERMITTED", 800}, /** * This error indicates that the attempted operation is not supported * on the current system or device. */ - { "CUDA_ERROR_NOT_SUPPORTED", 801 }, - + {"CUDA_ERROR_NOT_SUPPORTED", 801}, /** * This indicates that an unknown internal error has occurred. */ - { "CUDA_ERROR_UNKNOWN", 999 }, - { NULL, -1 } + {"CUDA_ERROR_UNKNOWN", 999}, + {NULL, -1} }; // This is just a linear search through the array, since the error_id's are not @@ -445,9 +443,7 @@ static inline const char *getCudaDrvErrorString(CUresult error_id) { int index = 0; - while (sCudaDrvErrorString[index].error_id != error_id && - (int)sCudaDrvErrorString[index].error_id != -1) - { + while (sCudaDrvErrorString[index].error_id != error_id && (int)sCudaDrvErrorString[index].error_id != -1) { index++; } @@ -459,5 +455,4 @@ static inline const char *getCudaDrvErrorString(CUresult error_id) #endif // __cuda_cuda_h__ - #endif diff --git a/iatomic.h b/iatomic.h index f0021fb..aa62922 100644 --- a/iatomic.h +++ b/iatomic.h @@ -1,23 +1,23 @@ /// -/// @file iatomic.h @brief Misc function header file +/// @file iatomic.h @brief Misc function header file /// -/// Copyright (c) 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2014 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. 
/// -/// $Id: 59e1684aaa6678ecdebb6ce0df6ce5b5f461dd6e $ +/// $Id: 59e1684aaa6678ecdebb6ce0df6ce5b5f461dd6e $ ////////////////////////////////////////////////////////////////////////////// /// @addtogroup iatomic @@ -27,8 +27,8 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) -// gcc before 4.7 didn't support atomic builtins, -// use alsa atomic functions. +// gcc before 4.7 didn't support atomic builtins, +// use alsa atomic functions. #if GCC_VERSION < 40700 #include @@ -36,58 +36,58 @@ #else ////////////////////////////////////////////////////////////////////////////// -// Defines +// Defines ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// -// Declares +// Declares ////////////////////////////////////////////////////////////////////////////// /// -/// atomic type, 24 bit useable, +/// atomic type, 24 bit useable, /// typedef volatile int atomic_t; ////////////////////////////////////////////////////////////////////////////// -// Prototypes +// Prototypes ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// -// Inlines +// Inlines ////////////////////////////////////////////////////////////////////////////// /// -/// Set atomic value. +/// Set atomic value. /// #define atomic_set(ptr, val) \ __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST) /// -/// Read atomic value. +/// Read atomic value. /// #define atomic_read(ptr) \ __atomic_load_n(ptr, __ATOMIC_SEQ_CST) /// -/// Increment atomic value. +/// Increment atomic value. /// #define atomic_inc(ptr) \ __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) /// -/// Decrement atomic value. +/// Decrement atomic value. /// #define atomic_dec(ptr) \ __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) /// -/// Add to atomic value. +/// Add to atomic value. /// #define atomic_add(val, ptr) \ __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST) /// -/// Subtract from atomic value. +/// Subtract from atomic value. /// #define atomic_sub(val, ptr) \ __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST) diff --git a/misc.h b/misc.h index de29cc5..075ec12 100644 --- a/misc.h +++ b/misc.h @@ -1,24 +1,24 @@ /// -/// @file misc.h @brief Misc function header file +/// @file misc.h @brief Misc function header file /// -/// Copyright (c) 2009 - 2012 by Lutz Sammer. All Rights Reserved. +/// Copyright (c) 2009 - 2012 by Lutz Sammer. All Rights Reserved. /// -/// Contributor(s): -/// Copied from uwm. +/// Contributor(s): +/// Copied from uwm. /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. 
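The atomic_set/atomic_read/atomic_inc/atomic_dec/atomic_add/atomic_sub macros above map either to GCC __atomic builtins (gcc >= 4.7) or to the ALSA iatomic fallback; the ring buffer further down uses exactly one such counter (Filled) as the only value shared between reader and writer. A small sketch of that usage pattern, with illustrative function names:

#include <stddef.h>
#include "iatomic.h"

static atomic_t Filled;                    /* bytes in use; the only shared state */

static void writer_publish(int cnt)
{
    atomic_add(cnt, &Filled);              /* writer: make cnt freshly written bytes visible */
}

static void reader_consume(int cnt)
{
    atomic_sub(cnt, &Filled);              /* reader: release cnt consumed bytes */
}

static size_t used_bytes(void)
{
    return (size_t)atomic_read(&Filled);   /* either side may poll this */
}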
+/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: f5ff4b300aa33eb721d658c0c9374c8499b67318 $ +/// $Id: f5ff4b300aa33eb721d658c0c9374c8499b67318 $ ////////////////////////////////////////////////////////////////////////////// /// @addtogroup misc @@ -26,37 +26,37 @@ #include #include -#include // clock_gettime +#include // clock_gettime ////////////////////////////////////////////////////////////////////////////// -// Defines +// Defines ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// -// Declares +// Declares ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// -// Variables +// Variables ////////////////////////////////////////////////////////////////////////////// -extern int SysLogLevel; ///< how much information wanted +extern int SysLogLevel; ///< how much information wanted ////////////////////////////////////////////////////////////////////////////// -// Prototypes +// Prototypes ////////////////////////////////////////////////////////////////////////////// static inline void Syslog(const int, const char *format, ...) - __attribute__ ((format(printf, 2, 3))); + __attribute__((format(printf, 2, 3))); ////////////////////////////////////////////////////////////////////////////// -// Inlines +// Inlines ////////////////////////////////////////////////////////////////////////////// #ifdef DEBUG -#define DebugLevel 4 /// private debug level +#define DebugLevel 4 /// private debug level #else -#define DebugLevel 0 /// private debug level +#define DebugLevel 0 /// private debug level #endif /** @@ -70,11 +70,11 @@ static inline void Syslog(const int, const char *format, ...) static inline void Syslog(const int level, const char *format, ...) { if (SysLogLevel > level || DebugLevel > level) { - va_list ap; + va_list ap; - va_start(ap, format); - vsyslog(LOG_ERR, format, ap); - va_end(ap); + va_start(ap, format); + vsyslog(LOG_ERR, format, ap); + va_end(ap); } } @@ -104,7 +104,7 @@ static inline void Syslog(const int level, const char *format, ...) #ifdef DEBUG #define Debug(level, fmt...) Syslog(level, fmt) #else -#define Debug(level, fmt...) /* disabled */ +#define Debug(level, fmt...) 
/* disabled */ #endif #ifndef AV_NOPTS_VALUE @@ -122,12 +122,11 @@ static inline const char *Timestamp2String(int64_t ts) static int idx; if (ts == (int64_t) AV_NOPTS_VALUE) { - return "--:--:--.---"; + return "--:--:--.---"; } idx = (idx + 1) % 3; - snprintf(buf[idx], sizeof(buf[idx]), "%2d:%02d:%02d.%03d", - (int)(ts / (90 * 3600000)), (int)((ts / (90 * 60000)) % 60), - (int)((ts / (90 * 1000)) % 60), (int)((ts / 90) % 1000)); + snprintf(buf[idx], sizeof(buf[idx]), "%2d:%02d:%02d.%03d", (int)(ts / (90 * 3600000)), + (int)((ts / (90 * 60000)) % 60), (int)((ts / (90 * 1000)) % 60), (int)((ts / 90) % 1000)); return buf[idx]; } @@ -148,24 +147,25 @@ static inline uint32_t GetMsTicks(void) struct timeval tval; if (gettimeofday(&tval, NULL) < 0) { - return 0; + return 0; } return (tval.tv_sec * 1000) + (tval.tv_usec / 1000); #endif } + static inline uint64_t GetusTicks(void) { - + #ifdef CLOCK_MONOTONIC struct timespec tspec; clock_gettime(CLOCK_MONOTONIC, &tspec); - return (uint64_t) (tspec.tv_sec * 1000000) + (tspec.tv_nsec) ; + return (uint64_t) (tspec.tv_sec * 1000000) + (tspec.tv_nsec); #else struct timeval tval; if (gettimeofday(&tval, NULL) < 0) { - return 0; + return 0; } return (tval.tv_sec * 1000) + (tval.tv_usec / 1000); #endif diff --git a/openglosd.cpp b/openglosd.cpp index c69af55..27adcd3 100644 --- a/openglosd.cpp +++ b/openglosd.cpp @@ -616,6 +616,7 @@ void cOglFb::BindWrite(void) { } void cOglFb::Unbind(void) { + glFinish(); glBindFramebuffer(GL_FRAMEBUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); } diff --git a/openglosd.h b/openglosd.h index b1c803a..0dc6d88 100644 --- a/openglosd.h +++ b/openglosd.h @@ -17,25 +17,21 @@ #include FT_STROKER_H #undef __FTERRORS_H__ -#define FT_ERRORDEF( e, v, s ) { e, s }, -#define FT_ERROR_START_LIST { -#define FT_ERROR_END_LIST { 0, 0 } }; -const struct { - int code; - const char* message; +#define FT_ERRORDEF( e, v, s ) { e, s }, +#define FT_ERROR_START_LIST { +#define FT_ERROR_END_LIST { 0, 0 } }; +const struct +{ + int code; + const char *message; } FT_Errors[] = #include FT_ERRORS_H - - #include #include - #include #include #include - #include "softhddev.h" - extern "C" { #include @@ -49,7 +45,8 @@ extern "C" extern "C" pthread_mutex_t OSDMutex; -struct sOglImage { +struct sOglImage +{ GLuint texture; GLint width; GLint height; @@ -60,69 +57,97 @@ struct sOglImage { * Helpers ****************************************************************************************/ -void ConvertColor(const GLint &colARGB, glm::vec4 &col); +void ConvertColor(const GLint & colARGB, glm::vec4 & col); /**************************************************************************************** * cShader ****************************************************************************************/ -enum eShaderType { +enum eShaderType +{ stRect, stTexture, stText, stCount }; -class cShader { -private: +class cShader +{ + private: eShaderType type; GLuint id; bool Compile(const char *vertexCode, const char *fragmentCode); bool CheckCompileErrors(GLuint object, bool program = false); -public: - cShader(void) {}; - virtual ~cShader(void) {}; + public: + cShader(void) + { + }; + virtual ~ cShader(void) + { + }; bool Load(eShaderType type); void Use(void); - void SetFloat (const GLchar *name, GLfloat value); - void SetInteger (const GLchar *name, GLint value); - void SetVector2f (const GLchar *name, GLfloat x, GLfloat y); - void SetVector3f (const GLchar *name, GLfloat x, GLfloat y, GLfloat z); - void SetVector4f (const GLchar *name, GLfloat x, GLfloat y, GLfloat z, GLfloat 
w); - void SetMatrix4 (const GLchar *name, const glm::mat4 &matrix); + void SetFloat(const GLchar * name, GLfloat value); + void SetInteger(const GLchar * name, GLint value); + void SetVector2f(const GLchar * name, GLfloat x, GLfloat y); + void SetVector3f(const GLchar * name, GLfloat x, GLfloat y, GLfloat z); + void SetVector4f(const GLchar * name, GLfloat x, GLfloat y, GLfloat z, GLfloat w); + void SetMatrix4(const GLchar * name, const glm::mat4 & matrix); }; /**************************************************************************************** * cOglGlyph ****************************************************************************************/ -class cOglGlyph : public cListObject { -private: - struct tKerning { - public: - tKerning(uint prevSym, GLfloat kerning = 0.0f) { - this->prevSym = prevSym; - this->kerning = kerning; - } - uint prevSym; - GLfloat kerning; +class cOglGlyph:public cListObject +{ + private: + struct tKerning + { + public: + tKerning(uint prevSym, GLfloat kerning = 0.0f) { + this->prevSym = prevSym; + this->kerning = kerning; + } + uint prevSym; + GLfloat kerning; }; uint charCode; int bearingLeft; int bearingTop; int width; int height; - int advanceX; - cVector kerningCache; + int advanceX; + + cVector < tKerning > kerningCache; GLuint texture; void LoadTexture(FT_BitmapGlyph ftGlyph); -public: + + public: cOglGlyph(uint charCode, FT_BitmapGlyph ftGlyph); - virtual ~cOglGlyph(); - uint CharCode(void) { return charCode; } - int AdvanceX(void) { return advanceX; } - int BearingLeft(void) const { return bearingLeft; } - int BearingTop(void) const { return bearingTop; } - int Width(void) const { return width; } - int Height(void) const { return height; } + virtual ~ cOglGlyph(); + uint CharCode(void) + { + return charCode; + } + int AdvanceX(void) + { + return advanceX; + } + int BearingLeft(void) const + { + return bearingLeft; + } + int BearingTop(void) const + { + return bearingTop; + } + int Width(void) const + { + return width; + } + int Height(void) const + { + return height; + } int GetKerningCache(uint prevSym); void SetKerningCache(uint prevSym, int kerning); void BindTexture(void); @@ -131,8 +156,9 @@ public: /**************************************************************************************** * cOglFont ****************************************************************************************/ -class cOglFont : public cListObject { -private: +class cOglFont:public cListObject +{ + private: static bool initiated; cString name; int size; @@ -140,41 +166,57 @@ private: int bottom; static FT_Library ftLib; FT_Face face; - static cList *fonts; - mutable cList glyphCache; - cOglFont(const char *fontName, int charHeight); + static cList < cOglFont > *fonts; + mutable cList < cOglGlyph > glyphCache; + cOglFont(const char *fontName, int charHeight); static void Init(void); -public: - virtual ~cOglFont(void); + public: + virtual ~ cOglFont(void); static cOglFont *Get(const char *name, int charHeight); static void Cleanup(void); - const char *Name(void) { return *name; }; - int Size(void) { return size; }; - int Bottom(void) {return bottom; }; - int Height(void) {return height; }; - cOglGlyph* Glyph(uint charCode) const; - int Kerning(cOglGlyph *glyph, uint prevSym) const; + const char *Name(void) + { + return *name; + }; + int Size(void) + { + return size; + }; + int Bottom(void) + { + return bottom; + }; + int Height(void) + { + return height; + }; + cOglGlyph *Glyph(uint charCode) const; + int Kerning(cOglGlyph * glyph, uint prevSym) const; }; 
/**************************************************************************************** * cOglFb * Framebuffer Object - OpenGL part of a Pixmap ****************************************************************************************/ -class cOglFb { -protected: +class cOglFb +{ + protected: bool initiated; // GLuint fb; // GLuint texture; GLint width, height; GLint viewPortWidth, viewPortHeight; bool scrollable; -public: - GLuint fb; - GLuint texture; - - cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight); - virtual ~cOglFb(void); - bool Initiated(void) { return initiated; } + public: + GLuint fb; + GLuint texture; + + cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight); + virtual ~ cOglFb(void); + bool Initiated(void) + { + return initiated; + } virtual bool Init(void); void Bind(void); void BindRead(void); @@ -182,27 +224,43 @@ public: virtual void Unbind(void); bool BindTexture(void); void Blit(GLint destX1, GLint destY1, GLint destX2, GLint destY2); - GLint Width(void) { return width; }; - GLint Height(void) { return height; }; - bool Scrollable(void) { return scrollable; }; - GLint ViewportWidth(void) { return viewPortWidth; }; - GLint ViewportHeight(void) { return viewPortHeight; }; + GLint Width(void) + { + return width; + }; + GLint Height(void) + { + return height; + }; + bool Scrollable(void) + { + return scrollable; + }; + GLint ViewportWidth(void) + { + return viewPortWidth; + }; + GLint ViewportHeight(void) + { + return viewPortHeight; + }; }; /**************************************************************************************** * cOglOutputFb * Output Framebuffer Object - holds Vdpau Output Surface which is our "output framebuffer" ****************************************************************************************/ -class cOglOutputFb : public cOglFb { -protected: +class cOglOutputFb:public cOglFb +{ + protected: bool initiated; -private: - GLvdpauSurfaceNV surface; -public: - GLuint fb; - GLuint texture; - cOglOutputFb(GLint width, GLint height); - virtual ~cOglOutputFb(void); + private: + GLvdpauSurfaceNV surface; + public: + GLuint fb; + GLuint texture; + cOglOutputFb(GLint width, GLint height); + virtual ~ cOglOutputFb(void); virtual bool Init(void); virtual void BindWrite(void); virtual void Unbind(void); @@ -212,7 +270,8 @@ public: * cOglVb * Vertex Buffer - OpenGl Vertices for the different drawing commands ****************************************************************************************/ -enum eVertexBufferType { +enum eVertexBufferType +{ vbRect, vbEllipse, vbSlope, @@ -221,8 +280,9 @@ enum eVertexBufferType { vbCount }; -class cOglVb { -private: +class cOglVb +{ + private: eVertexBufferType type; eShaderType shader; GLuint vao; @@ -231,9 +291,9 @@ private: int sizeVertex2; int numVertices; GLuint drawMode; -public: - cOglVb(int type); - virtual ~cOglVb(void); + public: + cOglVb(int type); + virtual ~ cOglVb(void); bool Init(void); void Bind(void); void Unbind(void); @@ -243,184 +303,271 @@ public: void SetShaderColor(GLint color); void SetShaderAlpha(GLint alpha); void SetShaderProjectionMatrix(GLint width, GLint height); - void SetVertexData(GLfloat *vertices, int count = 0); + void SetVertexData(GLfloat * vertices, int count = 0); void DrawArrays(int count = 0); }; /**************************************************************************************** * cOpenGLCmd ****************************************************************************************/ -class cOglCmd { -protected: - cOglFb *fb; 
-public: - cOglCmd(cOglFb *fb) { this->fb = fb; }; - virtual ~cOglCmd(void) {}; - virtual const char* Description(void) = 0; +class cOglCmd +{ + protected: + cOglFb * fb; + public: + cOglCmd(cOglFb * fb) + { + this->fb = fb; + }; + virtual ~ cOglCmd(void) + { + }; + virtual const char *Description(void) = 0; virtual bool Execute(void) = 0; }; -class cOglCmdInitOutputFb : public cOglCmd { -private: - cOglOutputFb *oFb; -public: - cOglCmdInitOutputFb(cOglOutputFb *oFb); - virtual ~cOglCmdInitOutputFb(void) {}; - virtual const char* Description(void) { return "InitOutputFramebuffer"; } +class cOglCmdInitOutputFb:public cOglCmd +{ + private: + cOglOutputFb * oFb; + public: + cOglCmdInitOutputFb(cOglOutputFb * oFb); + virtual ~ cOglCmdInitOutputFb(void) + { + }; + virtual const char *Description(void) + { + return "InitOutputFramebuffer"; + } virtual bool Execute(void); }; -class cOglCmdInitFb : public cOglCmd { -private: - cCondWait *wait; -public: - cOglCmdInitFb(cOglFb *fb, cCondWait *wait = NULL); - virtual ~cOglCmdInitFb(void) {}; - virtual const char* Description(void) { return "InitFramebuffer"; } +class cOglCmdInitFb:public cOglCmd +{ + private: + cCondWait * wait; + public: + cOglCmdInitFb(cOglFb * fb, cCondWait * wait = NULL); + virtual ~ cOglCmdInitFb(void) + { + }; + virtual const char *Description(void) + { + return "InitFramebuffer"; + } virtual bool Execute(void); }; -class cOglCmdDeleteFb : public cOglCmd { -public: - cOglCmdDeleteFb(cOglFb *fb); - virtual ~cOglCmdDeleteFb(void) {}; - virtual const char* Description(void) { return "DeleteFramebuffer"; } +class cOglCmdDeleteFb:public cOglCmd +{ + public: + cOglCmdDeleteFb(cOglFb * fb); + virtual ~ cOglCmdDeleteFb(void) + { + }; + virtual const char *Description(void) + { + return "DeleteFramebuffer"; + } virtual bool Execute(void); }; -class cOglCmdRenderFbToBufferFb : public cOglCmd { -private: - cOglFb *buffer; +class cOglCmdRenderFbToBufferFb:public cOglCmd +{ + private: + cOglFb * buffer; GLfloat x, y; GLfloat drawPortX, drawPortY; GLint transparency; -public: - cOglCmdRenderFbToBufferFb(cOglFb *fb, cOglFb *buffer, GLint x, GLint y, GLint transparency, GLint drawPortX, GLint drawPortY); - virtual ~cOglCmdRenderFbToBufferFb(void) {}; - virtual const char* Description(void) { return "Render Framebuffer to Buffer"; } + public: + cOglCmdRenderFbToBufferFb(cOglFb * fb, cOglFb * buffer, GLint x, GLint y, GLint transparency, GLint drawPortX, + GLint drawPortY); + virtual ~ cOglCmdRenderFbToBufferFb(void) + { + }; + virtual const char *Description(void) + { + return "Render Framebuffer to Buffer"; + } virtual bool Execute(void); }; -class cOglCmdCopyBufferToOutputFb : public cOglCmd { -private: - cOglOutputFb *oFb; +class cOglCmdCopyBufferToOutputFb:public cOglCmd +{ + private: + cOglOutputFb * oFb; GLint x, y; -public: - cOglCmdCopyBufferToOutputFb(cOglFb *fb, cOglOutputFb *oFb, GLint x, GLint y); - virtual ~cOglCmdCopyBufferToOutputFb(void) {}; - virtual const char* Description(void) { return "Copy buffer to OutputFramebuffer"; } + public: + cOglCmdCopyBufferToOutputFb(cOglFb * fb, cOglOutputFb * oFb, GLint x, GLint y); + virtual ~ cOglCmdCopyBufferToOutputFb(void) + { + }; + virtual const char *Description(void) + { + return "Copy buffer to OutputFramebuffer"; + } virtual bool Execute(void); }; -class cOglCmdFill : public cOglCmd { -private: +class cOglCmdFill:public cOglCmd +{ + private: GLint color; -public: - cOglCmdFill(cOglFb *fb, GLint color); - virtual ~cOglCmdFill(void) {}; - virtual const char* Description(void) { return 
"Fill"; } + public: + cOglCmdFill(cOglFb * fb, GLint color); + virtual ~ cOglCmdFill(void) + { + }; + virtual const char *Description(void) + { + return "Fill"; + } virtual bool Execute(void); }; -class cOglCmdDrawRectangle : public cOglCmd { -private: +class cOglCmdDrawRectangle:public cOglCmd +{ + private: GLint x, y; GLint width, height; GLint color; -public: - cOglCmdDrawRectangle(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color); - virtual ~cOglCmdDrawRectangle(void) {}; - virtual const char* Description(void) { return "DrawRectangle"; } + public: + cOglCmdDrawRectangle(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color); + virtual ~ cOglCmdDrawRectangle(void) + { + }; + virtual const char *Description(void) + { + return "DrawRectangle"; + } virtual bool Execute(void); }; -class cOglCmdDrawEllipse : public cOglCmd { -private: +class cOglCmdDrawEllipse:public cOglCmd +{ + private: GLint x, y; GLint width, height; GLint color; GLint quadrants; GLfloat *CreateVerticesFull(int &numVertices); GLfloat *CreateVerticesQuadrant(int &numVertices); - GLfloat *CreateVerticesHalf(int &numVertices); -public: - cOglCmdDrawEllipse(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint quadrants); - virtual ~cOglCmdDrawEllipse(void) {}; - virtual const char* Description(void) { return "DrawEllipse"; } + GLfloat *CreateVerticesHalf(int &numVertices); + public: + cOglCmdDrawEllipse(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint quadrants); + virtual ~ cOglCmdDrawEllipse(void) + { + }; + virtual const char *Description(void) + { + return "DrawEllipse"; + } virtual bool Execute(void); }; -class cOglCmdDrawSlope : public cOglCmd { -private: +class cOglCmdDrawSlope:public cOglCmd +{ + private: GLint x, y; GLint width, height; GLint color; GLint type; -public: - cOglCmdDrawSlope(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type); - virtual ~cOglCmdDrawSlope(void) {}; - virtual const char* Description(void) { return "DrawSlope"; } + public: + cOglCmdDrawSlope(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type); + virtual ~ cOglCmdDrawSlope(void) + { + }; + virtual const char *Description(void) + { + return "DrawSlope"; + } virtual bool Execute(void); }; -class cOglCmdDrawText : public cOglCmd { -private: +class cOglCmdDrawText:public cOglCmd +{ + private: GLint x, y; GLint limitX; GLint colorText; cString fontName; int fontSize; unsigned int *symbols; -public: - cOglCmdDrawText(cOglFb *fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, int fontSize, tColor colorText); - virtual ~cOglCmdDrawText(void); - virtual const char* Description(void) { return "DrawText"; } + public: + cOglCmdDrawText(cOglFb * fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, + int fontSize, tColor colorText); + virtual ~ cOglCmdDrawText(void); + virtual const char *Description(void) + { + return "DrawText"; + } virtual bool Execute(void); }; -class cOglCmdDrawImage : public cOglCmd { -private: - tColor *argb; +class cOglCmdDrawImage:public cOglCmd +{ + private: + tColor * argb; GLint x, y, width, height; bool overlay; GLfloat scaleX, scaleY; -public: - cOglCmdDrawImage(cOglFb *fb, tColor *argb, GLint width, GLint height, GLint x, GLint y, bool overlay = true, double scaleX = 1.0f, double scaleY = 1.0f); - virtual ~cOglCmdDrawImage(void); - virtual const char* Description(void) { return "Draw Image"; } + public: + 
cOglCmdDrawImage(cOglFb * fb, tColor * argb, GLint width, GLint height, GLint x, GLint y, bool overlay = + true, double scaleX = 1.0f, double scaleY = 1.0f); + virtual ~ cOglCmdDrawImage(void); + virtual const char *Description(void) + { + return "Draw Image"; + } virtual bool Execute(void); }; -class cOglCmdDrawTexture : public cOglCmd { -private: - sOglImage *imageRef; +class cOglCmdDrawTexture:public cOglCmd +{ + private: + sOglImage * imageRef; GLint x, y; -public: - cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y); - virtual ~cOglCmdDrawTexture(void) {}; - virtual const char* Description(void) { return "Draw Texture"; } + public: + cOglCmdDrawTexture(cOglFb * fb, sOglImage * imageRef, GLint x, GLint y); + virtual ~ cOglCmdDrawTexture(void) + { + }; + virtual const char *Description(void) + { + return "Draw Texture"; + } virtual bool Execute(void); }; -class cOglCmdStoreImage : public cOglCmd { -private: - sOglImage *imageRef; +class cOglCmdStoreImage:public cOglCmd +{ + private: + sOglImage * imageRef; tColor *data; -public: - cOglCmdStoreImage(sOglImage *imageRef, tColor *argb); - virtual ~cOglCmdStoreImage(void); - virtual const char* Description(void) { return "Store Image"; } + public: + cOglCmdStoreImage(sOglImage * imageRef, tColor * argb); + virtual ~ cOglCmdStoreImage(void); + virtual const char *Description(void) + { + return "Store Image"; + } virtual bool Execute(void); }; -class cOglCmdDropImage : public cOglCmd { -private: - sOglImage *imageRef; +class cOglCmdDropImage:public cOglCmd +{ + private: + sOglImage * imageRef; cCondWait *wait; -public: - cOglCmdDropImage(sOglImage *imageRef, cCondWait *wait); - virtual ~cOglCmdDropImage(void) {}; - virtual const char* Description(void) { return "Drop Image"; } + public: + cOglCmdDropImage(sOglImage * imageRef, cCondWait * wait); + virtual ~ cOglCmdDropImage(void) + { + }; + virtual const char *Description(void) + { + return "Drop Image"; + } virtual bool Execute(void); }; @@ -430,12 +577,13 @@ public: #define OGL_MAX_OSDIMAGES 256 #define OGL_CMDQUEUE_SIZE 100 -class cOglThread : public cThread { -private: - cCondWait *startWait; +class cOglThread:public cThread +{ + private: + cCondWait * startWait; cCondWait *wait; bool stalled; - std::queue commands; + std::queue < cOglCmd * >commands; GLint maxTextureSize; sOglImage imageCache[OGL_MAX_OSDIMAGES]; long memCached; @@ -449,73 +597,96 @@ private: void Cleanup(void); int GetFreeSlot(void); void ClearSlot(int slot); -protected: - virtual void Action(void); -public: - cOglThread(cCondWait *startWait, int maxCacheSize); - virtual ~cOglThread(); + protected: + virtual void Action(void); + public: + cOglThread(cCondWait * startWait, int maxCacheSize); + virtual ~ cOglThread(); void Stop(void); - void DoCmd(cOglCmd* cmd); - int StoreImage(const cImage &image); + void DoCmd(cOglCmd * cmd); + int StoreImage(const cImage & image); void DropImageData(int imageHandle); sOglImage *GetImageRef(int slot); - int MaxTextureSize(void) { return maxTextureSize; }; + int MaxTextureSize(void) + { + return maxTextureSize; + }; }; /**************************************************************************************** * cOglPixmap ****************************************************************************************/ -class cOglPixmap : public cPixmap { -private: - cOglFb *fb; - std::shared_ptr oglThread; +class cOglPixmap:public cPixmap +{ + private: + cOglFb * fb; + std::shared_ptr < cOglThread > oglThread; bool dirty; -public: - cOglPixmap(std::shared_ptr oglThread, int 
Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null); - virtual ~cOglPixmap(void); - cOglFb *Fb(void) { return fb; }; - int X(void) { return ViewPort().X(); }; - int Y(void) { return ViewPort().Y(); }; - virtual bool IsDirty(void) { return dirty; } - virtual void SetDirty(bool dirty = true) { this->dirty = dirty; } + public: + cOglPixmap(std::shared_ptr < cOglThread > oglThread, int Layer, const cRect & ViewPort, const cRect & DrawPort = + cRect::Null); + virtual ~ cOglPixmap(void); + cOglFb *Fb(void) + { + return fb; + }; + int X(void) + { + return ViewPort().X(); + }; + int Y(void) + { + return ViewPort().Y(); + }; + virtual bool IsDirty(void) + { + return dirty; + } + virtual void SetDirty(bool dirty = true) { + this->dirty = dirty; + } virtual void SetAlpha(int Alpha); virtual void SetTile(bool Tile); - virtual void SetViewPort(const cRect &Rect); - virtual void SetDrawPortPoint(const cPoint &Point, bool Dirty = true); + virtual void SetViewPort(const cRect & Rect); + virtual void SetDrawPortPoint(const cPoint & Point, bool Dirty = true); virtual void Clear(void); virtual void Fill(tColor Color); - virtual void DrawImage(const cPoint &Point, const cImage &Image); - virtual void DrawImage(const cPoint &Point, int ImageHandle); - virtual void DrawPixel(const cPoint &Point, tColor Color); - virtual void DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0, bool Overlay = false); - virtual void DrawText(const cPoint &Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font, int Width = 0, int Height = 0, int Alignment = taDefault); - virtual void DrawRectangle(const cRect &Rect, tColor Color); - virtual void DrawEllipse(const cRect &Rect, tColor Color, int Quadrants = 0); - virtual void DrawSlope(const cRect &Rect, tColor Color, int Type); - virtual void Render(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest); - virtual void Copy(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest); - virtual void Scroll(const cPoint &Dest, const cRect &Source = cRect::Null); - virtual void Pan(const cPoint &Dest, const cRect &Source = cRect::Null); + virtual void DrawImage(const cPoint & Point, const cImage & Image); + virtual void DrawImage(const cPoint & Point, int ImageHandle); + virtual void DrawPixel(const cPoint & Point, tColor Color); + virtual void DrawBitmap(const cPoint & Point, const cBitmap & Bitmap, tColor ColorFg = 0, tColor ColorBg = + 0, bool Overlay = false); + virtual void DrawText(const cPoint & Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont * Font, + int Width = 0, int Height = 0, int Alignment = taDefault); + virtual void DrawRectangle(const cRect & Rect, tColor Color); + virtual void DrawEllipse(const cRect & Rect, tColor Color, int Quadrants = 0); + virtual void DrawSlope(const cRect & Rect, tColor Color, int Type); + virtual void Render(const cPixmap * Pixmap, const cRect & Source, const cPoint & Dest); + virtual void Copy(const cPixmap * Pixmap, const cRect & Source, const cPoint & Dest); + virtual void Scroll(const cPoint & Dest, const cRect & Source = cRect::Null); + virtual void Pan(const cPoint & Dest, const cRect & Source = cRect::Null); }; /****************************************************************************** * cOglOsd ******************************************************************************/ -class cOglOsd : public cOsd { -private: - cOglFb *bFb; - std::shared_ptr oglThread; - cVector oglPixmaps; +class cOglOsd:public cOsd +{ + private: + cOglFb * 
bFb; + std::shared_ptr < cOglThread > oglThread; + cVector < cOglPixmap * >oglPixmaps; bool isSubtitleOsd; -protected: -public: - cOglOsd(int Left, int Top, uint Level, std::shared_ptr oglThread); - virtual ~cOglOsd(); - virtual eOsdError SetAreas(const tArea *Areas, int NumAreas); - virtual cPixmap *CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null); - virtual void DestroyPixmap(cPixmap *Pixmap); + protected: + public: + cOglOsd(int Left, int Top, uint Level, std::shared_ptr < cOglThread > oglThread); + virtual ~ cOglOsd(); + virtual eOsdError SetAreas(const tArea * Areas, int NumAreas); + virtual cPixmap *CreatePixmap(int Layer, const cRect & ViewPort, const cRect & DrawPort = cRect::Null); + virtual void DestroyPixmap(cPixmap * Pixmap); virtual void Flush(void); - virtual void DrawScaledBitmap(int x, int y, const cBitmap &Bitmap, double FactorX, double FactorY, bool AntiAlias = false); + virtual void DrawScaledBitmap(int x, int y, const cBitmap & Bitmap, double FactorX, double FactorY, + bool AntiAlias = false); static cOglOutputFb *oFb; }; diff --git a/po/de_DE.po b/po/de_DE.po index 7ee637b..8d388ca 100644 --- a/po/de_DE.po +++ b/po/de_DE.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: VDR \n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2019-10-04 14:23+0200\n" +"POT-Creation-Date: 2019-10-26 18:41+0200\n" "PO-Revision-Date: blabla\n" "Last-Translator: blabla\n" "Language-Team: blabla\n" @@ -792,7 +792,13 @@ msgstr "" msgid "[softhddev] ready%s\n" msgstr "" -msgid "video/egl: GlxSetupWindow can't make egl context current\n" +msgid "video: can't lock thread\n" +msgstr "" + +msgid "video: can't unlock thread\n" +msgstr "" + +msgid "video/egl: GlxSetupWindow can't make egl/glx context current\n" msgstr "" msgid "video/glx: no v-sync\n" @@ -891,21 +897,6 @@ msgstr "" msgid "Failed rendering frame!\n" msgstr "" -#, c-format -msgid "video/vdpau: can't get video surface parameters: %s\n" -msgstr "" - -msgid "video/vdpau: out of memory\n" -msgstr "" - -#, c-format -msgid "video/vdpau: unsupported chroma type %d\n" -msgstr "" - -#, c-format -msgid "video/vdpau: can't get video surface bits: %s\n" -msgstr "" - #, c-format msgid "video/vdpau: output buffer full, dropping frame (%d/%d)\n" msgstr "" @@ -924,9 +915,6 @@ msgstr "" msgid "video: decoder buffer empty, duping frame (%d/%d) %d v-buf\n" msgstr "" -msgid "Failed creating vulkan swapchain!" -msgstr "" - msgid "video: fatal i/o error\n" msgstr "" @@ -934,12 +922,6 @@ msgstr "" msgid "video/event: No symbol for %d\n" msgstr "" -msgid "video: can't lock thread\n" -msgstr "" - -msgid "video: can't unlock thread\n" -msgstr "" - msgid "Cant get memory for PLACEBO struct" msgstr "" @@ -952,6 +934,9 @@ msgstr "" msgid "Failed to create Vulkan Device" msgstr "" +msgid "Failed creating vulkan swapchain!" +msgstr "" + msgid "Failed initializing libplacebo renderer\n" msgstr "" diff --git a/ringbuffer.c b/ringbuffer.c index 5bd922e..998715c 100644 --- a/ringbuffer.c +++ b/ringbuffer.c @@ -1,29 +1,29 @@ /// -/// @file ringbuffer.c @brief Ringbuffer module +/// @file ringbuffer.c @brief Ringbuffer module /// -/// Copyright (c) 2009, 2011, 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2009, 2011, 2014 by Johns. All Rights Reserved. 
/// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: c9497b197ce7e7a6ba397944edc7ccb161152efd $ +/// $Id: c9497b197ce7e7a6ba397944edc7ccb161152efd $ ////////////////////////////////////////////////////////////////////////////// /// -/// @defgroup Ringbuffer The ring buffer module. +/// @defgroup Ringbuffer The ring buffer module. /// -/// Lock free ring buffer with only one writer and one reader. +/// Lock free ring buffer with only one writer and one reader. /// #include @@ -36,15 +36,15 @@ /// ring buffer structure struct _ring_buffer_ { - char *Buffer; ///< ring buffer data - const char *BufferEnd; ///< end of buffer - size_t Size; ///< bytes in buffer (for faster calc) + char *Buffer; ///< ring buffer data + const char *BufferEnd; ///< end of buffer + size_t Size; ///< bytes in buffer (for faster calc) - const char *ReadPointer; ///< only used by reader - char *WritePointer; ///< only used by writer + const char *ReadPointer; ///< only used by reader + char *WritePointer; ///< only used by writer /// The only thing modified by both - atomic_t Filled; ///< how many of the buffer is used + atomic_t Filled; ///< how many of the buffer is used }; /** @@ -71,12 +71,12 @@ RingBuffer *RingBufferNew(size_t size) { RingBuffer *rb; - if (!(rb = malloc(sizeof(*rb)))) { // allocate structure - return rb; + if (!(rb = malloc(sizeof(*rb)))) { // allocate structure + return rb; } - if (!(rb->Buffer = malloc(size))) { // allocate buffer - free(rb); - return NULL; + if (!(rb->Buffer = malloc(size))) { // allocate buffer + free(rb); + return NULL; } rb->Size = size; @@ -108,25 +108,25 @@ size_t RingBufferWriteAdvance(RingBuffer * rb, size_t cnt) size_t n; n = rb->Size - atomic_read(&rb->Filled); - if (cnt > n) { // not enough space - cnt = n; + if (cnt > n) { // not enough space + cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->WritePointer; - if (n > cnt) { // don't cross the end - rb->WritePointer += cnt; - } else { // reached or cross the end - rb->WritePointer = rb->Buffer; - if (n < cnt) { - n = cnt - n; - rb->WritePointer += n; - } + if (n > cnt) { // don't cross the end + rb->WritePointer += cnt; + } else { // reached or cross the end + rb->WritePointer = rb->Buffer; + if (n < cnt) { + n = cnt - n; + rb->WritePointer += n; + } } // - // Only atomic modification! + // Only atomic modification! 
// atomic_add(cnt, &rb->Filled); return cnt; @@ -147,29 +147,29 @@ size_t RingBufferWrite(RingBuffer * rb, const void *buf, size_t cnt) size_t n; n = rb->Size - atomic_read(&rb->Filled); - if (cnt > n) { // not enough space - cnt = n; + if (cnt > n) { // not enough space + cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->WritePointer; - if (n > cnt) { // don't cross the end - memcpy(rb->WritePointer, buf, cnt); - rb->WritePointer += cnt; - } else { // reached or cross the end - memcpy(rb->WritePointer, buf, n); - rb->WritePointer = rb->Buffer; - if (n < cnt) { - buf += n; - n = cnt - n; - memcpy(rb->WritePointer, buf, n); - rb->WritePointer += n; - } + if (n > cnt) { // don't cross the end + memcpy(rb->WritePointer, buf, cnt); + rb->WritePointer += cnt; + } else { // reached or cross the end + memcpy(rb->WritePointer, buf, n); + rb->WritePointer = rb->Buffer; + if (n < cnt) { + buf += n; + n = cnt - n; + memcpy(rb->WritePointer, buf, n); + rb->WritePointer += n; + } } // - // Only atomic modification! + // Only atomic modification! // atomic_add(cnt, &rb->Filled); return cnt; @@ -189,17 +189,17 @@ size_t RingBufferGetWritePointer(RingBuffer * rb, void **wp) size_t n; size_t cnt; - // Total free bytes available in ring buffer + // Total free bytes available in ring buffer cnt = rb->Size - atomic_read(&rb->Filled); *wp = rb->WritePointer; // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->WritePointer; - if (n <= cnt) { // reached or cross the end - return n; + if (n <= cnt) { // reached or cross the end + return n; } return cnt; } @@ -217,25 +217,25 @@ size_t RingBufferReadAdvance(RingBuffer * rb, size_t cnt) size_t n; n = atomic_read(&rb->Filled); - if (cnt > n) { // not enough filled - cnt = n; + if (cnt > n) { // not enough filled + cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->ReadPointer; - if (n > cnt) { // don't cross the end - rb->ReadPointer += cnt; - } else { // reached or cross the end - rb->ReadPointer = rb->Buffer; - if (n < cnt) { - n = cnt - n; - rb->ReadPointer += n; - } + if (n > cnt) { // don't cross the end + rb->ReadPointer += cnt; + } else { // reached or cross the end + rb->ReadPointer = rb->Buffer; + if (n < cnt) { + n = cnt - n; + rb->ReadPointer += n; + } } // - // Only atomic modification! + // Only atomic modification! // atomic_sub(cnt, &rb->Filled); return cnt; @@ -255,29 +255,29 @@ size_t RingBufferRead(RingBuffer * rb, void *buf, size_t cnt) size_t n; n = atomic_read(&rb->Filled); - if (cnt > n) { // not enough filled - cnt = n; + if (cnt > n) { // not enough filled + cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->ReadPointer; - if (n > cnt) { // don't cross the end - memcpy(buf, rb->ReadPointer, cnt); - rb->ReadPointer += cnt; - } else { // reached or cross the end - memcpy(buf, rb->ReadPointer, n); - rb->ReadPointer = rb->Buffer; - if (n < cnt) { - buf += n; - n = cnt - n; - memcpy(buf, rb->ReadPointer, n); - rb->ReadPointer += n; - } + if (n > cnt) { // don't cross the end + memcpy(buf, rb->ReadPointer, cnt); + rb->ReadPointer += cnt; + } else { // reached or cross the end + memcpy(buf, rb->ReadPointer, n); + rb->ReadPointer = rb->Buffer; + if (n < cnt) { + buf += n; + n = cnt - n; + memcpy(buf, rb->ReadPointer, n); + rb->ReadPointer += n; + } } // - // Only atomic modification! + // Only atomic modification! 
// atomic_sub(cnt, &rb->Filled); return cnt; @@ -297,17 +297,17 @@ size_t RingBufferGetReadPointer(RingBuffer * rb, const void **rp) size_t n; size_t cnt; - // Total used bytes in ring buffer + // Total used bytes in ring buffer cnt = atomic_read(&rb->Filled); *rp = rb->ReadPointer; // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->ReadPointer; - if (n <= cnt) { // reached or cross the end - return n; + if (n <= cnt) { // reached or cross the end + return n; } return cnt; } diff --git a/ringbuffer.h b/ringbuffer.h index c53c495..045a348 100644 --- a/ringbuffer.h +++ b/ringbuffer.h @@ -1,23 +1,23 @@ /// -/// @file ringbuffer.h @brief Ringbuffer module header file +/// @file ringbuffer.h @brief Ringbuffer module header file /// -/// Copyright (c) 2009, 2011 by Johns. All Rights Reserved. +/// Copyright (c) 2009, 2011 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. 
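The ring buffer API reformatted above (RingBufferNew, RingBufferWrite, RingBufferRead, plus the *Advance and *GetPointer variants) keeps the single-writer/single-reader contract: each side only moves its own pointer, and the atomic Filled counter is the last thing touched. A minimal usage sketch using only the functions visible in this diff; the matching destroy call is not shown here, so it is omitted:

#include <stdio.h>
#include "ringbuffer.h"

void ringbuffer_demo(void)
{
    RingBuffer *rb = RingBufferNew(4096);          /* 4 KiB ring */
    char out[16];
    size_t put, got;

    if (!rb) {
        return;                                    /* allocation failed */
    }
    put = RingBufferWrite(rb, "PES data", 8);      /* writer side: returns bytes actually stored */
    got = RingBufferRead(rb, out, sizeof(out));    /* reader side: returns bytes actually copied */
    printf("wrote %zu, read %zu bytes\n", put, got);
}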
/// -/// $Id: 8a2b4c171f1024afb8b8a7a7add631b7ebe2d45e $ +/// $Id: 8a2b4c171f1024afb8b8a7a7add631b7ebe2d45e $ ////////////////////////////////////////////////////////////////////////////// /// @addtogroup Ringbuffer diff --git a/shaders.h b/shaders.h index 83afc03..7dd3af8 100644 --- a/shaders.h +++ b/shaders.h @@ -1,465 +1,467 @@ - -// shader -#ifdef CUVID -char vertex_osd[] = {"\ -#version 330\n\ -in vec2 vertex_position;\n\ -in vec2 vertex_texcoord0;\n\ -out vec2 texcoord0;\n\ -void main() {\n\ -gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ -texcoord0 = vertex_texcoord0;\n\ -}\n"}; - -char fragment_osd[] = {"\ -#version 330\n\ -#define texture1D texture\n\ -precision mediump float; \ -layout(location = 0) out vec4 out_color;\n\ -in vec2 texcoord0;\n\ -uniform sampler2D texture0;\n\ -void main() {\n\ -vec4 color; \n\ -color = vec4(texture(texture0, texcoord0));\n\ -out_color = color;\n\ -}\n"}; - -char vertex[] = {"\ -#version 310 es\n\ -in vec2 vertex_position;\n\ -in vec2 vertex_texcoord0;\n\ -out vec2 texcoord0;\n\ -in vec2 vertex_texcoord1;\n\ -out vec2 texcoord1;\n\ -void main() {\n\ -gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ -texcoord0 = vertex_texcoord0;\n\ -texcoord1 = vertex_texcoord1;\n\ -}\n"}; - -char fragment[] = {"\ -#version 310 es\n\ -#define texture1D texture\n\ -#define texture3D texture\n\ -precision mediump float; \ -layout(location = 0) out vec4 out_color;\n\ -in vec2 texcoord0;\n\ -in vec2 texcoord1;\n\ -uniform mat3 colormatrix;\n\ -uniform vec3 colormatrix_c;\n\ -uniform sampler2D texture0;\n\ -uniform sampler2D texture1;\n\ -void main() {\n\ -vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ -color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\ -color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\ -// color conversion\n\ -color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ -color.a = 1.0;\n\ -// color mapping\n\ -out_color = color;\n\ -}\n"}; - -char fragment_bt2100[] = {"\ -#version 310 es\n \ -#define texture1D texture\n\ -#define texture3D texture\n\ -precision mediump float; \ -layout(location = 0) out vec4 out_color;\n\ -in vec2 texcoord0;\n\ -in vec2 texcoord1;\n\ -uniform mat3 colormatrix;\n\ -uniform vec3 colormatrix_c;\n\ -uniform mat3 cms_matrix;\n\ -uniform sampler2D texture0;\n\ -uniform sampler2D texture1;\n\ -//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ -void main() {\n\ -vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ -color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\ -color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\ -// color conversion\n\ -color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ -color.a = 1.0;\n\ -// color mapping\n\ -color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ -color.rgb = pow(color.rgb, vec3(2.4));\n\ -color.rgb = cms_matrix * color.rgb;\n\ -color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ -color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\ -out_color = color;\n\ -}\n"}; - - -#else -char vertex_osd[] = {"\ -\n\ -in vec2 vertex_position;\n\ -in vec2 vertex_texcoord0;\n\ -out vec2 texcoord0;\n\ -void main() {\n\ -gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ -texcoord0 = vertex_texcoord0;\n\ -}\n"}; - -char fragment_osd[] = {"\ -\n\ -#define texture1D texture\n\ -precision mediump float; \ -layout(location = 0) out vec4 out_color;\n\ -in vec2 texcoord0;\n\ -uniform sampler2D texture0;\n\ -void main() {\n\ -vec4 color; \n\ -color = vec4(texture(texture0, texcoord0));\n\ -out_color = color;\n\ -}\n"}; - -char vertex[] = {"\ -\n\ -in 
vec2 vertex_position;\n\ -in vec2 vertex_texcoord0;\n\ -out vec2 texcoord0;\n\ -in vec2 vertex_texcoord1;\n\ -out vec2 texcoord1;\n\ -void main() {\n\ -gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ -texcoord0 = vertex_texcoord0;\n\ -texcoord1 = vertex_texcoord1;\n\ -}\n"}; - -char fragment[] = {"\ -\n\ -#define texture1D texture\n\ -#define texture3D texture\n\ -precision mediump float; \ -layout(location = 0) out vec4 out_color;\n\ -in vec2 texcoord0;\n\ -in vec2 texcoord1;\n\ -uniform mat3 colormatrix;\n\ -uniform vec3 colormatrix_c;\n\ -uniform sampler2D texture0;\n\ -uniform sampler2D texture1;\n\ -//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ -void main() {\n\ -vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ -color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\ -color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\ -// color conversion\n\ -color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ -color.a = 1.0;\n\ -// color mapping\n\ -out_color = color;\n\ -}\n"}; - -char fragment_bt2100[] = {"\ -\n \ -#define texture1D texture\n\ -#define texture3D texture\n\ -precision mediump float; \ -layout(location = 0) out vec4 out_color;\n\ -in vec2 texcoord0;\n\ -in vec2 texcoord1;\n\ -uniform mat3 colormatrix;\n\ -uniform vec3 colormatrix_c;\n\ -uniform mat3 cms_matrix;\n\ -uniform sampler2D texture0;\n\ -uniform sampler2D texture1;\n\ -//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ -void main() {\n\ -vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ -color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\ -color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\ -// color conversion\n\ -color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ -color.a = 1.0;\n\ -// color mapping\n\ -color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ -color.rgb = pow(color.rgb, vec3(2.4));\n\ -color.rgb = cms_matrix * color.rgb;\n\ -color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ -color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\ -out_color = color;\n\ -}\n"}; -#endif -/* Color conversion matrix: RGB = m * YUV + c - * m is in row-major matrix, with m[row][col], e.g.: - * [ a11 a12 a13 ] float m[3][3] = { { a11, a12, a13 }, - * [ a21 a22 a23 ] { a21, a22, a23 }, - * [ a31 a32 a33 ] { a31, a32, a33 } }; - * This is accessed as e.g.: m[2-1][1-1] = a21 - * In particular, each row contains all the coefficients for one of R, G, B, - * while each column contains all the coefficients for one of Y, U, V: - * m[r,g,b][y,u,v] = ... - * The matrix could also be viewed as group of 3 vectors, e.g. the 1st column - * is the Y vector (1, 1, 1), the 2nd is the U vector, the 3rd the V vector. - * The matrix might also be used for other conversions and colorspaces. 
- */ -struct mp_cmat { - GLfloat m[3][3]; // colormatrix - GLfloat c[3]; //colormatrix_c -}; - -struct mp_mat { - GLfloat m[3][3]; -}; - -// YUV input limited range (16-235 for luma, 16-240 for chroma) -// ITU-R BT.601 (SD) -struct mp_cmat yuv_bt601 = {\ -{{ 1.164384, 1.164384, 1.164384 },\ -{ 0.00000, -0.391762, 2.017232 },\ -{ 1.596027, -0.812968 , 0.000000 }},\ -{-0.874202, 0.531668, -1.085631 } }; - -// ITU-R BT.709 (HD) -struct mp_cmat yuv_bt709 = {\ -{{ 1.164384, 1.164384, 1.164384 },\ -{ 0.00000, -0.213249, 2.112402 },\ -{ 1.792741, -0.532909 , 0.000000 }},\ -{-0.972945, 0.301483, -1.133402 } }; - -// ITU-R BT.2020 non-constant luminance system -struct mp_cmat yuv_bt2020ncl = {\ -{{ 1.164384, 1.164384, 1.164384 },\ -{ 0.00000, -0.187326, 2.141772 },\ -{ 1.678674, -0.650424 , 0.000000 }},\ -{-0.915688, 0.347459, -1.148145 } }; - -// ITU-R BT.2020 constant luminance system -struct mp_cmat yuv_bt2020cl = {\ -{{ 0.0000, 1.164384, 0.000000 },\ -{ 0.00000, 0.000000, 1.138393 },\ -{ 1.138393, 0.000000 , 0.000000 }},\ -{-0.571429, -0.073059, -0.571429 } }; - -float cms_matrix[3][3] = \ -{{ 1.660497, -0.124547, -0.018154},\ -{-0.587657, 1.132895, -0.100597},\ -{-0.072840, -0.008348, 1.118751}}; - -struct gl_vao_entry { - // used for shader / glBindAttribLocation - const char *name; - // glVertexAttribPointer() arguments - int num_elems; // size (number of elements) - GLenum type; - bool normalized; - int offset; -}; - -struct vertex_pt { - float x, y; -}; - -struct vertex_pi { - GLint x, y; -}; - -#define TEXUNIT_VIDEO_NUM 6 - - -struct vertex { - struct vertex_pt position; - struct vertex_pt texcoord[TEXUNIT_VIDEO_NUM]; -}; - -static const struct gl_vao_entry vertex_vao[] = { - {"position", 2, GL_FLOAT, false, offsetof(struct vertex, position)}, - {"texcoord0", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[0])}, - {"texcoord1", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[1])}, - {0} -}; - - -static void compile_attach_shader(GLuint program, - GLenum type, const char *source) -{ - GLuint shader; - GLint status, log_length; - char log[4000]; - GLsizei len; - shader = glCreateShader(type); - glShaderSource(shader, 1, &source, NULL); - glCompileShader(shader); - status = 0; - glGetShaderiv(shader, GL_COMPILE_STATUS, &status); - log_length = 0; - glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length); - glGetShaderInfoLog(shader,4000,&len,log); - GlxCheck(); -Debug(3,"compile Status %d loglen %d >%s<\n",status,log_length,log); - - glAttachShader(program, shader); - glDeleteShader(shader); -} - -static void link_shader(GLuint program) -{ - GLint status,log_length; - - glLinkProgram(program); - status = 0; - glGetProgramiv(program, GL_LINK_STATUS, &status); - log_length = 0; - glGetProgramiv(program, GL_INFO_LOG_LENGTH, &log_length); -Debug(3,"Link Status %d loglen %d\n",status,log_length); -} - -static GLuint sc_generate_osd(GLuint gl_prog) { - - Debug(3,"vor create osd\n"); - gl_prog = glCreateProgram(); - Debug(3,"vor compile vertex osd\n"); - compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex_osd); - Debug(3,"vor compile fragment osd \n"); - compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, fragment_osd); - glBindAttribLocation(gl_prog,0,"vertex_position"); - glBindAttribLocation(gl_prog,1,"vertex_texcoord0"); - - link_shader(gl_prog); - return gl_prog; -} - - -static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) { - - char vname[80]; - int n; - GLint cmsLoc; - float *m,*c,*cms; - char *frag; - - switch (colorspace) { - case AVCOL_SPC_RGB: - m = 
&yuv_bt601.m[0][0]; - c = &yuv_bt601.c[0]; - frag = fragment; - Debug(3,"BT601 Colorspace used\n"); - break; - case AVCOL_SPC_BT709: - case AVCOL_SPC_UNSPECIFIED: // comes with UHD - m = &yuv_bt709.m[0][0]; - c = &yuv_bt709.c[0]; - frag = fragment; - Debug(3,"BT709 Colorspace used\n"); - break; - case AVCOL_SPC_BT2020_NCL: - m = &yuv_bt2020ncl.m[0][0]; - c = &yuv_bt2020ncl.c[0]; - cms = &cms_matrix[0][0]; - frag = fragment_bt2100; - Debug(3,"BT2020NCL Colorspace used\n"); - break; - default: // fallback - m = &yuv_bt709.m[0][0]; - c = &yuv_bt709.c[0]; - frag = fragment; - Debug(3,"default BT709 Colorspace used %d\n",colorspace); - break; - } - - Debug(3,"vor create\n"); - gl_prog = glCreateProgram(); - Debug(3,"vor compile vertex\n"); - compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex); - Debug(3,"vor compile fragment\n"); - compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, frag); - glBindAttribLocation(gl_prog,0,"vertex_position"); - - for (n=0;n<6;n++) { - sprintf(vname,"vertex_texcoord%1d",n); - glBindAttribLocation(gl_prog,n+1,vname); - } - - link_shader(gl_prog); - - gl_colormatrix = glGetUniformLocation(gl_prog,"colormatrix"); - Debug(3,"get uniform colormatrix %d \n",gl_colormatrix); - if (gl_colormatrix != -1) - glProgramUniformMatrix3fv(gl_prog,gl_colormatrix,1,0,m); - GlxCheck(); - Debug(3,"nach set colormatrix\n"); - - gl_colormatrix_c = glGetUniformLocation(gl_prog,"colormatrix_c"); - Debug(3,"get uniform colormatrix_c %d %f\n",gl_colormatrix_c,*c); - if (gl_colormatrix_c != -1) - glProgramUniform3fv(gl_prog,gl_colormatrix_c,1,c); - GlxCheck(); - - if (colorspace == AVCOL_SPC_BT2020_NCL) { - cmsLoc = glGetUniformLocation(gl_prog,"cms_matrix"); - if (cmsLoc != -1) - glProgramUniformMatrix3fv(gl_prog,cmsLoc,1,0,cms); - GlxCheck(); - } - - return gl_prog; -} - -static void render_pass_quad(int flip, float xcrop, float ycrop) -{ - struct vertex va[4]; - int n; - const struct gl_vao_entry *e; - // uhhhh what a hack - if (!flip ) { - va[0].position.x = (float) -1.0; - va[0].position.y = (float) 1.0; - va[1].position.x = (float) -1.0; - va[1].position.y = (float) -1.0; - va[2].position.x = (float) 1.0; - va[2].position.y = (float) 1.0; - va[3].position.x = (float) 1.0; - va[3].position.y = (float) -1.0; - } else { - va[0].position.x = (float) -1.0; - va[0].position.y = (float) -1.0; - va[1].position.x = (float) -1.0; - va[1].position.y = (float) 1.0; - va[2].position.x = (float) 1.0; - va[2].position.y = (float) -1.0; - va[3].position.x = (float) 1.0; - va[3].position.y = (float) 1.0; - } - - va[0].texcoord[0].x = (float) 0.0 + xcrop; - va[0].texcoord[0].y = (float) 0.0 + ycrop; // abgeschnitten von links oben - va[0].texcoord[1].x = (float) 0.0 + xcrop; - va[0].texcoord[1].y = (float) 0.0 + ycrop; // abgeschnitten von links oben - va[1].texcoord[0].x = (float) 0.0 + xcrop; - va[1].texcoord[0].y = (float) 1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert - va[1].texcoord[1].x = (float) 0.0 + xcrop; - va[1].texcoord[1].y = (float) 1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert - va[2].texcoord[0].x = (float) 1.0 - xcrop; - va[2].texcoord[0].y = (float) 0.0 + ycrop; // abgeschnitten von rechts oben - va[2].texcoord[1].x = (float) 1.0 - xcrop; - va[2].texcoord[1].y = (float) 0.0 + ycrop; // abgeschnitten von rechts oben - va[3].texcoord[0].x = (float) 1.0 - xcrop; - va[3].texcoord[0].y = (float) 1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert - va[3].texcoord[1].x = (float) 1.0 - xcrop; - va[3].texcoord[1].y = (float) 1.0 - ycrop; // abgeschnitten von rechts 
unten 1.0 - wert - - - - glBindBuffer(GL_ARRAY_BUFFER, vao_buffer); - glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(struct vertex), va, GL_DYNAMIC_DRAW); - glBindBuffer(GL_ARRAY_BUFFER, 0); - - // enable attribs - glBindBuffer(GL_ARRAY_BUFFER, vao_buffer); - for ( n = 0; vertex_vao[n].name; n++) { - e = &vertex_vao[n]; - glEnableVertexAttribArray(n); - glVertexAttribPointer(n, e->num_elems, e->type, e->normalized, - sizeof(struct vertex), (void *)(intptr_t)e->offset); - } - glBindBuffer(GL_ARRAY_BUFFER, 0); - - // draw quad - glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); - for ( n = 0; vertex_vao[n].name; n++) - glDisableVertexAttribArray(n); -} - - + +// shader +#ifdef CUVID +char vertex_osd[] = { "\ +#version 330\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +}\n" }; + +char fragment_osd[] = { "\ +#version 330\n\ +#define texture1D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +uniform sampler2D texture0;\n\ +void main() {\n\ +vec4 color; \n\ +color = vec4(texture(texture0, texcoord0));\n\ +out_color = color;\n\ +}\n" }; + +char vertex[] = { "\ +#version 310 es\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +in vec2 vertex_texcoord1;\n\ +out vec2 texcoord1;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +texcoord1 = vertex_texcoord1;\n\ +}\n" }; + +char fragment[] = { "\ +#version 310 es\n\ +#define texture1D texture\n\ +#define texture3D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +in vec2 texcoord1;\n\ +uniform mat3 colormatrix;\n\ +uniform vec3 colormatrix_c;\n\ +uniform sampler2D texture0;\n\ +uniform sampler2D texture1;\n\ +void main() {\n\ +vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ +color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\ +color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\ +// color conversion\n\ +color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ +color.a = 1.0;\n\ +// color mapping\n\ +out_color = color;\n\ +}\n" }; + +char fragment_bt2100[] = { "\ +#version 310 es\n \ +#define texture1D texture\n\ +#define texture3D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +in vec2 texcoord1;\n\ +uniform mat3 colormatrix;\n\ +uniform vec3 colormatrix_c;\n\ +uniform mat3 cms_matrix;\n\ +uniform sampler2D texture0;\n\ +uniform sampler2D texture1;\n\ +//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ +void main() {\n\ +vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ +color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\ +color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\ +// color conversion\n\ +color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ +color.a = 1.0;\n\ +// color mapping\n\ +color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ +color.rgb = pow(color.rgb, vec3(2.4));\n\ +color.rgb = cms_matrix * color.rgb;\n\ +color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ +color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\ +out_color = color;\n\ +}\n" }; + +#else +char vertex_osd[] = { "\ +\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +}\n" }; + +char 
fragment_osd[] = { "\ +\n\ +#define texture1D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +uniform sampler2D texture0;\n\ +void main() {\n\ +vec4 color; \n\ +color = vec4(texture(texture0, texcoord0));\n\ +out_color = color;\n\ +}\n" }; + +char vertex[] = { "\ +\n\ +in vec2 vertex_position;\n\ +in vec2 vertex_texcoord0;\n\ +out vec2 texcoord0;\n\ +in vec2 vertex_texcoord1;\n\ +out vec2 texcoord1;\n\ +void main() {\n\ +gl_Position = vec4(vertex_position, 1.0, 1.0);\n\ +texcoord0 = vertex_texcoord0;\n\ +texcoord1 = vertex_texcoord1;\n\ +}\n" }; + +char fragment[] = { "\ +\n\ +#define texture1D texture\n\ +#define texture3D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +in vec2 texcoord1;\n\ +uniform mat3 colormatrix;\n\ +uniform vec3 colormatrix_c;\n\ +uniform sampler2D texture0;\n\ +uniform sampler2D texture1;\n\ +//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ +void main() {\n\ +vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ +color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\ +color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\ +// color conversion\n\ +color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ +color.a = 1.0;\n\ +// color mapping\n\ +out_color = color;\n\ +}\n" }; + +char fragment_bt2100[] = { "\ +\n \ +#define texture1D texture\n\ +#define texture3D texture\n\ +precision mediump float; \ +layout(location = 0) out vec4 out_color;\n\ +in vec2 texcoord0;\n\ +in vec2 texcoord1;\n\ +uniform mat3 colormatrix;\n\ +uniform vec3 colormatrix_c;\n\ +uniform mat3 cms_matrix;\n\ +uniform sampler2D texture0;\n\ +uniform sampler2D texture1;\n\ +//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\ +void main() {\n\ +vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\ +color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\ +color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\ +// color conversion\n\ +color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\ +color.a = 1.0;\n\ +// color mapping\n\ +color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ +color.rgb = pow(color.rgb, vec3(2.4));\n\ +color.rgb = cms_matrix * color.rgb;\n\ +color.rgb = clamp(color.rgb, 0.0, 1.0);\n\ +color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\ +out_color = color;\n\ +}\n" }; +#endif + +/* Color conversion matrix: RGB = m * YUV + c + * m is in row-major matrix, with m[row][col], e.g.: + * [ a11 a12 a13 ] float m[3][3] = { { a11, a12, a13 }, + * [ a21 a22 a23 ] { a21, a22, a23 }, + * [ a31 a32 a33 ] { a31, a32, a33 } }; + * This is accessed as e.g.: m[2-1][1-1] = a21 + * In particular, each row contains all the coefficients for one of R, G, B, + * while each column contains all the coefficients for one of Y, U, V: + * m[r,g,b][y,u,v] = ... + * The matrix could also be viewed as group of 3 vectors, e.g. the 1st column + * is the Y vector (1, 1, 1), the 2nd is the U vector, the 3rd the V vector. + * The matrix might also be used for other conversions and colorspaces. 
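+ *
+ * Note: the initializers below (yuv_bt601, yuv_bt709, ...) hold one braced
+ * triple per Y/U/V column vector, i.e. they are stored transposed relative
+ * to the m[r,g,b][y,u,v] row layout described above.  Because the data is
+ * uploaded with glProgramUniformMatrix3fv(..., transpose = GL_FALSE) in
+ * sc_generate(), each stored triple becomes one column of the GLSL mat3,
+ * so "mat3(colormatrix) * color.rgb + colormatrix_c" in the fragment
+ * shaders still evaluates the conceptual RGB = m * YUV + c.
+ * Worked example with yuv_bt709 (limited range):
+ *   R = 1.164384 * Y + 0.000000 * U + 1.792741 * V - 0.972945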
+ */ +struct mp_cmat +{ + GLfloat m[3][3]; // colormatrix + GLfloat c[3]; //colormatrix_c +}; + +struct mp_mat +{ + GLfloat m[3][3]; +}; + +// YUV input limited range (16-235 for luma, 16-240 for chroma) +// ITU-R BT.601 (SD) +struct mp_cmat yuv_bt601 = { {{1.164384, 1.164384, 1.164384}, + {0.00000, -0.391762, 2.017232}, + {1.596027, -0.812968, 0.000000}}, +{-0.874202, 0.531668, -1.085631} +}; + +// ITU-R BT.709 (HD) +struct mp_cmat yuv_bt709 = { {{1.164384, 1.164384, 1.164384}, + {0.00000, -0.213249, 2.112402}, + {1.792741, -0.532909, 0.000000}}, +{-0.972945, 0.301483, -1.133402} +}; + +// ITU-R BT.2020 non-constant luminance system +struct mp_cmat yuv_bt2020ncl = { {{1.164384, 1.164384, 1.164384}, + {0.00000, -0.187326, 2.141772}, + {1.678674, -0.650424, 0.000000}}, +{-0.915688, 0.347459, -1.148145} +}; + +// ITU-R BT.2020 constant luminance system +struct mp_cmat yuv_bt2020cl = { {{0.0000, 1.164384, 0.000000}, + {0.00000, 0.000000, 1.138393}, + {1.138393, 0.000000, 0.000000}}, +{-0.571429, -0.073059, -0.571429} +}; + +float cms_matrix[3][3] = { {1.660497, -0.124547, -0.018154}, +{-0.587657, 1.132895, -0.100597}, +{-0.072840, -0.008348, 1.118751} +}; + +struct gl_vao_entry +{ + // used for shader / glBindAttribLocation + const char *name; + // glVertexAttribPointer() arguments + int num_elems; // size (number of elements) + GLenum type; + bool normalized; + int offset; +}; + +struct vertex_pt +{ + float x, y; +}; + +struct vertex_pi +{ + GLint x, y; +}; + +#define TEXUNIT_VIDEO_NUM 6 + +struct vertex +{ + struct vertex_pt position; + struct vertex_pt texcoord[TEXUNIT_VIDEO_NUM]; +}; + +static const struct gl_vao_entry vertex_vao[] = { + {"position", 2, GL_FLOAT, false, offsetof(struct vertex, position)}, + {"texcoord0", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[0])}, + {"texcoord1", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[1])}, + {0} +}; + +static void compile_attach_shader(GLuint program, GLenum type, const char *source) +{ + GLuint shader; + GLint status, log_length; + char log[4000]; + GLsizei len; + + shader = glCreateShader(type); + glShaderSource(shader, 1, &source, NULL); + glCompileShader(shader); + status = 0; + glGetShaderiv(shader, GL_COMPILE_STATUS, &status); + log_length = 0; + glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length); + glGetShaderInfoLog(shader, 4000, &len, log); + GlxCheck(); + Debug(3, "compile Status %d loglen %d >%s<\n", status, log_length, log); + + glAttachShader(program, shader); + glDeleteShader(shader); +} + +static void link_shader(GLuint program) +{ + GLint status, log_length; + + glLinkProgram(program); + status = 0; + glGetProgramiv(program, GL_LINK_STATUS, &status); + log_length = 0; + glGetProgramiv(program, GL_INFO_LOG_LENGTH, &log_length); + Debug(3, "Link Status %d loglen %d\n", status, log_length); +} + +static GLuint sc_generate_osd(GLuint gl_prog) +{ + + Debug(3, "vor create osd\n"); + gl_prog = glCreateProgram(); + Debug(3, "vor compile vertex osd\n"); + compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex_osd); + Debug(3, "vor compile fragment osd \n"); + compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, fragment_osd); + glBindAttribLocation(gl_prog, 0, "vertex_position"); + glBindAttribLocation(gl_prog, 1, "vertex_texcoord0"); + + link_shader(gl_prog); + return gl_prog; +} + +static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) +{ + + char vname[80]; + int n; + GLint cmsLoc; + float *m, *c, *cms; + char *frag; + + switch (colorspace) { + case AVCOL_SPC_RGB: + m = &yuv_bt601.m[0][0]; + c = 
&yuv_bt601.c[0]; + frag = fragment; + Debug(3, "BT601 Colorspace used\n"); + break; + case AVCOL_SPC_BT709: + case AVCOL_SPC_UNSPECIFIED: // comes with UHD + m = &yuv_bt709.m[0][0]; + c = &yuv_bt709.c[0]; + frag = fragment; + Debug(3, "BT709 Colorspace used\n"); + break; + case AVCOL_SPC_BT2020_NCL: + m = &yuv_bt2020ncl.m[0][0]; + c = &yuv_bt2020ncl.c[0]; + cms = &cms_matrix[0][0]; + frag = fragment_bt2100; + Debug(3, "BT2020NCL Colorspace used\n"); + break; + default: // fallback + m = &yuv_bt709.m[0][0]; + c = &yuv_bt709.c[0]; + frag = fragment; + Debug(3, "default BT709 Colorspace used %d\n", colorspace); + break; + } + + Debug(3, "vor create\n"); + gl_prog = glCreateProgram(); + Debug(3, "vor compile vertex\n"); + compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex); + Debug(3, "vor compile fragment\n"); + compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, frag); + glBindAttribLocation(gl_prog, 0, "vertex_position"); + + for (n = 0; n < 6; n++) { + sprintf(vname, "vertex_texcoord%1d", n); + glBindAttribLocation(gl_prog, n + 1, vname); + } + + link_shader(gl_prog); + + gl_colormatrix = glGetUniformLocation(gl_prog, "colormatrix"); + Debug(3, "get uniform colormatrix %d \n", gl_colormatrix); + if (gl_colormatrix != -1) + glProgramUniformMatrix3fv(gl_prog, gl_colormatrix, 1, 0, m); + GlxCheck(); + Debug(3, "nach set colormatrix\n"); + + gl_colormatrix_c = glGetUniformLocation(gl_prog, "colormatrix_c"); + Debug(3, "get uniform colormatrix_c %d %f\n", gl_colormatrix_c, *c); + if (gl_colormatrix_c != -1) + glProgramUniform3fv(gl_prog, gl_colormatrix_c, 1, c); + GlxCheck(); + + if (colorspace == AVCOL_SPC_BT2020_NCL) { + cmsLoc = glGetUniformLocation(gl_prog, "cms_matrix"); + if (cmsLoc != -1) + glProgramUniformMatrix3fv(gl_prog, cmsLoc, 1, 0, cms); + GlxCheck(); + } + + return gl_prog; +} + +static void render_pass_quad(int flip, float xcrop, float ycrop) +{ + struct vertex va[4]; + int n; + const struct gl_vao_entry *e; + + // uhhhh what a hack + if (!flip) { + va[0].position.x = (float)-1.0; + va[0].position.y = (float)1.0; + va[1].position.x = (float)-1.0; + va[1].position.y = (float)-1.0; + va[2].position.x = (float)1.0; + va[2].position.y = (float)1.0; + va[3].position.x = (float)1.0; + va[3].position.y = (float)-1.0; + } else { + va[0].position.x = (float)-1.0; + va[0].position.y = (float)-1.0; + va[1].position.x = (float)-1.0; + va[1].position.y = (float)1.0; + va[2].position.x = (float)1.0; + va[2].position.y = (float)-1.0; + va[3].position.x = (float)1.0; + va[3].position.y = (float)1.0; + } + + va[0].texcoord[0].x = (float)0.0 + xcrop; + va[0].texcoord[0].y = (float)0.0 + ycrop; // abgeschnitten von links oben + va[0].texcoord[1].x = (float)0.0 + xcrop; + va[0].texcoord[1].y = (float)0.0 + ycrop; // abgeschnitten von links oben + va[1].texcoord[0].x = (float)0.0 + xcrop; + va[1].texcoord[0].y = (float)1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert + va[1].texcoord[1].x = (float)0.0 + xcrop; + va[1].texcoord[1].y = (float)1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert + va[2].texcoord[0].x = (float)1.0 - xcrop; + va[2].texcoord[0].y = (float)0.0 + ycrop; // abgeschnitten von rechts oben + va[2].texcoord[1].x = (float)1.0 - xcrop; + va[2].texcoord[1].y = (float)0.0 + ycrop; // abgeschnitten von rechts oben + va[3].texcoord[0].x = (float)1.0 - xcrop; + va[3].texcoord[0].y = (float)1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert + va[3].texcoord[1].x = (float)1.0 - xcrop; + va[3].texcoord[1].y = (float)1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert 
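+
+    // The German comments above mark which corner of the picture gets cut
+    // off: xcrop/ycrop move the texture coordinates of all four corners
+    // inwards (0.0 + crop on the left/top edge, 1.0 - crop on the
+    // right/bottom edge), so a symmetric border of the source texture is
+    // cropped away while the quad itself still fills the whole viewport.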
+ + glBindBuffer(GL_ARRAY_BUFFER, vao_buffer); + glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(struct vertex), va, GL_DYNAMIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + // enable attribs + glBindBuffer(GL_ARRAY_BUFFER, vao_buffer); + for (n = 0; vertex_vao[n].name; n++) { + e = &vertex_vao[n]; + glEnableVertexAttribArray(n); + glVertexAttribPointer(n, e->num_elems, e->type, e->normalized, sizeof(struct vertex), + (void *)(intptr_t) e->offset); + } + glBindBuffer(GL_ARRAY_BUFFER, 0); + + // draw quad + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + for (n = 0; vertex_vao[n].name; n++) + glDisableVertexAttribArray(n); +} diff --git a/softhdcuvid.cpp b/softhdcuvid.cpp index 1142642..9fd4f4b 100644 --- a/softhdcuvid.cpp +++ b/softhdcuvid.cpp @@ -1,26 +1,26 @@ /// -/// @file softhddevice.cpp @brief A software HD device plugin for VDR. +/// @file softhddevice.cpp @brief A software HD device plugin for VDR. /// -/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: fa6a877682f47297580ff5f502425fc7948cb2fa $ +/// $Id: fa6a877682f47297580ff5f502425fc7948cb2fa $ ////////////////////////////////////////////////////////////////////////////// -#define __STDC_CONSTANT_MACROS ///< needed for ffmpeg UINT64_C +#define __STDC_CONSTANT_MACROS ///< needed for ffmpeg UINT64_C #include #include @@ -46,12 +46,12 @@ extern "C" #include #include #ifndef USE_OPENGLOSD - #include "audio.h" - #include "video.h" - #include "codec.h" +#include "audio.h" +#include "video.h" +#include "codec.h" #endif #if PLACEBO - #include +#include #endif } @@ -74,8 +74,7 @@ static const char *const VERSION = "2.1.0" ; /// vdr-plugin description. 
-static const char *const DESCRIPTION = -trNOOP("A software and GPU emulated UHD device"); +static const char *const DESCRIPTION = trNOOP("A software and GPU emulated UHD device"); /// vdr-plugin text of main menu entry static const char *MAINMENUENTRY = trNOOP("SoftUHD"); @@ -85,38 +84,37 @@ static class cSoftHdDevice *MyDevice; ////////////////////////////////////////////////////////////////////////////// -#define RESOLUTIONS 5 ///< number of resolutions +#define RESOLUTIONS 5 ///< number of resolutions /// resolutions names static const char *const Resolution[RESOLUTIONS] = { "576i", "720p", "1080i_fake", "1080i", "UHD" }; +static char ConfigMakePrimary; ///< config primary wanted +static char ConfigHideMainMenuEntry; ///< config hide main menu entry +static char ConfigDetachFromMainMenu; ///< detach from main menu entry instead of suspend +static char ConfigSuspendClose; ///< suspend should close devices +static char ConfigSuspendX11; ///< suspend should stop x11 -static char ConfigMakePrimary; ///< config primary wanted -static char ConfigHideMainMenuEntry; ///< config hide main menu entry -static char ConfigDetachFromMainMenu; ///< detach from main menu entry instead of suspend -static char ConfigSuspendClose; ///< suspend should close devices -static char ConfigSuspendX11; ///< suspend should stop x11 +static char Config4to3DisplayFormat = 1; ///< config 4:3 display format +static char ConfigOtherDisplayFormat = 1; ///< config other display format +static uint32_t ConfigVideoBackground; ///< config video background color +static int ConfigOsdWidth; ///< config OSD width +static int ConfigOsdHeight; ///< config OSD height +static char ConfigVideoStudioLevels; ///< config use studio levels +static char ConfigVideo60HzMode; ///< config use 60Hz display mode +static char ConfigVideoSoftStartSync; ///< config use softstart sync +static char ConfigVideoBlackPicture; ///< config enable black picture mode +char ConfigVideoClearOnSwitch; ///< config enable Clear on channel switch -static char Config4to3DisplayFormat = 1; ///< config 4:3 display format -static char ConfigOtherDisplayFormat = 1; ///< config other display format -static uint32_t ConfigVideoBackground; ///< config video background color -static int ConfigOsdWidth; ///< config OSD width -static int ConfigOsdHeight; ///< config OSD height -static char ConfigVideoStudioLevels; ///< config use studio levels -static char ConfigVideo60HzMode; ///< config use 60Hz display mode -static char ConfigVideoSoftStartSync; ///< config use softstart sync -static char ConfigVideoBlackPicture; ///< config enable black picture mode -char ConfigVideoClearOnSwitch; ///< config enable Clear on channel switch - -static int ConfigVideoBrightness; ///< config video brightness -static int ConfigVideoContrast = 100; ///< config video contrast -static int ConfigVideoSaturation = 100; ///< config video saturation -static int ConfigVideoHue; ///< config video hue -static int ConfigGamma; ///< config Gamma -static int ConfigTargetColorSpace; ///< config Target Colrospace -static int ConfigScalerTest; /// Test for Scalers +static int ConfigVideoBrightness; ///< config video brightness +static int ConfigVideoContrast = 100; ///< config video contrast +static int ConfigVideoSaturation = 100; ///< config video saturation +static int ConfigVideoHue; ///< config video hue +static int ConfigGamma; ///< config Gamma +static int ConfigTargetColorSpace; ///< config Target Colrospace +static int ConfigScalerTest; /// Test for Scalers static int ConfigColorBlindness; 
static int ConfigColorBlindnessFaktor; @@ -144,68 +142,68 @@ static int ConfigVideoCutTopBottom[RESOLUTIONS]; /// config cut left and right pixels static int ConfigVideoCutLeftRight[RESOLUTIONS]; -static int ConfigAutoCropEnabled; ///< auto crop detection enabled -static int ConfigAutoCropInterval; ///< auto crop detection interval -static int ConfigAutoCropDelay; ///< auto crop detection delay -static int ConfigAutoCropTolerance; ///< auto crop detection tolerance +static int ConfigAutoCropEnabled; ///< auto crop detection enabled +static int ConfigAutoCropInterval; ///< auto crop detection interval +static int ConfigAutoCropDelay; ///< auto crop detection delay +static int ConfigAutoCropTolerance; ///< auto crop detection tolerance -static int ConfigVideoAudioDelay; ///< config audio delay -static char ConfigAudioDrift; ///< config audio drift -static char ConfigAudioPassthrough; ///< config audio pass-through mask -static char AudioPassthroughState; ///< flag audio pass-through on/off -static char ConfigAudioDownmix; ///< config ffmpeg audio downmix -static char ConfigAudioSoftvol; ///< config use software volume -static char ConfigAudioNormalize; ///< config use normalize volume -static int ConfigAudioMaxNormalize; ///< config max normalize factor -static char ConfigAudioCompression; ///< config use volume compression -static int ConfigAudioMaxCompression; ///< config max volume compression -static int ConfigAudioStereoDescent; ///< config reduce stereo loudness -int ConfigAudioBufferTime; ///< config size ms of audio buffer -static int ConfigAudioAutoAES; ///< config automatic AES handling +static int ConfigVideoAudioDelay; ///< config audio delay +static char ConfigAudioDrift; ///< config audio drift +static char ConfigAudioPassthrough; ///< config audio pass-through mask +static char AudioPassthroughState; ///< flag audio pass-through on/off +static char ConfigAudioDownmix; ///< config ffmpeg audio downmix +static char ConfigAudioSoftvol; ///< config use software volume +static char ConfigAudioNormalize; ///< config use normalize volume +static int ConfigAudioMaxNormalize; ///< config max normalize factor +static char ConfigAudioCompression; ///< config use volume compression +static int ConfigAudioMaxCompression; ///< config max volume compression +static int ConfigAudioStereoDescent; ///< config reduce stereo loudness +int ConfigAudioBufferTime; ///< config size ms of audio buffer +static int ConfigAudioAutoAES; ///< config automatic AES handling -static char *ConfigX11Display; ///< config x11 display -static char *ConfigAudioDevice; ///< config audio stereo device -static char *ConfigPassthroughDevice; ///< config audio pass-through device +static char *ConfigX11Display; ///< config x11 display +static char *ConfigAudioDevice; ///< config audio stereo device +static char *ConfigPassthroughDevice; ///< config audio pass-through device #ifdef USE_PIP -static int ConfigPipX = 100 - 3 - 18; ///< config pip pip x in % -static int ConfigPipY = 100 - 4 - 18; ///< config pip pip y in % -static int ConfigPipWidth = 18; ///< config pip pip width in % -static int ConfigPipHeight = 18; ///< config pip pip height in % -static int ConfigPipVideoX; ///< config pip video x in % -static int ConfigPipVideoY; ///< config pip video y in % -static int ConfigPipVideoWidth; ///< config pip video width in % -static int ConfigPipVideoHeight; ///< config pip video height in % -static int ConfigPipAltX; ///< config pip alt. pip x in % -static int ConfigPipAltY = 50; ///< config pip alt. 
pip y in % -static int ConfigPipAltWidth; ///< config pip alt. pip width in % -static int ConfigPipAltHeight = 50; ///< config pip alt. pip height in % -static int ConfigPipAltVideoX; ///< config pip alt. video x in % -static int ConfigPipAltVideoY; ///< config pip alt. video y in % -static int ConfigPipAltVideoWidth; ///< config pip alt. video width in % -static int ConfigPipAltVideoHeight = 50; ///< config pip alt. video height in % +static int ConfigPipX = 100 - 3 - 18; ///< config pip pip x in % +static int ConfigPipY = 100 - 4 - 18; ///< config pip pip y in % +static int ConfigPipWidth = 18; ///< config pip pip width in % +static int ConfigPipHeight = 18; ///< config pip pip height in % +static int ConfigPipVideoX; ///< config pip video x in % +static int ConfigPipVideoY; ///< config pip video y in % +static int ConfigPipVideoWidth; ///< config pip video width in % +static int ConfigPipVideoHeight; ///< config pip video height in % +static int ConfigPipAltX; ///< config pip alt. pip x in % +static int ConfigPipAltY = 50; ///< config pip alt. pip y in % +static int ConfigPipAltWidth; ///< config pip alt. pip width in % +static int ConfigPipAltHeight = 50; ///< config pip alt. pip height in % +static int ConfigPipAltVideoX; ///< config pip alt. video x in % +static int ConfigPipAltVideoY; ///< config pip alt. video y in % +static int ConfigPipAltVideoWidth; ///< config pip alt. video width in % +static int ConfigPipAltVideoHeight = 50; ///< config pip alt. video height in % #endif #ifdef USE_SCREENSAVER -static char ConfigEnableDPMSatBlackScreen; ///< Enable DPMS(Screensaver) while displaying black screen(radio) +static char ConfigEnableDPMSatBlackScreen; ///< Enable DPMS(Screensaver) while displaying black screen(radio) #endif #ifdef USE_OPENGLOSD -static int ConfigMaxSizeGPUImageCache = 128; ///< maximum size of GPU mem to be used for image caching +static int ConfigMaxSizeGPUImageCache = 128; ///< maximum size of GPU mem to be used for image caching #endif -static volatile int DoMakePrimary; ///< switch primary device to this +static volatile int DoMakePrimary; ///< switch primary device to this -#define SUSPEND_EXTERNAL -1 ///< play external suspend mode -#define NOT_SUSPENDED 0 ///< not suspend mode -#define SUSPEND_NORMAL 1 ///< normal suspend mode -#define SUSPEND_DETACHED 2 ///< detached suspend mode -static signed char SuspendMode; ///< suspend mode +#define SUSPEND_EXTERNAL -1 ///< play external suspend mode +#define NOT_SUSPENDED 0 ///< not suspend mode +#define SUSPEND_NORMAL 1 ///< normal suspend mode +#define SUSPEND_DETACHED 2 ///< detached suspend mode +static signed char SuspendMode; ///< suspend mode ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// -// C Callbacks +// C Callbacks ////////////////////////////////////////////////////////////////////////////// /** @@ -232,7 +230,7 @@ class cSoftRemote:public cRemote ** @param release flag key released */ bool Put(const char *code, bool repeat = false, bool release = false) { - return cRemote::Put(code, repeat, release); + return cRemote::Put(code, repeat, release); } }; @@ -245,47 +243,46 @@ class cSoftRemote:public cRemote ** @param release released key flag ** @param letter x11 character string (system setting locale) */ -extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, - int release, const char *letter) +extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, 
int release, const char *letter) { cRemote *remote; - cSoftRemote *csoft; + cSoftRemote *csoft; if (!keymap || !key) { - return; + return; } // find remote for (remote = Remotes.First(); remote; remote = Remotes.Next(remote)) { - if (!strcmp(remote->Name(), keymap)) { - break; - } + if (!strcmp(remote->Name(), keymap)) { + break; + } } // if remote not already exists, create it if (remote) { - csoft = (cSoftRemote *) remote; + csoft = (cSoftRemote *) remote; } else { - dsyslog("[softhddev]%s: remote '%s' not found\n", __FUNCTION__, keymap); - csoft = new cSoftRemote(keymap); + dsyslog("[softhddev]%s: remote '%s' not found\n", __FUNCTION__, keymap); + csoft = new cSoftRemote(keymap); } //dsyslog("[softhddev]%s %s, %s, %s\n", __FUNCTION__, keymap, key, letter); - if (key[1]) { // no single character - if (!csoft->Put(key, repeat, release) && letter && !cRemote::IsLearning()) { - cCharSetConv conv; - unsigned code; + if (key[1]) { // no single character + if (!csoft->Put(key, repeat, release) && letter && !cRemote::IsLearning()) { + cCharSetConv conv; + unsigned code; - code = Utf8CharGet(conv.Convert(letter)); - if (code <= 0xFF) { - cRemote::Put(KBDKEY(code)); // feed it for edit mode - } - } + code = Utf8CharGet(conv.Convert(letter)); + if (code <= 0xFF) { + cRemote::Put(KBDKEY(code)); // feed it for edit mode + } + } } else if (!csoft->Put(key, repeat, release)) { - cRemote::Put(KBDKEY(key[0])); // feed it for edit mode + cRemote::Put(KBDKEY(key[0])); // feed it for edit mode } } ////////////////////////////////////////////////////////////////////////////// -// OSD +// OSD ////////////////////////////////////////////////////////////////////////////// /** @@ -294,18 +291,18 @@ extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, class cSoftOsd:public cOsd { public: - static volatile char Dirty; ///< flag force redraw everything - int OsdLevel; ///< current osd level FIXME: remove + static volatile char Dirty; ///< flag force redraw everything + int OsdLevel; ///< current osd level FIXME: remove - cSoftOsd(int, int, uint); ///< osd constructor - virtual ~ cSoftOsd(void); ///< osd destructor + cSoftOsd(int, int, uint); ///< osd constructor + virtual ~ cSoftOsd(void); ///< osd destructor /// set the sub-areas to the given areas virtual eOsdError SetAreas(const tArea *, int); - virtual void Flush(void); ///< commits all data to the hardware - virtual void SetActive(bool); ///< sets OSD to be the active one + virtual void Flush(void); ///< commits all data to the hardware + virtual void SetActive(bool); ///< sets OSD to be the active one }; -volatile char cSoftOsd::Dirty; ///< flag force redraw everything +volatile char cSoftOsd::Dirty; ///< flag force redraw everything /** ** Sets this OSD to be the active one. @@ -322,18 +319,18 @@ void cSoftOsd::SetActive(bool on) #endif if (Active() == on) { - return; // already active, no action + return; // already active, no action } cOsd::SetActive(on); if (on) { - Dirty = 1; - // only flush here if there are already bitmaps - if (GetBitmap(0)) { - Flush(); - } + Dirty = 1; + // only flush here if there are already bitmaps + if (GetBitmap(0)) { + Flush(); + } } else { - OsdClose(); + OsdClose(); } } @@ -352,8 +349,7 @@ cSoftOsd::cSoftOsd(int left, int top, uint level) #ifdef OSD_DEBUG /* FIXME: OsdWidth/OsdHeight not correct! 
*/ - dsyslog("[softhddev]%s: %dx%d%+d%+d, %d\n", __FUNCTION__, OsdWidth(), - OsdHeight(), left, top, level); + dsyslog("[softhddev]%s: %dx%d%+d%+d, %d\n", __FUNCTION__, OsdWidth(), OsdHeight(), left, top, level); #endif OsdLevel = level; @@ -375,14 +371,14 @@ cSoftOsd::~cSoftOsd(void) #ifdef USE_YAEPG // support yaepghd, video window - if (vidWin.bpp) { // restore fullsized video - int width; - int height; - double video_aspect; + if (vidWin.bpp) { // restore fullsized video + int width; + int height; + double video_aspect; - ::GetOsdSize(&width, &height, &video_aspect); - // works osd relative - ::ScaleVideo(0, 0, width, height); + ::GetOsdSize(&width, &height, &video_aspect); + // works osd relative + ::ScaleVideo(0, 0, width, height); } #endif } @@ -398,16 +394,16 @@ eOsdError cSoftOsd::SetAreas(const tArea * areas, int n) // clear old OSD, when new areas are set if (!IsTrueColor()) { - cBitmap *bitmap; - int i; + cBitmap *bitmap; + int i; - for (i = 0; (bitmap = GetBitmap(i)); i++) { - bitmap->Clean(); - } + for (i = 0; (bitmap = GetBitmap(i)); i++) { + bitmap->Clean(); + } } if (Active()) { - VideoOsdClear(); - Dirty = 1; + VideoOsdClear(); + Dirty = 1; } return cOsd::SetAreas(areas, n); } @@ -420,260 +416,335 @@ void cSoftOsd::Flush(void) cPixmapMemory *pm; #ifdef OSD_DEBUG - dsyslog("[softhddev]%s: level %d active %d\n", __FUNCTION__, OsdLevel, - Active()); + dsyslog("[softhddev]%s: level %d active %d\n", __FUNCTION__, OsdLevel, Active()); #endif - if (!Active()) { // this osd is not active - return; + if (!Active()) { // this osd is not active + return; } #ifdef USE_YAEPG // support yaepghd, video window if (vidWin.bpp) { #ifdef OSD_DEBUG - dsyslog("[softhddev]%s: %dx%d%+d%+d\n", __FUNCTION__, vidWin.Width(), vidWin.Height(), vidWin.x1, vidWin.y2); + dsyslog("[softhddev]%s: %dx%d%+d%+d\n", __FUNCTION__, vidWin.Width(), vidWin.Height(), vidWin.x1, vidWin.y2); #endif - // FIXME: vidWin is OSD relative not video window. - // FIXME: doesn't work if fixed OSD width != real window width - // FIXME: solved in VideoSetOutputPosition - ::ScaleVideo(Left() + vidWin.x1, Top() + vidWin.y1, vidWin.Width(), vidWin.Height()); + // FIXME: vidWin is OSD relative not video window. 
+ // FIXME: doesn't work if fixed OSD width != real window width + // FIXME: solved in VideoSetOutputPosition + ::ScaleVideo(Left() + vidWin.x1, Top() + vidWin.y1, vidWin.Width(), vidWin.Height()); } #endif if (!IsTrueColor()) { - cBitmap *bitmap; - int i; + cBitmap *bitmap; + int i; #ifdef OSD_DEBUG - static char warned; + static char warned; - if (!warned) { - dsyslog("[softhddev]%s: FIXME: should be truecolor\n", - __FUNCTION__); - warned = 1; - } + if (!warned) { + dsyslog("[softhddev]%s: FIXME: should be truecolor\n", __FUNCTION__); + warned = 1; + } #endif - // draw all bitmaps - for (i = 0; (bitmap = GetBitmap(i)); ++i) { - uint8_t *argb; - int xs; - int ys; - int x; - int y; - int w; - int h; - int x1; - int y1; - int x2; - int y2; + // draw all bitmaps + for (i = 0; (bitmap = GetBitmap(i)); ++i) { + uint8_t *argb; + int xs; + int ys; + int x; + int y; + int w; + int h; + int x1; + int y1; + int x2; + int y2; - // get dirty bounding box - if (Dirty) { // forced complete update - x1 = 0; - y1 = 0; - x2 = bitmap->Width() - 1; - y2 = bitmap->Height() - 1; - } else if (!bitmap->Dirty(x1, y1, x2, y2)) { - continue; // nothing dirty continue - } - // convert and upload only visible dirty areas - xs = bitmap->X0() + Left(); - ys = bitmap->Y0() + Top(); - // FIXME: negtative position bitmaps - w = x2 - x1 + 1; - h = y2 - y1 + 1; - // clip to screen - if (1) { // just for the case it makes trouble - int width; - int height; - double video_aspect; + // get dirty bounding box + if (Dirty) { // forced complete update + x1 = 0; + y1 = 0; + x2 = bitmap->Width() - 1; + y2 = bitmap->Height() - 1; + } else if (!bitmap->Dirty(x1, y1, x2, y2)) { + continue; // nothing dirty continue + } + // convert and upload only visible dirty areas + xs = bitmap->X0() + Left(); + ys = bitmap->Y0() + Top(); + // FIXME: negtative position bitmaps + w = x2 - x1 + 1; + h = y2 - y1 + 1; + // clip to screen + if (1) { // just for the case it makes trouble + int width; + int height; + double video_aspect; - if (xs < 0) { - if (xs + x1 < 0) { - x1 -= xs + x1; - w += xs + x1; - if (w <= 0) { - continue; - } - } - xs = 0; - } - if (ys < 0) { - if (ys + y1 < 0) { - y1 -= ys + y1; - h += ys + y1; - if (h <= 0) { - continue; - } - } - ys = 0; - } - ::GetOsdSize(&width, &height, &video_aspect); - if (w > width - xs - x1) { - w = width - xs - x1; - if (w <= 0) { - continue; - } - x2 = x1 + w - 1; - } - if (h > height - ys - y1) { - h = height - ys - y1; - if (h <= 0) { - continue; - } - y2 = y1 + h - 1; - } - } + if (xs < 0) { + if (xs + x1 < 0) { + x1 -= xs + x1; + w += xs + x1; + if (w <= 0) { + continue; + } + } + xs = 0; + } + if (ys < 0) { + if (ys + y1 < 0) { + y1 -= ys + y1; + h += ys + y1; + if (h <= 0) { + continue; + } + } + ys = 0; + } + ::GetOsdSize(&width, &height, &video_aspect); + if (w > width - xs - x1) { + w = width - xs - x1; + if (w <= 0) { + continue; + } + x2 = x1 + w - 1; + } + if (h > height - ys - y1) { + h = height - ys - y1; + if (h <= 0) { + continue; + } + y2 = y1 + h - 1; + } + } #ifdef DEBUG - if (w > bitmap->Width() || h > bitmap->Height()) { - esyslog(tr("[softhddev]: dirty area too big\n")); - abort(); - } + if (w > bitmap->Width() || h > bitmap->Height()) { + esyslog(tr("[softhddev]: dirty area too big\n")); + abort(); + } #endif - argb = (uint8_t *) malloc(w * h * sizeof(uint32_t)); - for (y = y1; y <= y2; ++y) { - for (x = x1; x <= x2; ++x) { - ((uint32_t *) argb)[x - x1 + (y - y1) * w] = - bitmap->GetColor(x, y); - } - } + argb = (uint8_t *) malloc(w * h * sizeof(uint32_t)); + for (y = y1; y 
<= y2; ++y) { + for (x = x1; x <= x2; ++x) { + ((uint32_t *) argb)[x - x1 + (y - y1) * w] = bitmap->GetColor(x, y); + } + } #ifdef OSD_DEBUG - dsyslog("[softhddev]%s: draw %dx%d%+d%+d bm\n", __FUNCTION__, w, h, - xs + x1, ys + y1); + dsyslog("[softhddev]%s: draw %dx%d%+d%+d bm\n", __FUNCTION__, w, h, xs + x1, ys + y1); #endif - OsdDrawARGB(0, 0, w, h, w * sizeof(uint32_t), argb, xs + x1, - ys + y1); + OsdDrawARGB(0, 0, w, h, w * sizeof(uint32_t), argb, xs + x1, ys + y1); - bitmap->Clean(); - // FIXME: reuse argb - free(argb); - } - Dirty = 0; - return; + bitmap->Clean(); + // FIXME: reuse argb + free(argb); + } + Dirty = 0; + return; } LOCK_PIXMAPS; while ((pm = (dynamic_cast < cPixmapMemory * >(RenderPixmaps())))) { - int xp; - int yp; - int stride; - int x; - int y; - int w; - int h; + int xp; + int yp; + int stride; + int x; + int y; + int w; + int h; - x = pm->ViewPort().X(); - y = pm->ViewPort().Y(); - w = pm->ViewPort().Width(); - h = pm->ViewPort().Height(); - stride = w * sizeof(tColor); + x = pm->ViewPort().X(); + y = pm->ViewPort().Y(); + w = pm->ViewPort().Width(); + h = pm->ViewPort().Height(); + stride = w * sizeof(tColor); - // clip to osd - xp = 0; - if (x < 0) { - xp = -x; - w -= xp; - x = 0; - } + // clip to osd + xp = 0; + if (x < 0) { + xp = -x; + w -= xp; + x = 0; + } - yp = 0; - if (y < 0) { - yp = -y; - h -= yp; - y = 0; - } + yp = 0; + if (y < 0) { + yp = -y; + h -= yp; + y = 0; + } - if (w > Width() - x) { - w = Width() - x; - } - if (h > Height() - y) { - h = Height() - y; - } + if (w > Width() - x) { + w = Width() - x; + } + if (h > Height() - y) { + h = Height() - y; + } - x += Left(); - y += Top(); + x += Left(); + y += Top(); - // clip to screen - if (1) { // just for the case it makes trouble - // and it can happen! - int width; - int height; - double video_aspect; + // clip to screen + if (1) { // just for the case it makes trouble + // and it can happen! 
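+            // "clip to screen": after adding Left()/Top() the rectangle is
+            // in absolute output coordinates, so clamp it to the size
+            // reported by ::GetOsdSize() below and OsdDrawARGB() is never
+            // called with coordinates outside the visible area.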
+ int width; + int height; + double video_aspect; - if (x < 0) { - w += x; - xp += -x; - x = 0; - } - if (y < 0) { - h += y; - yp += -y; - y = 0; - } + if (x < 0) { + w += x; + xp += -x; + x = 0; + } + if (y < 0) { + h += y; + yp += -y; + y = 0; + } - ::GetOsdSize(&width, &height, &video_aspect); - if (w > width - x) { - w = width - x; - } - if (h > height - y) { - h = height - y; - } - } + ::GetOsdSize(&width, &height, &video_aspect); + if (w > width - x) { + w = width - x; + } + if (h > height - y) { + h = height - y; + } + } #ifdef OSD_DEBUG - dsyslog("[softhddev]%s: draw %dx%d%+d%+d*%d -> %+d%+d %p\n", - __FUNCTION__, w, h, xp, yp, stride, x, y, pm->Data()); + dsyslog("[softhddev]%s: draw %dx%d%+d%+d*%d -> %+d%+d %p\n", __FUNCTION__, w, h, xp, yp, stride, x, y, + pm->Data()); #endif - OsdDrawARGB(xp, yp, w, h, stride, pm->Data(), x, y); + OsdDrawARGB(xp, yp, w, h, stride, pm->Data(), x, y); #if APIVERSNUM >= 20110 - DestroyPixmap(pm); + DestroyPixmap(pm); #else - delete pm; + delete pm; #endif } Dirty = 0; } #ifdef USE_OPENGLOSD -//Dummy OSD for OpenGL OSD if no X Server is available -class cDummyOsd : public cOsd { +//Dummy OSD for OpenGL OSD if no X Server is available +class cDummyOsd:public cOsd +{ public: - cDummyOsd(int Left, int Top, uint Level) : cOsd(Left, Top, Level) {} - virtual ~cDummyOsd() {} - virtual cPixmap *CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null) { - (void)Layer; (void)ViewPort; (void)DrawPort; - return NULL; + cDummyOsd(int Left, int Top, uint Level):cOsd(Left, Top, Level) + { } - virtual void DestroyPixmap(cPixmap *Pixmap) { (void)Pixmap; } - virtual void DrawImage(const cPoint &Point, const cImage &Image) { (void)Point; (void)Image; } - virtual void DrawImage(const cPoint &Point, int ImageHandle) { (void) Point; (void)ImageHandle; } - virtual eOsdError CanHandleAreas(const tArea *Areas, int NumAreas) { (void)Areas; (void)NumAreas; return oeOk; } - virtual eOsdError SetAreas(const tArea *Areas, int NumAreas) { (void)Areas; (void)NumAreas; return oeOk; } - virtual void SaveRegion(int x1, int y1, int x2, int y2) { (void)x1; (void)y1; (void)x2; (void)y2; } - virtual void RestoreRegion(void) {} - virtual eOsdError SetPalette(const cPalette &Palette, int Area) { (void)Palette; (void)Area; return oeOk; } - virtual void DrawPixel(int x, int y, tColor Color) { (void)x; (void)y; (void)Color; } - virtual void DrawBitmap(int x, int y, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0, bool ReplacePalette = false, bool Overlay = false) { - (void)x; (void)y; (void)Bitmap; (void)ColorFg; (void)ColorBg; (void)ReplacePalette; (void)Overlay; + virtual ~ cDummyOsd() + { } - virtual void DrawText(int x, int y, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font, int Width = 0, int Height = 0, int Alignment = taDefault) { - (void)x; (void)y; (void)s; (void)ColorFg; (void)ColorBg; (void)Font; (void)Width; (void)Height; (void)Alignment; + virtual cPixmap *CreatePixmap(int Layer, const cRect & ViewPort, const cRect & DrawPort = cRect::Null) { + (void)Layer; + (void)ViewPort; + (void)DrawPort; + return NULL; } - virtual void DrawRectangle(int x1, int y1, int x2, int y2, tColor Color) { - (void)x1; (void)y1; (void)x2; (void)y2; (void)Color; + virtual void DestroyPixmap(cPixmap * Pixmap) + { + (void)Pixmap; + } + virtual void DrawImage(const cPoint & Point, const cImage & Image) + { + (void)Point; + (void)Image; + } + virtual void DrawImage(const cPoint & Point, int ImageHandle) + { + (void)Point; + (void)ImageHandle; + } + 
virtual eOsdError CanHandleAreas(const tArea * Areas, int NumAreas) + { + (void)Areas; + (void)NumAreas; + return oeOk; + } + virtual eOsdError SetAreas(const tArea * Areas, int NumAreas) + { + (void)Areas; + (void)NumAreas; + return oeOk; + } + virtual void SaveRegion(int x1, int y1, int x2, int y2) + { + (void)x1; + (void)y1; + (void)x2; + (void)y2; + } + virtual void RestoreRegion(void) + { + } + virtual eOsdError SetPalette(const cPalette & Palette, int Area) + { + (void)Palette; + (void)Area; + return oeOk; + } + virtual void DrawPixel(int x, int y, tColor Color) + { + (void)x; + (void)y; + (void)Color; + } + virtual void DrawBitmap(int x, int y, const cBitmap & Bitmap, tColor ColorFg = 0, tColor ColorBg = + 0, bool ReplacePalette = false, bool Overlay = false) { + (void)x; + (void)y; + (void)Bitmap; + (void)ColorFg; + (void)ColorBg; + (void)ReplacePalette; + (void)Overlay; + } + virtual void DrawText(int x, int y, const char *s, tColor ColorFg, tColor ColorBg, const cFont * Font, int Width = + 0, int Height = 0, int Alignment = taDefault) { + (void)x; + (void)y; + (void)s; + (void)ColorFg; + (void)ColorBg; + (void)Font; + (void)Width; + (void)Height; + (void)Alignment; + } + virtual void DrawRectangle(int x1, int y1, int x2, int y2, tColor Color) + { + (void)x1; + (void)y1; + (void)x2; + (void)y2; + (void)Color; } virtual void DrawEllipse(int x1, int y1, int x2, int y2, tColor Color, int Quadrants = 0) { - (void)x1; (void)y1; (void)x2; (void)y2; (void)Color; (void)Quadrants; + (void)x1; + (void)y1; + (void)x2; + (void)y2; + (void)Color; + (void)Quadrants; } - virtual void DrawSlope(int x1, int y1, int x2, int y2, tColor Color, int Type) { - (void)x1; (void)y1; (void)x2; (void)y2; (void)Color; (void)Type; + virtual void DrawSlope(int x1, int y1, int x2, int y2, tColor Color, int Type) + { + (void)x1; + (void)y1; + (void)x2; + (void)y2; + (void)Color; + (void)Type; + } + virtual void Flush(void) + { } - virtual void Flush(void) {} }; #endif ////////////////////////////////////////////////////////////////////////////// -// OSD provider +// OSD provider ////////////////////////////////////////////////////////////////////////////// /** @@ -682,35 +753,36 @@ class cDummyOsd : public cOsd { class cSoftOsdProvider:public cOsdProvider { private: - static cOsd *Osd; ///< single OSD + static cOsd *Osd; ///< single OSD #ifdef USE_OPENGLOSD - static std::shared_ptr oglThread; + static std::shared_ptr < cOglThread > oglThread; static bool StartOpenGlThread(void); -protected: - virtual int StoreImageData(const cImage &Image); + protected: + virtual int StoreImageData(const cImage & Image); virtual void DropImageData(int ImageHandle); #endif public: - virtual cOsd * CreateOsd(int, int, uint); + virtual cOsd * CreateOsd(int, int, uint); virtual bool ProvidesTrueColor(void); #ifdef USE_OPENGLOSD static void StopOpenGlThread(void); static const cImage *GetImageData(int ImageHandle); static void OsdSizeChanged(void); #endif - cSoftOsdProvider(void); ///< OSD provider constructor - virtual ~cSoftOsdProvider(); ///< OSD provider destructor + cSoftOsdProvider(void); ///< OSD provider constructor + virtual ~ cSoftOsdProvider(); ///< OSD provider destructor }; -cOsd *cSoftOsdProvider::Osd; ///< single osd +cOsd *cSoftOsdProvider::Osd; ///< single osd #ifdef USE_OPENGLOSD -std::shared_ptr cSoftOsdProvider::oglThread; ///< openGL worker Thread +std::shared_ptr < cOglThread > cSoftOsdProvider::oglThread; ///< openGL worker Thread -int cSoftOsdProvider::StoreImageData(const cImage &Image) +int 
cSoftOsdProvider::StoreImageData(const cImage & Image) { if (StartOpenGlThread()) { int imgHandle = oglThread->StoreImage(Image); + return imgHandle; } return 0; @@ -722,6 +794,7 @@ void cSoftOsdProvider::DropImageData(int ImageHandle) oglThread->DropImageData(ImageHandle); } #endif + /** ** Create a new OSD. ** @@ -755,18 +828,20 @@ bool cSoftOsdProvider::ProvidesTrueColor(void) } #ifdef USE_OPENGLOSD -const cImage *cSoftOsdProvider::GetImageData(int ImageHandle) { +const cImage *cSoftOsdProvider::GetImageData(int ImageHandle) +{ return cOsdProvider::GetImageData(ImageHandle); } -void cSoftOsdProvider::OsdSizeChanged(void) { +void cSoftOsdProvider::OsdSizeChanged(void) +{ //cleanup OpenGl Context cSoftOsdProvider::StopOpenGlThread(); cOsdProvider::UpdateOsdSize(); } - -bool cSoftOsdProvider::StartOpenGlThread(void) { +bool cSoftOsdProvider::StartOpenGlThread(void) +{ //only try to start worker thread if shd is attached //otherwise glutInit() crashes if (SuspendMode != NOT_SUSPENDED) { @@ -780,6 +855,7 @@ bool cSoftOsdProvider::StartOpenGlThread(void) { oglThread.reset(); } cCondWait wait; + dsyslog("[softhddev]Trying to start OpenGL Worker Thread"); oglThread.reset(new cOglThread(&wait, ConfigMaxSizeGPUImageCache)); wait.Wait(); @@ -791,10 +867,11 @@ bool cSoftOsdProvider::StartOpenGlThread(void) { return false; } -void cSoftOsdProvider::StopOpenGlThread(void) { +void cSoftOsdProvider::StopOpenGlThread(void) +{ dsyslog("[softhddev]stopping OpenGL Worker Thread "); if (oglThread) { -// OsdClose(); +// OsdClose(); oglThread->Stop(); } oglThread.reset(); @@ -806,7 +883,7 @@ void cSoftOsdProvider::StopOpenGlThread(void) { ** Create cOsdProvider class. */ cSoftOsdProvider::cSoftOsdProvider(void) -: cOsdProvider() +:cOsdProvider() { #ifdef OSD_DEBUG dsyslog("[softhddev]%s:\n", __FUNCTION__); @@ -832,7 +909,7 @@ cSoftOsdProvider::~cSoftOsdProvider() } ////////////////////////////////////////////////////////////////////////////// -// cMenuSetupPage +// cMenuSetupPage ////////////////////////////////////////////////////////////////////////////// /** @@ -869,11 +946,11 @@ class cMenuSetupSoft:public cMenuSetupPage int Contrast; int Saturation; int Hue; - int Gamma; - int TargetColorSpace; - int ScalerTest; - int ColorBlindnessFaktor; - int ColorBlindness; + int Gamma; + int TargetColorSpace; + int ScalerTest; + int ColorBlindnessFaktor; + int ColorBlindness; int ResolutionShown[RESOLUTIONS]; int Scaling[RESOLUTIONS]; @@ -936,12 +1013,12 @@ class cMenuSetupSoft:public cMenuSetupPage /// @} private: inline cOsdItem * CollapsedItem(const char *, int &, const char * = NULL); - void Create(void); // create sub-menu + void Create(void); // create sub-menu protected: virtual void Store(void); public: cMenuSetupSoft(void); - virtual eOSState ProcessKey(eKeys); // handle input + virtual eOSState ProcessKey(eKeys); // handle input }; /** @@ -966,12 +1043,11 @@ static inline cOsdItem *SeparatorItem(const char *label) ** @param flag flag handling collapsed or opened ** @param msg open message */ -inline cOsdItem *cMenuSetupSoft::CollapsedItem(const char *label, int &flag, - const char *msg) +inline cOsdItem *cMenuSetupSoft::CollapsedItem(const char *label, int &flag, const char *msg) { cOsdItem *item; - item = new cMenuEditBoolItem(cString::sprintf("* %s", label), &flag,msg ? msg : tr("show"), tr("hide")); + item = new cMenuEditBoolItem(cString::sprintf("* %s", label), &flag, msg ? 
msg : tr("show"), tr("hide")); return item; } @@ -982,232 +1058,217 @@ inline cOsdItem *cMenuSetupSoft::CollapsedItem(const char *label, int &flag, void cMenuSetupSoft::Create(void) { static const char *const osd_size[] = { - "auto", "1920x1080", "1280x720", "custom", + "auto", "1920x1080", "1280x720", "custom", }; static const char *const video_display_formats_4_3[] = { - "pan&scan", "letterbox", "center cut-out", + "pan&scan", "letterbox", "center cut-out", }; static const char *const video_display_formats_16_9[] = { - "pan&scan", "pillarbox", "center cut-out", + "pan&scan", "pillarbox", "center cut-out", }; #ifdef YADIF static const char *const deinterlace[] = { - "Cuda", "Yadif", + "Cuda", "Yadif", }; static const char *const deinterlace_short[] = { - "C", "Y", + "C", "Y", }; #endif static const char *const audiodrift[] = { - "None", "PCM", "AC-3", "PCM + AC-3" + "None", "PCM", "AC-3", "PCM + AC-3" }; static const char *const resolution[RESOLUTIONS] = { - "576i", "720p", "fake 1080", "1080" ,"2160p" + "576i", "720p", "fake 1080", "1080", "2160p" }; #ifdef PLACEBO - static const char *const target_colorspace[] = { - "Monitor", "sRGB", "BT709", "HDR-HLG", "HDR10", + static const char *const target_colorspace[] = { + "Monitor", "sRGB", "BT709", "HDR-HLG", "HDR10", }; - static const char *const target_colorblindness[] = { - "None", "Protanomaly", "Deuteranomaly", "Tritanomaly", "Monochromacy", + static const char *const target_colorblindness[] = { + "None", "Protanomaly", "Deuteranomaly", "Tritanomaly", "Monochromacy", }; #endif int current; int i; #ifdef PLACEBO - static int scalers=0; - static char *scaling[100]; - static char *scalingtest[100]; - if (scalers == 0) { - scalingtest[0] = "Off"; - for (scalers = 0;pl_named_filters[scalers].filter != NULL ; scalers++) { - scaling[scalers] = (char*)pl_named_filters[scalers].name; - scalingtest[scalers+1] = (char*)pl_named_filters[scalers].name; -// printf("Scaler %s\n",pl_named_filters[scalers].name); - } -// scalers -= 2; - } -#endif - - - current = Current(); // get current menu item index - Clear(); // clear the menu + static int scalers = 0; + static char *scaling[100]; + static char *scalingtest[100]; + + if (scalers == 0) { + scalingtest[0] = "Off"; + for (scalers = 0; pl_named_filters[scalers].filter != NULL; scalers++) { + scaling[scalers] = (char *)pl_named_filters[scalers].name; + scalingtest[scalers + 1] = (char *)pl_named_filters[scalers].name; +// printf("Scaler %s\n",pl_named_filters[scalers].name); + } +// scalers -= 2; + } +#endif + + current = Current(); // get current menu item index + Clear(); // clear the menu // - // general + // general // Add(CollapsedItem(tr("General"), General)); if (General) { - Add(new cMenuEditBoolItem(tr("Make primary device"), &MakePrimary, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Hide main menu entry"),&HideMainMenuEntry, trVDR("no"), trVDR("yes"))); - // - // osd - // - Add(new cMenuEditStraItem(tr("Osd size"), &OsdSize, 4, osd_size)); - if (OsdSize == 3) { - Add(new cMenuEditIntItem(tr("Osd width"), &OsdWidth, 0, 4096)); - Add(new cMenuEditIntItem(tr("Osd height"), &OsdHeight, 0, 4096)); - } -#ifdef USE_OPENGLOSD - Add(new cMenuEditIntItem(tr("GPU mem used for image caching (MB)"), &MaxSizeGPUImageCache, 0, 4000)); + Add(new cMenuEditBoolItem(tr("Make primary device"), &MakePrimary, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Hide main menu entry"), &HideMainMenuEntry, trVDR("no"), trVDR("yes"))); + // + // osd + // + Add(new cMenuEditStraItem(tr("Osd 
size"), &OsdSize, 4, osd_size)); + if (OsdSize == 3) { + Add(new cMenuEditIntItem(tr("Osd width"), &OsdWidth, 0, 4096)); + Add(new cMenuEditIntItem(tr("Osd height"), &OsdHeight, 0, 4096)); + } +#ifdef USE_OPENGLOSD + Add(new cMenuEditIntItem(tr("GPU mem used for image caching (MB)"), &MaxSizeGPUImageCache, 0, 4000)); #endif - // - // suspend - // - Add(SeparatorItem(tr("Suspend"))); - Add(new cMenuEditBoolItem(tr("Detach from main menu entry"),&DetachFromMainMenu, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Suspend closes video+audio"), &SuspendClose, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Suspend stops x11"), &SuspendX11, trVDR("no"), trVDR("yes"))); + // + // suspend + // + Add(SeparatorItem(tr("Suspend"))); + Add(new cMenuEditBoolItem(tr("Detach from main menu entry"), &DetachFromMainMenu, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Suspend closes video+audio"), &SuspendClose, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Suspend stops x11"), &SuspendX11, trVDR("no"), trVDR("yes"))); } // - // video + // video // Add(CollapsedItem(tr("Video"), Video)); if (Video) { #ifdef USE_SCREENSAVER - Add(new - cMenuEditBoolItem(tr("Enable Screensaver(DPMS) at black screen"), - &EnableDPMSatBlackScreen, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Enable Screensaver(DPMS) at black screen"), &EnableDPMSatBlackScreen, + trVDR("no"), trVDR("yes"))); #endif - Add(new cMenuEditStraItem(trVDR("4:3 video display format"), - &Video4to3DisplayFormat, 3, video_display_formats_4_3)); - Add(new cMenuEditStraItem(trVDR("16:9+other video display format"), - &VideoOtherDisplayFormat, 3, video_display_formats_16_9)); + Add(new cMenuEditStraItem(trVDR("4:3 video display format"), &Video4to3DisplayFormat, 3, + video_display_formats_4_3)); + Add(new cMenuEditStraItem(trVDR("16:9+other video display format"), &VideoOtherDisplayFormat, 3, + video_display_formats_16_9)); #if 0 - // FIXME: switch config gray/color configuration - Add(new cMenuEditIntItem(tr("Video background color (RGB)"), - (int *)&Background, 0, 0x00FFFFFF)); - Add(new cMenuEditIntItem(tr("Video background color (Alpha)"), - (int *)&BackgroundAlpha, 0, 0xFF)); + // FIXME: switch config gray/color configuration + Add(new cMenuEditIntItem(tr("Video background color (RGB)"), (int *)&Background, 0, 0x00FFFFFF)); + Add(new cMenuEditIntItem(tr("Video background color (Alpha)"), (int *)&BackgroundAlpha, 0, 0xFF)); #endif #ifdef PLACEBO - Add(new cMenuEditBoolItem(tr("Use studio levels"), - &StudioLevels, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Use studio levels"), &StudioLevels, trVDR("no"), trVDR("yes"))); #endif - Add(new cMenuEditBoolItem(tr("60hz display mode"), &_60HzMode, - trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Soft start a/v sync"), &SoftStartSync, - trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Black during channel switch"), - &BlackPicture, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Clear decoder on channel switch"), - &ClearOnSwitch, trVDR("no"), trVDR("yes"))); - + Add(new cMenuEditBoolItem(tr("60hz display mode"), &_60HzMode, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Soft start a/v sync"), &SoftStartSync, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Black during channel switch"), &BlackPicture, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Clear decoder on channel switch"), &ClearOnSwitch, trVDR("no"), trVDR("yes"))); + #if PLACEBO - 
Add(new cMenuEditStraItem(tr("Scaler Test"), &ConfigScalerTest, scalers+1, scalingtest)); + Add(new cMenuEditStraItem(tr("Scaler Test"), &ConfigScalerTest, scalers + 1, scalingtest)); - Add(new cMenuEditIntItem(tr("Brightness (-100..100)"), - &Brightness, -100, 100, tr("min"), tr("max"))); - Add(new cMenuEditIntItem(tr("Contrast (0..100)"), &Contrast, - 0, 100, tr("min"), tr("max"))); - Add(new cMenuEditIntItem(tr("Saturation (0..100)"), - &Saturation, 0, 100, tr("min"), tr("max"))); - Add(new cMenuEditIntItem(tr("Gamma (0..100)"), - &Gamma, 0, 100, tr("min"), tr("max"))); - Add(new cMenuEditIntItem(tr("Hue (-314..314) "), &Hue, -314, 314, tr("min"), tr("max"))); - Add(new cMenuEditStraItem(tr("Monitor Colorspace"), &TargetColorSpace, 5, target_colorspace)); - Add(new cMenuEditStraItem(tr("Color Blindness"), &ColorBlindness, 5, target_colorblindness)); - Add(new cMenuEditIntItem(tr("Color Correction (-100..100) "), &ColorBlindnessFaktor, -100, - 100, tr("min"), tr("max"))); + Add(new cMenuEditIntItem(tr("Brightness (-100..100)"), &Brightness, -100, 100, tr("min"), tr("max"))); + Add(new cMenuEditIntItem(tr("Contrast (0..100)"), &Contrast, 0, 100, tr("min"), tr("max"))); + Add(new cMenuEditIntItem(tr("Saturation (0..100)"), &Saturation, 0, 100, tr("min"), tr("max"))); + Add(new cMenuEditIntItem(tr("Gamma (0..100)"), &Gamma, 0, 100, tr("min"), tr("max"))); + Add(new cMenuEditIntItem(tr("Hue (-314..314) "), &Hue, -314, 314, tr("min"), tr("max"))); + Add(new cMenuEditStraItem(tr("Monitor Colorspace"), &TargetColorSpace, 5, target_colorspace)); + Add(new cMenuEditStraItem(tr("Color Blindness"), &ColorBlindness, 5, target_colorblindness)); + Add(new cMenuEditIntItem(tr("Color Correction (-100..100) "), &ColorBlindnessFaktor, -100, 100, tr("min"), + tr("max"))); #endif - - for (i = 0; i < RESOLUTIONS; ++i) { - cString msg; - // short hidden informations - msg = cString::sprintf("show"); - Add(CollapsedItem(resolution[i], ResolutionShown[i], msg)); + for (i = 0; i < RESOLUTIONS; ++i) { + cString msg; - if (ResolutionShown[i]) { + // short hidden informations + msg = cString::sprintf("show"); + Add(CollapsedItem(resolution[i], ResolutionShown[i], msg)); + + if (ResolutionShown[i]) { #ifdef PLACEBO - Add(new cMenuEditStraItem(tr("Scaling"), &Scaling[i], scalers, scaling)); + Add(new cMenuEditStraItem(tr("Scaling"), &Scaling[i], scalers, scaling)); #endif #ifdef YADIF - if ( i == 0 || i == 2 || i == 3) { - Add(new cMenuEditStraItem(tr("Deinterlace"), &Deinterlace[i],2, deinterlace)); - } + if (i == 0 || i == 2 || i == 3) { + Add(new cMenuEditStraItem(tr("Deinterlace"), &Deinterlace[i], 2, deinterlace)); + } #endif #if 0 - Add(new cMenuEditBoolItem(tr("SkipChromaDeinterlace (vdpau)"), - &SkipChromaDeinterlace[i], trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Inverse Telecine (vdpau)"), - &InverseTelecine[i], trVDR("no"), trVDR("yes"))); - Add(new cMenuEditIntItem(tr("Denoise (0..1000) (vdpau)"), - &Denoise[i], 0, 1000, tr("off"), tr("max"))); - Add(new cMenuEditIntItem(tr("Sharpen (-1000..1000) (vdpau)"), - &Sharpen[i], -1000, 1000, tr("blur max"), - tr("sharpen max"))); + Add(new cMenuEditBoolItem(tr("SkipChromaDeinterlace (vdpau)"), &SkipChromaDeinterlace[i], trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Inverse Telecine (vdpau)"), &InverseTelecine[i], trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditIntItem(tr("Denoise (0..1000) (vdpau)"), &Denoise[i], 0, 1000, tr("off"), tr("max"))); + Add(new cMenuEditIntItem(tr("Sharpen (-1000..1000) (vdpau)"), &Sharpen[i], 
-1000, 1000, tr("blur max"), + tr("sharpen max"))); #endif - Add(new cMenuEditIntItem(tr("Cut top and bottom (pixel)"), - &CutTopBottom[i], 0, 250)); - Add(new cMenuEditIntItem(tr("Cut left and right (pixel)"), - &CutLeftRight[i], 0, 250)); - } - } + Add(new cMenuEditIntItem(tr("Cut top and bottom (pixel)"), &CutTopBottom[i], 0, 250)); + Add(new cMenuEditIntItem(tr("Cut left and right (pixel)"), &CutLeftRight[i], 0, 250)); + } + } #ifdef USE_AUTOCROP - // - // auto-crop - // - Add(SeparatorItem(tr("Auto-crop"))); - Add(new cMenuEditIntItem(tr("Autocrop interval (frames)"), &AutoCropInterval, 0, 200, tr("off"))); - Add(new cMenuEditIntItem(tr("Autocrop delay (n * interval)"), &AutoCropDelay, 0, 200)); - Add(new cMenuEditIntItem(tr("Autocrop tolerance (pixel)"), &AutoCropTolerance, 0, 32)); + // + // auto-crop + // + Add(SeparatorItem(tr("Auto-crop"))); + Add(new cMenuEditIntItem(tr("Autocrop interval (frames)"), &AutoCropInterval, 0, 200, tr("off"))); + Add(new cMenuEditIntItem(tr("Autocrop delay (n * interval)"), &AutoCropDelay, 0, 200)); + Add(new cMenuEditIntItem(tr("Autocrop tolerance (pixel)"), &AutoCropTolerance, 0, 32)); #endif } // - // audio + // audio // Add(CollapsedItem(tr("Audio"), Audio)); if (Audio) { - Add(new cMenuEditIntItem(tr("Audio/Video delay (ms)"), &AudioDelay, -1000, 1000)); - Add(new cMenuEditStraItem(tr("Audio drift correction"), &AudioDrift, 4, audiodrift)); - Add(new cMenuEditBoolItem(tr("Pass-through default"), &AudioPassthroughDefault, trVDR("off"), trVDR("on"))); - Add(new cMenuEditBoolItem(tr("\040\040PCM pass-through"), &AudioPassthroughPCM, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("\040\040AC-3 pass-through"),&AudioPassthroughAC3, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("\040\040E-AC-3 pass-through"),&AudioPassthroughEAC3, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Enable (E-)AC-3 (decoder) downmix"), &AudioDownmix, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Volume control"), &AudioSoftvol,tr("Hardware"), tr("Software"))); - Add(new cMenuEditBoolItem(tr("Enable normalize volume"),&AudioNormalize, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditIntItem(tr(" Max normalize factor (/1000)"),&AudioMaxNormalize, 0, 10000)); - Add(new cMenuEditBoolItem(tr("Enable volume compression"), &AudioCompression, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditIntItem(tr(" Max compression factor (/1000)"),&AudioMaxCompression, 0, 10000)); - Add(new cMenuEditIntItem(tr("Reduce stereo volume (/1000)"),&AudioStereoDescent, 0, 1000)); - Add(new cMenuEditIntItem(tr("Audio buffer size (ms)"),&AudioBufferTime, 0, 1000)); - Add(new cMenuEditBoolItem(tr("Enable automatic AES"), &AudioAutoAES,trVDR("no"), trVDR("yes"))); + Add(new cMenuEditIntItem(tr("Audio/Video delay (ms)"), &AudioDelay, -1000, 1000)); + Add(new cMenuEditStraItem(tr("Audio drift correction"), &AudioDrift, 4, audiodrift)); + Add(new cMenuEditBoolItem(tr("Pass-through default"), &AudioPassthroughDefault, trVDR("off"), trVDR("on"))); + Add(new cMenuEditBoolItem(tr("\040\040PCM pass-through"), &AudioPassthroughPCM, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("\040\040AC-3 pass-through"), &AudioPassthroughAC3, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("\040\040E-AC-3 pass-through"), &AudioPassthroughEAC3, trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Enable (E-)AC-3 (decoder) downmix"), &AudioDownmix, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Volume control"), &AudioSoftvol, 
tr("Hardware"), tr("Software"))); + Add(new cMenuEditBoolItem(tr("Enable normalize volume"), &AudioNormalize, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditIntItem(tr(" Max normalize factor (/1000)"), &AudioMaxNormalize, 0, 10000)); + Add(new cMenuEditBoolItem(tr("Enable volume compression"), &AudioCompression, trVDR("no"), trVDR("yes"))); + Add(new cMenuEditIntItem(tr(" Max compression factor (/1000)"), &AudioMaxCompression, 0, 10000)); + Add(new cMenuEditIntItem(tr("Reduce stereo volume (/1000)"), &AudioStereoDescent, 0, 1000)); + Add(new cMenuEditIntItem(tr("Audio buffer size (ms)"), &AudioBufferTime, 0, 1000)); + Add(new cMenuEditBoolItem(tr("Enable automatic AES"), &AudioAutoAES, trVDR("no"), trVDR("yes"))); } #ifdef USE_PIP // - // PIP + // PIP // Add(CollapsedItem(tr("Picture-In-Picture"), Pip)); if (Pip) { - // FIXME: predefined modes/custom mode - Add(new cMenuEditIntItem(tr("Pip X (%)"), &PipX, 0, 100)); - Add(new cMenuEditIntItem(tr("Pip Y (%)"), &PipY, 0, 100)); - Add(new cMenuEditIntItem(tr("Pip Width (%)"), &PipWidth, 0, 100)); - Add(new cMenuEditIntItem(tr("Pip Height (%)"), &PipHeight, 0, 100)); - Add(new cMenuEditIntItem(tr("Video X (%)"), &PipVideoX, 0, 100)); - Add(new cMenuEditIntItem(tr("Video Y (%)"), &PipVideoY, 0, 100)); - Add(new cMenuEditIntItem(tr("Video Width (%)"), &PipVideoWidth, 0,100)); - Add(new cMenuEditIntItem(tr("Video Height (%)"), &PipVideoHeight, 0,100)); - Add(new cMenuEditIntItem(tr("Alternative Pip X (%)"), &PipAltX, 0,100)); - Add(new cMenuEditIntItem(tr("Alternative Pip Y (%)"), &PipAltY, 0,100)); - Add(new cMenuEditIntItem(tr("Alternative Pip Width (%)"), &PipAltWidth, 0, 100)); - Add(new cMenuEditIntItem(tr("Alternative Pip Height (%)"),&PipAltHeight, 0, 100)); - Add(new cMenuEditIntItem(tr("Alternative Video X (%)"), &PipAltVideoX,0, 100)); - Add(new cMenuEditIntItem(tr("Alternative Video Y (%)"), &PipAltVideoY,0, 100)); - Add(new cMenuEditIntItem(tr("Alternative Video Width (%)"), &PipAltVideoWidth, 0, 100)); - Add(new cMenuEditIntItem(tr("Alternative Video Height (%)"),&PipAltVideoHeight, 0, 100)); + // FIXME: predefined modes/custom mode + Add(new cMenuEditIntItem(tr("Pip X (%)"), &PipX, 0, 100)); + Add(new cMenuEditIntItem(tr("Pip Y (%)"), &PipY, 0, 100)); + Add(new cMenuEditIntItem(tr("Pip Width (%)"), &PipWidth, 0, 100)); + Add(new cMenuEditIntItem(tr("Pip Height (%)"), &PipHeight, 0, 100)); + Add(new cMenuEditIntItem(tr("Video X (%)"), &PipVideoX, 0, 100)); + Add(new cMenuEditIntItem(tr("Video Y (%)"), &PipVideoY, 0, 100)); + Add(new cMenuEditIntItem(tr("Video Width (%)"), &PipVideoWidth, 0, 100)); + Add(new cMenuEditIntItem(tr("Video Height (%)"), &PipVideoHeight, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Pip X (%)"), &PipAltX, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Pip Y (%)"), &PipAltY, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Pip Width (%)"), &PipAltWidth, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Pip Height (%)"), &PipAltHeight, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Video X (%)"), &PipAltVideoX, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Video Y (%)"), &PipAltVideoY, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Video Width (%)"), &PipAltVideoWidth, 0, 100)); + Add(new cMenuEditIntItem(tr("Alternative Video Height (%)"), &PipAltVideoHeight, 0, 100)); } #endif - SetCurrent(Get(current)); // restore selected menu entry - Display(); // display build menu + SetCurrent(Get(current)); // restore selected menu entry + Display(); // display build menu } /** 
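The setup menu above is rebuilt from scratch whenever one of the section flags (General, Video, Audio, Pip) or a per-resolution show flag changes: Create() remembers the current item, clears the menu, adds a "* Section" bool item per group, emits the group's items only while its flag is set, then restores the cursor and calls Display(). A minimal sketch of this collapsed-section pattern, assuming a standard VDR plugin build environment; the names cExampleSetup, ShowVideo and SomeVideoOption are illustrative and not part of this plugin.

#include <vdr/i18n.h>
#include <vdr/menuitems.h>
#include <vdr/tools.h>

class cExampleSetup : public cMenuSetupPage
{
  private:
    int ShowVideo;                          // 1 = video section expanded
    int SomeVideoOption;
    void Create(void);                      // rebuild the complete menu
  protected:
    virtual void Store(void) {}             // persistence omitted in this sketch
  public:
    cExampleSetup(void) : ShowVideo(0), SomeVideoOption(0) { Create(); }
    virtual eOSState ProcessKey(eKeys key);
};

void cExampleSetup::Create(void)
{
    int current = Current();                // remember selected item index
    Clear();                                // drop all old items

    // the "* Video" bool item doubles as the expand/collapse switch
    Add(new cMenuEditBoolItem(cString::sprintf("* %s", tr("Video")), &ShowVideo, tr("show"), tr("hide")));
    if (ShowVideo) {                        // only build items of opened sections
        Add(new cMenuEditIntItem(tr("Some video option"), &SomeVideoOption, 0, 100));
    }
    SetCurrent(Get(current));               // restore cursor position
    Display();
}

eOSState cExampleSetup::ProcessKey(eKeys key)
{
    int old_show_video = ShowVideo;
    eOSState state = cMenuSetupPage::ProcessKey(key);

    // rebuild only when the structure really changed - full rebuilds are slow
    if (key != kNone && old_show_video != ShowVideo) {
        Create();
    }
    return state;
}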
@@ -1238,22 +1299,22 @@ eOSState cMenuSetupSoft::ProcessKey(eKeys key) state = cMenuSetupPage::ProcessKey(key); if (key != kNone) { - // update menu only, if something on the structure has changed - // this is needed because VDR menus are evil slow - if (old_general != General || old_video != Video || old_audio != Audio + // update menu only, if something on the structure has changed + // this is needed because VDR menus are evil slow + if (old_general != General || old_video != Video || old_audio != Audio #ifdef USE_PIP - || old_pip != Pip + || old_pip != Pip #endif - || old_osd_size != OsdSize) { - Create(); // update menu - } else { - for (i = 0; i < RESOLUTIONS; ++i) { - if (old_resolution_shown[i] != ResolutionShown[i]) { - Create(); // update menu - break; - } - } - } + || old_osd_size != OsdSize) { + Create(); // update menu + } else { + for (i = 0; i < RESOLUTIONS; ++i) { + if (old_resolution_shown[i] != ResolutionShown[i]) { + Create(); // update menu + break; + } + } + } } return state; @@ -1269,34 +1330,34 @@ cMenuSetupSoft::cMenuSetupSoft(void) int i; // - // general + // general // General = 0; MakePrimary = ConfigMakePrimary; HideMainMenuEntry = ConfigHideMainMenuEntry; DetachFromMainMenu = ConfigDetachFromMainMenu; // - // osd + // osd // OsdWidth = ConfigOsdWidth; OsdHeight = ConfigOsdHeight; if (!OsdWidth && !OsdHeight) { - OsdSize = 0; + OsdSize = 0; } else if (OsdWidth == 1920 && OsdHeight == 1080) { - OsdSize = 1; + OsdSize = 1; } else if (OsdWidth == 1280 && OsdHeight == 720) { - OsdSize = 2; + OsdSize = 2; } else { - OsdSize = 3; + OsdSize = 3; } // - // suspend + // suspend // SuspendClose = ConfigSuspendClose; SuspendX11 = ConfigSuspendX11; // - // video + // video // Video = 0; Video4to3DisplayFormat = Config4to3DisplayFormat; @@ -1314,33 +1375,33 @@ cMenuSetupSoft::cMenuSetupSoft(void) Contrast = ConfigVideoContrast; Saturation = ConfigVideoSaturation; Hue = ConfigVideoHue; - Gamma = ConfigGamma; - TargetColorSpace = ConfigTargetColorSpace; - ColorBlindness = ConfigColorBlindness; - ColorBlindnessFaktor = ConfigColorBlindnessFaktor; -// ScalerTest = ConfigScalerTest; + Gamma = ConfigGamma; + TargetColorSpace = ConfigTargetColorSpace; + ColorBlindness = ConfigColorBlindness; + ColorBlindnessFaktor = ConfigColorBlindnessFaktor; +// ScalerTest = ConfigScalerTest; for (i = 0; i < RESOLUTIONS; ++i) { - ResolutionShown[i] = 0; - Scaling[i] = ConfigVideoScaling[i]; - Deinterlace[i] = ConfigVideoDeinterlace[i]; - SkipChromaDeinterlace[i] = ConfigVideoSkipChromaDeinterlace[i]; - InverseTelecine[i] = ConfigVideoInverseTelecine[i]; - Denoise[i] = ConfigVideoDenoise[i]; - Sharpen[i] = ConfigVideoSharpen[i]; + ResolutionShown[i] = 0; + Scaling[i] = ConfigVideoScaling[i]; + Deinterlace[i] = ConfigVideoDeinterlace[i]; + SkipChromaDeinterlace[i] = ConfigVideoSkipChromaDeinterlace[i]; + InverseTelecine[i] = ConfigVideoInverseTelecine[i]; + Denoise[i] = ConfigVideoDenoise[i]; + Sharpen[i] = ConfigVideoSharpen[i]; - CutTopBottom[i] = ConfigVideoCutTopBottom[i]; - CutLeftRight[i] = ConfigVideoCutLeftRight[i]; + CutTopBottom[i] = ConfigVideoCutTopBottom[i]; + CutLeftRight[i] = ConfigVideoCutLeftRight[i]; } // - // auto-crop + // auto-crop // AutoCropInterval = ConfigAutoCropInterval; AutoCropDelay = ConfigAutoCropDelay; AutoCropTolerance = ConfigAutoCropTolerance; // - // audio + // audio // Audio = 0; AudioDelay = ConfigVideoAudioDelay; @@ -1361,7 +1422,7 @@ cMenuSetupSoft::cMenuSetupSoft(void) #ifdef USE_PIP // - // PIP + // PIP // Pip = 0; PipX = ConfigPipX; @@ -1401,29 +1462,26 @@ 
void cMenuSetupSoft::Store(void) int i; SetupStore("MakePrimary", ConfigMakePrimary = MakePrimary); - SetupStore("HideMainMenuEntry", ConfigHideMainMenuEntry = - HideMainMenuEntry); - SetupStore("DetachFromMainMenu", ConfigDetachFromMainMenu = - DetachFromMainMenu); + SetupStore("HideMainMenuEntry", ConfigHideMainMenuEntry = HideMainMenuEntry); + SetupStore("DetachFromMainMenu", ConfigDetachFromMainMenu = DetachFromMainMenu); switch (OsdSize) { - case 0: - OsdWidth = 0; - OsdHeight = 0; - break; - case 1: - OsdWidth = 1920; - OsdHeight = 1080; - break; - case 2: - OsdWidth = 1280; - OsdHeight = 720; - default: - break; + case 0: + OsdWidth = 0; + OsdHeight = 0; + break; + case 1: + OsdWidth = 1920; + OsdHeight = 1080; + break; + case 2: + OsdWidth = 1280; + OsdHeight = 720; + default: + break; } if (ConfigOsdWidth != OsdWidth || ConfigOsdHeight != OsdHeight) { - VideoSetOsdSize(ConfigOsdWidth = OsdWidth, ConfigOsdHeight = - OsdHeight); - // FIXME: shown osd size not updated + VideoSetOsdSize(ConfigOsdWidth = OsdWidth, ConfigOsdHeight = OsdHeight); + // FIXME: shown osd size not updated } SetupStore("Osd.Width", ConfigOsdWidth); SetupStore("Osd.Height", ConfigOsdHeight); @@ -1431,11 +1489,9 @@ void cMenuSetupSoft::Store(void) SetupStore("Suspend.Close", ConfigSuspendClose = SuspendClose); SetupStore("Suspend.X11", ConfigSuspendX11 = SuspendX11); - SetupStore("Video4to3DisplayFormat", Config4to3DisplayFormat = - Video4to3DisplayFormat); + SetupStore("Video4to3DisplayFormat", Config4to3DisplayFormat = Video4to3DisplayFormat); VideoSet4to3DisplayFormat(Config4to3DisplayFormat); - SetupStore("VideoOtherDisplayFormat", ConfigOtherDisplayFormat = - VideoOtherDisplayFormat); + SetupStore("VideoOtherDisplayFormat", ConfigOtherDisplayFormat = VideoOtherDisplayFormat); VideoSetOtherDisplayFormat(ConfigOtherDisplayFormat); ConfigVideoBackground = Background << 8 | (BackgroundAlpha & 0xFF); @@ -1457,41 +1513,39 @@ void cMenuSetupSoft::Store(void) VideoSetContrast(ConfigVideoContrast); SetupStore("Saturation", ConfigVideoSaturation = Saturation); VideoSetSaturation(ConfigVideoSaturation); - SetupStore("Gamma", ConfigGamma = Gamma); + SetupStore("Gamma", ConfigGamma = Gamma); VideoSetGamma(ConfigGamma); - SetupStore("TargetColorSpace", ConfigTargetColorSpace = TargetColorSpace); + SetupStore("TargetColorSpace", ConfigTargetColorSpace = TargetColorSpace); VideoSetTargetColor(ConfigTargetColorSpace); SetupStore("Hue", ConfigVideoHue = Hue); VideoSetHue(ConfigVideoHue); - SetupStore("CBlindness", ConfigColorBlindness = ColorBlindness); + SetupStore("CBlindness", ConfigColorBlindness = ColorBlindness); VideoSetColorBlindness(ConfigColorBlindness); - SetupStore("CBlindnessFaktor", ConfigColorBlindnessFaktor = ColorBlindnessFaktor); + SetupStore("CBlindnessFaktor", ConfigColorBlindnessFaktor = ColorBlindnessFaktor); VideoSetColorBlindnessFaktor(ConfigColorBlindnessFaktor); // SetupStore("ScalerTest", ConfigScalerTest = ScalerTest); VideoSetScalerTest(ConfigScalerTest); - + for (i = 0; i < RESOLUTIONS; ++i) { - char buf[128]; + char buf[128]; - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Scaling"); - SetupStore(buf, ConfigVideoScaling[i] = Scaling[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Deinterlace"); - SetupStore(buf, ConfigVideoDeinterlace[i] = Deinterlace[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], - "SkipChromaDeinterlace"); - SetupStore(buf, ConfigVideoSkipChromaDeinterlace[i] = - SkipChromaDeinterlace[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], 
"InverseTelecine"); - SetupStore(buf, ConfigVideoInverseTelecine[i] = InverseTelecine[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Denoise"); - SetupStore(buf, ConfigVideoDenoise[i] = Denoise[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Sharpen"); - SetupStore(buf, ConfigVideoSharpen[i] = Sharpen[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Scaling"); + SetupStore(buf, ConfigVideoScaling[i] = Scaling[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Deinterlace"); + SetupStore(buf, ConfigVideoDeinterlace[i] = Deinterlace[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "SkipChromaDeinterlace"); + SetupStore(buf, ConfigVideoSkipChromaDeinterlace[i] = SkipChromaDeinterlace[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "InverseTelecine"); + SetupStore(buf, ConfigVideoInverseTelecine[i] = InverseTelecine[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Denoise"); + SetupStore(buf, ConfigVideoDenoise[i] = Denoise[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Sharpen"); + SetupStore(buf, ConfigVideoSharpen[i] = Sharpen[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutTopBottom"); - SetupStore(buf, ConfigVideoCutTopBottom[i] = CutTopBottom[i]); - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutLeftRight"); - SetupStore(buf, ConfigVideoCutLeftRight[i] = CutLeftRight[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutTopBottom"); + SetupStore(buf, ConfigVideoCutTopBottom[i] = CutTopBottom[i]); + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutLeftRight"); + SetupStore(buf, ConfigVideoCutLeftRight[i] = CutLeftRight[i]); } VideoSetScaling(ConfigVideoScaling); VideoSetDeinterlace(ConfigVideoDeinterlace); @@ -1504,10 +1558,8 @@ void cMenuSetupSoft::Store(void) SetupStore("AutoCrop.Interval", ConfigAutoCropInterval = AutoCropInterval); SetupStore("AutoCrop.Delay", ConfigAutoCropDelay = AutoCropDelay); - SetupStore("AutoCrop.Tolerance", ConfigAutoCropTolerance = - AutoCropTolerance); - VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, - ConfigAutoCropTolerance); + SetupStore("AutoCrop.Tolerance", ConfigAutoCropTolerance = AutoCropTolerance); + VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, ConfigAutoCropTolerance); ConfigAutoCropEnabled = ConfigAutoCropInterval != 0; SetupStore("AudioDelay", ConfigVideoAudioDelay = AudioDelay); @@ -1518,33 +1570,30 @@ void cMenuSetupSoft::Store(void) // FIXME: can handle more audio state changes here // downmix changed reset audio, to get change direct if (ConfigAudioDownmix != AudioDownmix) { - ResetChannelId(); + ResetChannelId(); } ConfigAudioPassthrough = (AudioPassthroughPCM ? CodecPCM : 0) - | (AudioPassthroughAC3 ? CodecAC3 : 0) - | (AudioPassthroughEAC3 ? CodecEAC3 : 0); + | (AudioPassthroughAC3 ? CodecAC3 : 0) + | (AudioPassthroughEAC3 ? 
CodecEAC3 : 0); AudioPassthroughState = AudioPassthroughDefault; if (AudioPassthroughState) { - SetupStore("AudioPassthrough", ConfigAudioPassthrough); - CodecSetAudioPassthrough(ConfigAudioPassthrough); + SetupStore("AudioPassthrough", ConfigAudioPassthrough); + CodecSetAudioPassthrough(ConfigAudioPassthrough); } else { - SetupStore("AudioPassthrough", -ConfigAudioPassthrough); - CodecSetAudioPassthrough(0); + SetupStore("AudioPassthrough", -ConfigAudioPassthrough); + CodecSetAudioPassthrough(0); } SetupStore("AudioDownmix", ConfigAudioDownmix = AudioDownmix); CodecSetAudioDownmix(ConfigAudioDownmix); SetupStore("AudioSoftvol", ConfigAudioSoftvol = AudioSoftvol); AudioSetSoftvol(ConfigAudioSoftvol); SetupStore("AudioNormalize", ConfigAudioNormalize = AudioNormalize); - SetupStore("AudioMaxNormalize", ConfigAudioMaxNormalize = - AudioMaxNormalize); + SetupStore("AudioMaxNormalize", ConfigAudioMaxNormalize = AudioMaxNormalize); AudioSetNormalize(ConfigAudioNormalize, ConfigAudioMaxNormalize); SetupStore("AudioCompression", ConfigAudioCompression = AudioCompression); - SetupStore("AudioMaxCompression", ConfigAudioMaxCompression = - AudioMaxCompression); + SetupStore("AudioMaxCompression", ConfigAudioMaxCompression = AudioMaxCompression); AudioSetCompression(ConfigAudioCompression, ConfigAudioMaxCompression); - SetupStore("AudioStereoDescent", ConfigAudioStereoDescent = - AudioStereoDescent); + SetupStore("AudioStereoDescent", ConfigAudioStereoDescent = AudioStereoDescent); AudioSetStereoDescent(ConfigAudioStereoDescent); SetupStore("AudioBufferTime", ConfigAudioBufferTime = AudioBufferTime); SetupStore("AudioAutoAES", ConfigAudioAutoAES = AudioAutoAES); @@ -1565,26 +1614,22 @@ void cMenuSetupSoft::Store(void) SetupStore("pip.Alt.Height", ConfigPipAltHeight = PipAltHeight); SetupStore("pip.Alt.VideoX", ConfigPipAltVideoX = PipAltVideoX); SetupStore("pip.Alt.VideoY", ConfigPipAltVideoY = PipAltVideoY); - SetupStore("pip.Alt.VideoWidth", ConfigPipAltVideoWidth = - PipAltVideoWidth); - SetupStore("pip.Alt.VideoHeight", ConfigPipAltVideoHeight = - PipAltVideoHeight); + SetupStore("pip.Alt.VideoWidth", ConfigPipAltVideoWidth = PipAltVideoWidth); + SetupStore("pip.Alt.VideoHeight", ConfigPipAltVideoHeight = PipAltVideoHeight); #endif #ifdef USE_SCREENSAVER - SetupStore("EnableDPMSatBlackScreen", ConfigEnableDPMSatBlackScreen = - EnableDPMSatBlackScreen); + SetupStore("EnableDPMSatBlackScreen", ConfigEnableDPMSatBlackScreen = EnableDPMSatBlackScreen); SetDPMSatBlackScreen(ConfigEnableDPMSatBlackScreen); #endif #ifdef USE_OPENGLOSD - SetupStore("MaxSizeGPUImageCache", ConfigMaxSizeGPUImageCache = - MaxSizeGPUImageCache); + SetupStore("MaxSizeGPUImageCache", ConfigMaxSizeGPUImageCache = MaxSizeGPUImageCache); #endif } ////////////////////////////////////////////////////////////////////////////// -// cPlayer +// cPlayer ////////////////////////////////////////////////////////////////////////////// /** @@ -1608,7 +1653,7 @@ cSoftHdPlayer::~cSoftHdPlayer() } ////////////////////////////////////////////////////////////////////////////// -// cControl +// cControl ////////////////////////////////////////////////////////////////////////////// /** @@ -1617,18 +1662,18 @@ cSoftHdPlayer::~cSoftHdPlayer() class cSoftHdControl:public cControl { public: - static cSoftHdPlayer *Player; ///< dummy player - virtual void Hide(void) ///< hide control + static cSoftHdPlayer *Player; ///< dummy player + virtual void Hide(void) ///< hide control { } - virtual eOSState ProcessKey(eKeys); ///< process input events + virtual 
eOSState ProcessKey(eKeys); ///< process input events - cSoftHdControl(void); ///< control constructor + cSoftHdControl(void); ///< control constructor - virtual ~ cSoftHdControl(); ///< control destructor + virtual ~ cSoftHdControl(); ///< control destructor }; -cSoftHdPlayer *cSoftHdControl::Player; ///< dummy player instance +cSoftHdPlayer *cSoftHdControl::Player; ///< dummy player instance /** ** Handle a key event. @@ -1638,13 +1683,13 @@ cSoftHdPlayer *cSoftHdControl::Player; ///< dummy player instance eOSState cSoftHdControl::ProcessKey(eKeys key) { if (SuspendMode == SUSPEND_NORMAL && (!ISMODELESSKEY(key) - || key == kMenu || key == kBack || key == kStop)) { - delete Player; + || key == kMenu || key == kBack || key == kStop)) { + delete Player; - Player = NULL; - Resume(); - SuspendMode = NOT_SUSPENDED; - return osEnd; + Player = NULL; + Resume(); + SuspendMode = NOT_SUSPENDED; + return osEnd; } return osContinue; } @@ -1653,7 +1698,7 @@ eOSState cSoftHdControl::ProcessKey(eKeys key) ** Player control constructor. */ cSoftHdControl::cSoftHdControl(void) -: cControl(Player = new cSoftHdPlayer) +:cControl(Player = new cSoftHdPlayer) { } @@ -1667,24 +1712,24 @@ cSoftHdControl::~cSoftHdControl() Player = NULL; // loose control resume if (SuspendMode == SUSPEND_NORMAL) { - Resume(); - SuspendMode = NOT_SUSPENDED; + Resume(); + SuspendMode = NOT_SUSPENDED; } dsyslog("[softhddev]%s: dummy player stopped\n", __FUNCTION__); } ////////////////////////////////////////////////////////////////////////////// -// PIP +// PIP ////////////////////////////////////////////////////////////////////////////// #ifdef USE_PIP -extern "C" void DelPip(void); ///< remove PIP -static int PipAltPosition; ///< flag alternative position +extern "C" void DelPip(void); ///< remove PIP +static int PipAltPosition; ///< flag alternative position ////////////////////////////////////////////////////////////////////////////// -// cReceiver +// cReceiver ////////////////////////////////////////////////////////////////////////////// #include @@ -1702,8 +1747,8 @@ class cSoftReceiver:public cReceiver virtual void Receive(uchar *, int); #endif public: - cSoftReceiver(const cChannel *); ///< receiver constructor - virtual ~ cSoftReceiver(); ///< receiver destructor + cSoftReceiver(const cChannel *); ///< receiver constructor + virtual ~ cSoftReceiver(); ///< receiver destructor }; /** @@ -1711,8 +1756,7 @@ class cSoftReceiver:public cReceiver ** ** @param channel channel to receive */ -cSoftReceiver::cSoftReceiver(const cChannel * channel):cReceiver(NULL, - MINPRIORITY) +cSoftReceiver::cSoftReceiver(const cChannel * channel):cReceiver(NULL, MINPRIORITY) { // cReceiver::channelID not setup, this can cause trouble // we want video only @@ -1735,44 +1779,36 @@ cSoftReceiver::~cSoftReceiver() void cSoftReceiver::Activate(bool on) { if (on) { - int width; - int height; - double video_aspect; + int width; + int height; + double video_aspect; - GetOsdSize(&width, &height, &video_aspect); - if (PipAltPosition) { - PipStart((ConfigPipAltVideoX * width) / 100, - (ConfigPipAltVideoY * height) / 100, - ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / - 100 : width, - ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / - 100 : height, (ConfigPipAltX * width) / 100, - (ConfigPipAltY * height) / 100, - ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, - ConfigPipAltHeight ? 
(ConfigPipAltHeight * height) / - 100 : height); - } else { - PipStart((ConfigPipVideoX * width) / 100, - (ConfigPipVideoY * height) / 100, - ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / - 100 : width, - ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / - 100 : height, (ConfigPipX * width) / 100, - (ConfigPipY * height) / 100, - ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, - ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); - } + GetOsdSize(&width, &height, &video_aspect); + if (PipAltPosition) { + PipStart((ConfigPipAltVideoX * width) / 100, (ConfigPipAltVideoY * height) / 100, + ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / 100 : width, + ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / 100 : height, + (ConfigPipAltX * width) / 100, (ConfigPipAltY * height) / 100, + ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, + ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); + } else { + PipStart((ConfigPipVideoX * width) / 100, (ConfigPipVideoY * height) / 100, + ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, + ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / 100 : height, (ConfigPipX * width) / 100, + (ConfigPipY * height) / 100, ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, + ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); + } } else { - PipStop(); + PipStop(); } } /// -/// Parse packetized elementary stream. +/// Parse packetized elementary stream. /// -/// @param data payload data of transport stream -/// @param size number of payload data bytes -/// @param is_start flag, start of pes packet +/// @param data payload data of transport stream +/// @param size number of payload data bytes +/// @param is_start flag, start of pes packet /// static void PipPesParse(const uint8_t * data, int size, int is_start) { @@ -1783,41 +1819,39 @@ static void PipPesParse(const uint8_t * data, int size, int is_start) // FIXME: quick&dirty if (!pes_buf) { - pes_size = 500 * 1024 * 1024; - pes_buf = (uint8_t *) malloc(pes_size); - if (!pes_buf) { // out of memory, should never happen - return; - } - pes_index = 0; + pes_size = 500 * 1024 * 1024; + pes_buf = (uint8_t *) malloc(pes_size); + if (!pes_buf) { // out of memory, should never happen + return; + } + pes_index = 0; } - if (is_start) { // start of pes packet - if (pes_index) { - if (0) { - fprintf(stderr, "pip: PES packet %8d %02x%02x\n", pes_index, - pes_buf[2], pes_buf[3]); - } - if (pes_buf[0] || pes_buf[1] || pes_buf[2] != 0x01) { - // FIXME: first should always fail - esyslog(tr("[softhddev]pip: invalid PES packet %d\n"), - pes_index); - } else { - PipPlayVideo(pes_buf, pes_index); - // FIXME: buffer full: pes packet is dropped - } - pes_index = 0; - } + if (is_start) { // start of pes packet + if (pes_index) { + if (0) { + fprintf(stderr, "pip: PES packet %8d %02x%02x\n", pes_index, pes_buf[2], pes_buf[3]); + } + if (pes_buf[0] || pes_buf[1] || pes_buf[2] != 0x01) { + // FIXME: first should always fail + esyslog(tr("[softhddev]pip: invalid PES packet %d\n"), pes_index); + } else { + PipPlayVideo(pes_buf, pes_index); + // FIXME: buffer full: pes packet is dropped + } + pes_index = 0; + } } if (pes_index + size > pes_size) { - esyslog(tr("[softhddev]pip: pes buffer too small\n")); - pes_size *= 2; - if (pes_index + size > pes_size) { - pes_size = (pes_index + size) * 2; - } - pes_buf = (uint8_t *) realloc(pes_buf, pes_size); - if (!pes_buf) { // out of memory, should never happen - return; - } + 
esyslog(tr("[softhddev]pip: pes buffer too small\n")); + pes_size *= 2; + if (pes_index + size > pes_size) { + pes_size = (pes_index + size) * 2; + } + pes_buf = (uint8_t *) realloc(pes_buf, pes_size); + if (!pes_buf) { // out of memory, should never happen + return; + } } memcpy(pes_buf + pes_index, data, size); pes_index += size; @@ -1844,58 +1878,57 @@ void cSoftReceiver::Receive(uchar * data, int size) p = data; while (size >= TS_PACKET_SIZE) { - int payload; + int payload; - if (p[0] != TS_PACKET_SYNC) { - esyslog(tr("[softhddev]tsdemux: transport stream out of sync\n")); - // FIXME: kill all buffers - return; - } - if (p[1] & 0x80) { // error indicatord - dsyslog("[softhddev]tsdemux: transport error\n"); - // FIXME: kill all buffers - goto next_packet; - } - if (0) { - int pid; + if (p[0] != TS_PACKET_SYNC) { + esyslog(tr("[softhddev]tsdemux: transport stream out of sync\n")); + // FIXME: kill all buffers + return; + } + if (p[1] & 0x80) { // error indicatord + dsyslog("[softhddev]tsdemux: transport error\n"); + // FIXME: kill all buffers + goto next_packet; + } + if (0) { + int pid; - pid = (p[1] & 0x1F) << 8 | p[2]; - fprintf(stderr, "tsdemux: PID: %#04x%s%s\n", pid, - p[1] & 0x40 ? " start" : "", p[3] & 0x10 ? " payload" : ""); - } - // skip adaptation field - switch (p[3] & 0x30) { // adaption field - case 0x00: // reserved - case 0x20: // adaptation field only - default: - goto next_packet; - case 0x10: // only payload - payload = 4; - break; - case 0x30: // skip adapation field - payload = 5 + p[4]; - // illegal length, ignore packet - if (payload >= TS_PACKET_SIZE) { - dsyslog - ("[softhddev]tsdemux: illegal adaption field length\n"); - goto next_packet; - } - break; - } + pid = (p[1] & 0x1F) << 8 | p[2]; + fprintf(stderr, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "", + p[3] & 0x10 ? " payload" : ""); + } + // skip adaptation field + switch (p[3] & 0x30) { // adaption field + case 0x00: // reserved + case 0x20: // adaptation field only + default: + goto next_packet; + case 0x10: // only payload + payload = 4; + break; + case 0x30: // skip adapation field + payload = 5 + p[4]; + // illegal length, ignore packet + if (payload >= TS_PACKET_SIZE) { + dsyslog("[softhddev]tsdemux: illegal adaption field length\n"); + goto next_packet; + } + break; + } - PipPesParse(p + payload, TS_PACKET_SIZE - payload, p[1] & 0x40); + PipPesParse(p + payload, TS_PACKET_SIZE - payload, p[1] & 0x40); next_packet: - p += TS_PACKET_SIZE; - size -= TS_PACKET_SIZE; + p += TS_PACKET_SIZE; + size -= TS_PACKET_SIZE; } } ////////////////////////////////////////////////////////////////////////////// -static cSoftReceiver *PipReceiver; ///< PIP receiver -static int PipChannelNr; ///< last PIP channel number -static const cChannel *PipChannel; ///< current PIP channel +static cSoftReceiver *PipReceiver; ///< PIP receiver +static int PipChannelNr; ///< last PIP channel number +static const cChannel *PipChannel; ///< current PIP channel /** ** Stop PIP. @@ -1922,26 +1955,26 @@ static void NewPip(int channel_nr) #ifdef DEBUG // is device replaying? 
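The Receive() method above walks its buffer in 188-byte TS packets: it checks the 0x47 sync byte, drops packets with the transport-error bit set, skips the adaptation field according to the two adaptation_field_control bits, and hands the remaining payload plus the payload-unit-start flag to PipPesParse(), which reassembles complete PES packets in a growing buffer. A standalone sketch of just that header arithmetic, assuming standard MPEG-TS framing; ts_payload_offset() is an illustrative helper, not a function of this plugin.

#include <stdint.h>

#define TS_PACKET_SIZE 188
#define TS_PACKET_SYNC 0x47

// Return the offset of the payload inside one TS packet,
// or -1 if the packet carries no usable payload.
static int ts_payload_offset(const uint8_t *p)
{
    if (p[0] != TS_PACKET_SYNC) {           // lost sync
        return -1;
    }
    if (p[1] & 0x80) {                      // transport_error_indicator set
        return -1;
    }
    switch (p[3] & 0x30) {                  // adaptation_field_control
        case 0x10:                          // payload only
            return 4;
        case 0x30: {                        // adaptation field followed by payload
            int offset = 5 + p[4];          // 4 byte header + length byte + field
            return offset < TS_PACKET_SIZE ? offset : -1;   // reject illegal lengths
        }
        default:                            // reserved, or adaptation field only
            return -1;
    }
}

// Usage, mirroring the loop in cSoftReceiver::Receive():
//   int off = ts_payload_offset(p);
//   if (off > 0)
//       PipPesParse(p + off, TS_PACKET_SIZE - off, p[1] & 0x40);  // 0x40 = payload_unit_start_indicator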
if (cDevice::PrimaryDevice()->Replaying() && cControl::Control()) { - dsyslog("[softhddev]%s: replay active\n", __FUNCTION__); - // FIXME: need to find PID + dsyslog("[softhddev]%s: replay active\n", __FUNCTION__); + // FIXME: need to find PID } #endif if (!channel_nr) { - channel_nr = cDevice::CurrentChannel(); + channel_nr = cDevice::CurrentChannel(); } LOCK_CHANNELS_READ; if (channel_nr && (channel = Channels MURKS GetByNumber(channel_nr)) - && (device = cDevice::GetDevice(channel, 0, false, false))) { + && (device = cDevice::GetDevice(channel, 0, false, false))) { - DelPip(); + DelPip(); - device->SwitchChannel(channel, false); - receiver = new cSoftReceiver(channel); - device->AttachReceiver(receiver); - PipReceiver = receiver; - PipChannel = channel; - PipChannelNr = channel_nr; + device->SwitchChannel(channel, false); + receiver = new cSoftReceiver(channel); + device->AttachReceiver(receiver); + PipReceiver = receiver; + PipChannel = channel; + PipChannelNr = channel_nr; } } @@ -1951,13 +1984,13 @@ static void NewPip(int channel_nr) static void TogglePip(void) { if (PipReceiver) { - int attached; + int attached; - attached = PipReceiver->IsAttached(); - DelPip(); - if (attached) { // turn off only if last PIP was on - return; - } + attached = PipReceiver->IsAttached(); + DelPip(); + if (attached) { // turn off only if last PIP was on + return; + } } NewPip(PipChannelNr); } @@ -1975,30 +2008,29 @@ static void PipNextAvailableChannel(int direction) channel = PipChannel; first = channel; - DelPip(); // disable PIP to free the device + DelPip(); // disable PIP to free the device LOCK_CHANNELS_READ; while (channel) { - bool ndr; - cDevice *device; + bool ndr; + cDevice *device; - channel = direction > 0 ? Channels MURKS Next(channel) - : Channels MURKS Prev(channel); - if (!channel && Setup.ChannelsWrap) { - channel = - direction > 0 ? Channels MURKS First() : Channels MURKS Last(); - } - if (channel && !channel->GroupSep() - && (device = cDevice::GetDevice(channel, 0, false, true)) - && device->ProvidesChannel(channel, 0, &ndr) && !ndr) { + channel = direction > 0 ? Channels MURKS Next(channel) + : Channels MURKS Prev(channel); + if (!channel && Setup.ChannelsWrap) { + channel = direction > 0 ? Channels MURKS First() : Channels MURKS Last(); + } + if (channel && !channel->GroupSep() + && (device = cDevice::GetDevice(channel, 0, false, true)) + && device->ProvidesChannel(channel, 0, &ndr) && !ndr) { - NewPip(channel->Number()); - return; - } - if (channel == first) { - Skins.Message(mtError, tr("Channel not available!")); - break; - } + NewPip(channel->Number()); + return; + } + if (channel == first) { + Skins.Message(mtError, tr("Channel not available!")); + break; + } } } @@ -2015,9 +2047,9 @@ static void SwapPipChannels(void) NewPip(0); if (channel) { - LOCK_CHANNELS_READ; + LOCK_CHANNELS_READ; - Channels MURKS SwitchTo(channel->Number()); + Channels MURKS SwitchTo(channel->Number()); } } @@ -2031,37 +2063,30 @@ static void SwapPipPosition(void) double video_aspect; PipAltPosition ^= 1; - if (!PipReceiver) { // no PIP visible, no update needed - return; + if (!PipReceiver) { // no PIP visible, no update needed + return; } GetOsdSize(&width, &height, &video_aspect); if (PipAltPosition) { - PipSetPosition((ConfigPipAltVideoX * width) / 100, - (ConfigPipAltVideoY * height) / 100, - ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / - 100 : width, - ConfigPipAltVideoHeight ? 
(ConfigPipAltVideoHeight * height) / - 100 : height, (ConfigPipAltX * width) / 100, - (ConfigPipAltY * height) / 100, - ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, - ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); + PipSetPosition((ConfigPipAltVideoX * width) / 100, (ConfigPipAltVideoY * height) / 100, + ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / 100 : width, + ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / 100 : height, (ConfigPipAltX * width) / 100, + (ConfigPipAltY * height) / 100, ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, + ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); } else { - PipSetPosition((ConfigPipVideoX * width) / 100, - (ConfigPipVideoY * height) / 100, - ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, - ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / - 100 : height, (ConfigPipX * width) / 100, - (ConfigPipY * height) / 100, - ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, - ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); + PipSetPosition((ConfigPipVideoX * width) / 100, (ConfigPipVideoY * height) / 100, + ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, + ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / 100 : height, (ConfigPipX * width) / 100, + (ConfigPipY * height) / 100, ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, + ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); } } #endif ////////////////////////////////////////////////////////////////////////////// -// cOsdMenu +// cOsdMenu ////////////////////////////////////////////////////////////////////////////// /** @@ -2069,10 +2094,10 @@ static void SwapPipPosition(void) */ typedef enum { - HksInitial, ///< initial state - HksBlue, ///< blue button pressed - HksBlue1, ///< blue and 1 number pressed - HksRed, ///< red button pressed + HksInitial, ///< initial state + HksBlue, ///< blue button pressed + HksBlue1, ///< blue and 1 number pressed + HksRed, ///< red button pressed } HkState; /** @@ -2081,12 +2106,12 @@ typedef enum class cSoftHdMenu:public cOsdMenu { private: - HkState HotkeyState; ///< current hot-key state - int HotkeyCode; ///< current hot-key code - void Create(void); ///< create plugin main menu + HkState HotkeyState; ///< current hot-key state + int HotkeyCode; ///< current hot-key code + void Create(void); ///< create plugin main menu public: - cSoftHdMenu(const char *, int = 0, int = 0, int = 0, int = 0, int = 0); - virtual ~ cSoftHdMenu(); + cSoftHdMenu(const char *, int = 0, int = 0, int = 0, int = 0, int = 0); + virtual ~ cSoftHdMenu(); virtual eOSState ProcessKey(eKeys); }; @@ -2100,53 +2125,53 @@ void cSoftHdMenu::Create(void) int duped; int dropped; int counter; - float frametime; + float frametime; - current = Current(); // get current menu item index - Clear(); // clear the menu + current = Current(); // get current menu item index + Clear(); // clear the menu SetHasHotkeys(); if (ConfigDetachFromMainMenu) { - Add(new cOsdItem(hk(tr("Detach SoftHdDevice")), osUser1)); + Add(new cOsdItem(hk(tr("Detach SoftHdDevice")), osUser1)); } else { - Add(new cOsdItem(hk(tr("Suspend SoftHdDevice")), osUser1)); + Add(new cOsdItem(hk(tr("Suspend SoftHdDevice")), osUser1)); } #ifdef USE_PIP if (PipReceiver) { - Add(new cOsdItem(hk(tr("PIP toggle on/off: off")), osUser2)); + Add(new cOsdItem(hk(tr("PIP toggle on/off: off")), osUser2)); } else { - Add(new cOsdItem(hk(tr("PIP toggle on/off: on")), 
osUser2)); + Add(new cOsdItem(hk(tr("PIP toggle on/off: on")), osUser2)); } Add(new cOsdItem(hk(tr("PIP zapmode (not working)")), osUser3)); Add(new cOsdItem(hk(tr("PIP channel +")), osUser4)); Add(new cOsdItem(hk(tr("PIP channel -")), osUser5)); if (PipReceiver) { - Add(new cOsdItem(hk(tr("PIP on/swap channels: swap")), osUser6)); + Add(new cOsdItem(hk(tr("PIP on/swap channels: swap")), osUser6)); } else { - Add(new cOsdItem(hk(tr("PIP on/swap channels: on")), osUser6)); + Add(new cOsdItem(hk(tr("PIP on/swap channels: on")), osUser6)); } if (PipAltPosition) { - Add(new cOsdItem(hk(tr("PIP swap position: normal")), osUser7)); + Add(new cOsdItem(hk(tr("PIP swap position: normal")), osUser7)); } else { - Add(new cOsdItem(hk(tr("PIP swap position: alternative")), osUser7)); + Add(new cOsdItem(hk(tr("PIP swap position: alternative")), osUser7)); } Add(new cOsdItem(hk(tr("PIP close")), osUser8)); #endif Add(new cOsdItem(NULL, osUnknown, false)); Add(new cOsdItem(NULL, osUnknown, false)); GetStats(&missed, &duped, &dropped, &counter, &frametime); - Add(new cOsdItem(cString::sprintf(tr(" Frames missed(%d) duped(%d) dropped(%d) total(%d)"), missed, duped, dropped, counter), osUnknown, false)); - Add(new cOsdItem(cString::sprintf(tr(" Frame Process time %2.2fms"), frametime), osUnknown, false)); - SetCurrent(Get(current)); // restore selected menu entry - Display(); // display build menu + Add(new cOsdItem(cString::sprintf(tr(" Frames missed(%d) duped(%d) dropped(%d) total(%d)"), missed, duped, dropped, + counter), osUnknown, false)); + Add(new cOsdItem(cString::sprintf(tr(" Frame Process time %2.2fms"), frametime), osUnknown, false)); + SetCurrent(Get(current)); // restore selected menu entry + Display(); // display build menu } /** ** Soft device menu constructor. 
*/ -cSoftHdMenu::cSoftHdMenu(const char *title, int c0, int c1, int c2, int c3, - int c4) +cSoftHdMenu::cSoftHdMenu(const char *title, int c0, int c1, int c2, int c3, int c4) :cOsdMenu(title, c0, c1, c2, c3, c4) { HotkeyState = HksInitial; @@ -2169,133 +2194,127 @@ cSoftHdMenu::~cSoftHdMenu() static void HandleHotkey(int code) { switch (code) { - case 10: // disable pass-through - AudioPassthroughState = 0; - CodecSetAudioPassthrough(0); - Skins.QueueMessage(mtInfo, tr("pass-through disabled")); - break; - case 11: // enable pass-through - // note: you can't enable, without configured pass-through - AudioPassthroughState = 1; - CodecSetAudioPassthrough(ConfigAudioPassthrough); - Skins.QueueMessage(mtInfo, tr("pass-through enabled")); - break; - case 12: // toggle pass-through - AudioPassthroughState ^= 1; - if (AudioPassthroughState) { - CodecSetAudioPassthrough(ConfigAudioPassthrough); - Skins.QueueMessage(mtInfo, tr("pass-through enabled")); - } else { - CodecSetAudioPassthrough(0); - Skins.QueueMessage(mtInfo, tr("pass-through disabled")); - } - break; - case 13: // decrease audio delay - ConfigVideoAudioDelay -= 10; - VideoSetAudioDelay(ConfigVideoAudioDelay); - Skins.QueueMessage(mtInfo, - cString::sprintf(tr("audio delay changed to %d"), - ConfigVideoAudioDelay)); - break; - case 14: // increase audio delay - ConfigVideoAudioDelay += 10; - VideoSetAudioDelay(ConfigVideoAudioDelay); - Skins.QueueMessage(mtInfo, - cString::sprintf(tr("audio delay changed to %d"), - ConfigVideoAudioDelay)); - break; - case 15: - ConfigAudioDownmix ^= 1; - fprintf(stderr, "toggle downmix\n"); - CodecSetAudioDownmix(ConfigAudioDownmix); - if (ConfigAudioDownmix) { - Skins.QueueMessage(mtInfo, tr("surround downmix enabled")); - } else { - Skins.QueueMessage(mtInfo, tr("surround downmix disabled")); - } - ResetChannelId(); - break; + case 10: // disable pass-through + AudioPassthroughState = 0; + CodecSetAudioPassthrough(0); + Skins.QueueMessage(mtInfo, tr("pass-through disabled")); + break; + case 11: // enable pass-through + // note: you can't enable, without configured pass-through + AudioPassthroughState = 1; + CodecSetAudioPassthrough(ConfigAudioPassthrough); + Skins.QueueMessage(mtInfo, tr("pass-through enabled")); + break; + case 12: // toggle pass-through + AudioPassthroughState ^= 1; + if (AudioPassthroughState) { + CodecSetAudioPassthrough(ConfigAudioPassthrough); + Skins.QueueMessage(mtInfo, tr("pass-through enabled")); + } else { + CodecSetAudioPassthrough(0); + Skins.QueueMessage(mtInfo, tr("pass-through disabled")); + } + break; + case 13: // decrease audio delay + ConfigVideoAudioDelay -= 10; + VideoSetAudioDelay(ConfigVideoAudioDelay); + Skins.QueueMessage(mtInfo, cString::sprintf(tr("audio delay changed to %d"), ConfigVideoAudioDelay)); + break; + case 14: // increase audio delay + ConfigVideoAudioDelay += 10; + VideoSetAudioDelay(ConfigVideoAudioDelay); + Skins.QueueMessage(mtInfo, cString::sprintf(tr("audio delay changed to %d"), ConfigVideoAudioDelay)); + break; + case 15: + ConfigAudioDownmix ^= 1; + fprintf(stderr, "toggle downmix\n"); + CodecSetAudioDownmix(ConfigAudioDownmix); + if (ConfigAudioDownmix) { + Skins.QueueMessage(mtInfo, tr("surround downmix enabled")); + } else { + Skins.QueueMessage(mtInfo, tr("surround downmix disabled")); + } + ResetChannelId(); + break; - case 20: // disable full screen - VideoSetFullscreen(0); - break; - case 21: // enable full screen - VideoSetFullscreen(1); - break; - case 22: // toggle full screen - VideoSetFullscreen(-1); - break; - case 23: // 
disable auto-crop - ConfigAutoCropEnabled = 0; - VideoSetAutoCrop(0, ConfigAutoCropDelay, ConfigAutoCropTolerance); - Skins.QueueMessage(mtInfo, tr("auto-crop disabled and freezed")); - break; - case 24: // enable auto-crop - ConfigAutoCropEnabled = 1; - if (!ConfigAutoCropInterval) { - ConfigAutoCropInterval = 50; - } - VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, - ConfigAutoCropTolerance); - Skins.QueueMessage(mtInfo, tr("auto-crop enabled")); - break; - case 25: // toggle auto-crop - ConfigAutoCropEnabled ^= 1; - // no interval configured, use some default - if (!ConfigAutoCropInterval) { - ConfigAutoCropInterval = 50; - } - VideoSetAutoCrop(ConfigAutoCropEnabled * ConfigAutoCropInterval, - ConfigAutoCropDelay, ConfigAutoCropTolerance); - if (ConfigAutoCropEnabled) { - Skins.QueueMessage(mtInfo, tr("auto-crop enabled")); - } else { - Skins.QueueMessage(mtInfo, - tr("auto-crop disabled and freezed")); - } - break; - case 30: // change 4:3 -> window mode - case 31: - case 32: - VideoSet4to3DisplayFormat(code - 30); - break; - case 39: // rotate 4:3 -> window mode - VideoSet4to3DisplayFormat(-1); - break; - case 40: // change 16:9 -> window mode - case 41: - case 42: - VideoSetOtherDisplayFormat(code - 40); - break; - case 49: // rotate 16:9 -> window mode - VideoSetOtherDisplayFormat(-1); - break; + case 20: // disable full screen + VideoSetFullscreen(0); + break; + case 21: // enable full screen + VideoSetFullscreen(1); + break; + case 22: // toggle full screen + VideoSetFullscreen(-1); + break; + case 23: // disable auto-crop + ConfigAutoCropEnabled = 0; + VideoSetAutoCrop(0, ConfigAutoCropDelay, ConfigAutoCropTolerance); + Skins.QueueMessage(mtInfo, tr("auto-crop disabled and freezed")); + break; + case 24: // enable auto-crop + ConfigAutoCropEnabled = 1; + if (!ConfigAutoCropInterval) { + ConfigAutoCropInterval = 50; + } + VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, ConfigAutoCropTolerance); + Skins.QueueMessage(mtInfo, tr("auto-crop enabled")); + break; + case 25: // toggle auto-crop + ConfigAutoCropEnabled ^= 1; + // no interval configured, use some default + if (!ConfigAutoCropInterval) { + ConfigAutoCropInterval = 50; + } + VideoSetAutoCrop(ConfigAutoCropEnabled * ConfigAutoCropInterval, ConfigAutoCropDelay, + ConfigAutoCropTolerance); + if (ConfigAutoCropEnabled) { + Skins.QueueMessage(mtInfo, tr("auto-crop enabled")); + } else { + Skins.QueueMessage(mtInfo, tr("auto-crop disabled and freezed")); + } + break; + case 30: // change 4:3 -> window mode + case 31: + case 32: + VideoSet4to3DisplayFormat(code - 30); + break; + case 39: // rotate 4:3 -> window mode + VideoSet4to3DisplayFormat(-1); + break; + case 40: // change 16:9 -> window mode + case 41: + case 42: + VideoSetOtherDisplayFormat(code - 40); + break; + case 49: // rotate 16:9 -> window mode + VideoSetOtherDisplayFormat(-1); + break; #ifdef USE_PIP - case 102: // PIP toggle - TogglePip(); - break; - case 104: - PipNextAvailableChannel(1); - break; - case 105: - PipNextAvailableChannel(-1); - break; - case 106: - SwapPipChannels(); - break; - case 107: - SwapPipPosition(); - break; - case 108: - DelPip(); - PipChannelNr = 0; - break; + case 102: // PIP toggle + TogglePip(); + break; + case 104: + PipNextAvailableChannel(1); + break; + case 105: + PipNextAvailableChannel(-1); + break; + case 106: + SwapPipChannels(); + break; + case 107: + SwapPipPosition(); + break; + case 108: + DelPip(); + PipChannelNr = 0; + break; #endif - default: - esyslog(tr("[softhddev]: hot key %d is not 
supported\n"), code); - break; + default: + esyslog(tr("[softhddev]: hot key %d is not supported\n"), code); + break; } } @@ -2311,112 +2330,108 @@ eOSState cSoftHdMenu::ProcessKey(eKeys key) //dsyslog("[softhddev]%s: %x\n", __FUNCTION__, key); switch (HotkeyState) { - case HksInitial: // initial state, waiting for hot key - if (key == kBlue) { - HotkeyState = HksBlue; // blue button - return osContinue; - } - if (key == kRed) { - HotkeyState = HksRed; // red button - return osContinue; - } - break; - case HksBlue: // blue and first number - if (k0 <= key && key <= k9) { - HotkeyCode = key - k0; - HotkeyState = HksBlue1; - return osContinue; - } - HotkeyState = HksInitial; - break; - case HksBlue1: // blue and second number/enter - if (k0 <= key && key <= k9) { - HotkeyCode *= 10; - HotkeyCode += key - k0; - HotkeyState = HksInitial; - dsyslog("[softhddev]%s: hot-key %d\n", __FUNCTION__, - HotkeyCode); - HandleHotkey(HotkeyCode); - return osEnd; - } - if (key == kOk) { - HotkeyState = HksInitial; - dsyslog("[softhddev]%s: hot-key %d\n", __FUNCTION__, - HotkeyCode); - HandleHotkey(HotkeyCode); - return osEnd; - } - HotkeyState = HksInitial; - break; - case HksRed: // red and first number - if (k0 <= key && key <= k9) { - HotkeyCode = 100 + key - k0; - HotkeyState = HksInitial; - HandleHotkey(HotkeyCode); - return osEnd; - } - HotkeyState = HksInitial; - break; + + case HksInitial: // initial state, waiting for hot key + if (key == kBlue) { + HotkeyState = HksBlue; // blue button + return osContinue; + } + if (key == kRed) { + HotkeyState = HksRed; // red button + return osContinue; + } + break; + case HksBlue: // blue and first number + if (k0 <= key && key <= k9) { + HotkeyCode = key - k0; + HotkeyState = HksBlue1; + return osContinue; + } + HotkeyState = HksInitial; + break; + case HksBlue1: // blue and second number/enter + if (k0 <= key && key <= k9) { + HotkeyCode *= 10; + HotkeyCode += key - k0; + HotkeyState = HksInitial; + dsyslog("[softhddev]%s: hot-key %d\n", __FUNCTION__, HotkeyCode); + HandleHotkey(HotkeyCode); + return osEnd; + } + if (key == kOk) { + HotkeyState = HksInitial; + dsyslog("[softhddev]%s: hot-key %d\n", __FUNCTION__, HotkeyCode); + HandleHotkey(HotkeyCode); + return osEnd; + } + HotkeyState = HksInitial; + case HksRed: // red and first number + if (k0 <= key && key <= k9) { + HotkeyCode = 100 + key - k0; + HotkeyState = HksInitial; + HandleHotkey(HotkeyCode); + return osEnd; + } + HotkeyState = HksInitial; + break; } // call standard function state = cOsdMenu::ProcessKey(key); switch (state) { - case osUser1: - // not already suspended - if (SuspendMode == NOT_SUSPENDED && !cSoftHdControl::Player) { - cControl::Launch(new cSoftHdControl); - cControl::Attach(); - if (ConfigDetachFromMainMenu) { - Suspend(1, 1, 0); - SuspendMode = SUSPEND_DETACHED; - } else { - Suspend(ConfigSuspendClose, ConfigSuspendClose, - ConfigSuspendX11); - SuspendMode = SUSPEND_NORMAL; - } + case osUser1: + // not already suspended + if (SuspendMode == NOT_SUSPENDED && !cSoftHdControl::Player) { + cControl::Launch(new cSoftHdControl); + cControl::Attach(); + if (ConfigDetachFromMainMenu) { + Suspend(1, 1, 0); + SuspendMode = SUSPEND_DETACHED; + } else { + Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11); + SuspendMode = SUSPEND_NORMAL; + } #ifdef USE_OPENGLOSD - dsyslog("[softhddev]stopping Ogl Thread osUser1"); - cSoftOsdProvider::StopOpenGlThread(); + dsyslog("[softhddev]stopping Ogl Thread osUser1"); + cSoftOsdProvider::StopOpenGlThread(); #endif - if 
(ShutdownHandler.GetUserInactiveTime()) { - dsyslog("[softhddev]%s: set user inactive\n", - __FUNCTION__); - ShutdownHandler.SetUserInactive(); - } - } - return osEnd; + if (ShutdownHandler.GetUserInactiveTime()) { + dsyslog("[softhddev]%s: set user inactive\n", __FUNCTION__); + ShutdownHandler.SetUserInactive(); + } + } + return osEnd; #ifdef USE_PIP - case osUser2: - TogglePip(); - return osEnd; - case osUser4: - PipNextAvailableChannel(1); - return osEnd; - case osUser5: - PipNextAvailableChannel(-1); - return osEnd; - case osUser6: - SwapPipChannels(); - return osEnd; - case osUser7: - SwapPipPosition(); - return osEnd; - case osUser8: - DelPip(); - PipChannelNr = 0; - return osEnd; + case osUser2: + TogglePip(); + return osEnd; + case osUser4: + PipNextAvailableChannel(1); + return osEnd; + case osUser5: + PipNextAvailableChannel(-1); + return osEnd; + case osUser6: + SwapPipChannels(); + return osEnd; + case osUser7: + SwapPipPosition(); + return osEnd; + case osUser8: + DelPip(); + PipChannelNr = 0; + return osEnd; #endif - default: - Create(); - break; + default: + Create(); + break; } return state; } ////////////////////////////////////////////////////////////////////////////// -// cDevice +// cDevice ////////////////////////////////////////////////////////////////////////////// class cSoftHdDevice:public cDevice @@ -2425,10 +2440,16 @@ class cSoftHdDevice:public cDevice cSoftHdDevice(void); virtual ~ cSoftHdDevice(void); #ifdef CUVID - virtual cString DeviceName(void) const { return "softhdcuvid"; } + virtual cString DeviceName(void) const + { + return "softhdcuvid"; + } #endif #ifdef VAAPI - virtual cString DeviceName(void) const { return "softhdvaapi"; } + virtual cString DeviceName(void) const + { + return "softhdvaapi"; + } #endif virtual bool HasDecoder(void) const; virtual bool CanReplay(void) const; @@ -2518,18 +2539,18 @@ void cSoftHdDevice::MakePrimaryDevice(bool on) cDevice::MakePrimaryDevice(on); if (on) { - new cSoftOsdProvider(); + new cSoftOsdProvider(); - if (SuspendMode == SUSPEND_DETACHED) { - Resume(); - SuspendMode = NOT_SUSPENDED; - } + if (SuspendMode == SUSPEND_DETACHED) { + Resume(); + SuspendMode = NOT_SUSPENDED; + } } else if (SuspendMode == NOT_SUSPENDED) { - Suspend(1, 1, 0); - SuspendMode = SUSPEND_DETACHED; + Suspend(1, 1, 0); + SuspendMode = SUSPEND_DETACHED; #ifdef USE_OPENGLOSD - dsyslog("[softhddev]stopping Ogl Thread MakePrimaryDevice"); - cSoftOsdProvider::StopOpenGlThread(); + dsyslog("[softhddev]stopping Ogl Thread MakePrimaryDevice"); + cSoftOsdProvider::StopOpenGlThread(); #endif } } @@ -2547,7 +2568,7 @@ cSpuDecoder *cSoftHdDevice::GetSpuDecoder(void) dsyslog("[softhddev]%s:\n", __FUNCTION__); if (!spuDecoder && IsPrimaryDevice()) { - spuDecoder = new cDvbSpuDecoder(); + spuDecoder = new cDvbSpuDecoder(); } return spuDecoder; } @@ -2580,36 +2601,36 @@ bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode) dsyslog("[softhddev]%s: %d\n", __FUNCTION__, play_mode); switch (play_mode) { - case pmAudioVideo: - break; - case pmAudioOnly: - case pmAudioOnlyBlack: - break; - case pmVideoOnly: - break; - case pmNone: - break; - case pmExtern_THIS_SHOULD_BE_AVOIDED: - dsyslog("[softhddev] play mode external\n"); - // FIXME: what if already suspended? 
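/*
** Illustrative sketch, not from this patch: the suspend states this file
** switches between. The numeric values mirror the STAT help text further
** below; the plugin's real definitions live in its headers, outside this
** hunk, so both the enum name and the comments here are assumptions.
*/
enum SuspendModeSketch {
    SUSPEND_EXTERNAL = -1,              // external player took over (pmExtern)
    NOT_SUSPENDED = 0,                  // normal operation, reached via Resume()
    SUSPEND_NORMAL = 1,                 // SVDRP SUSP / suspend from the main menu
    SUSPEND_DETACHED = 2                // SVDRP DETA / plugin lost the primary device
};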
- Suspend(1, 1, 0); - SuspendMode = SUSPEND_EXTERNAL; + case pmAudioVideo: + break; + case pmAudioOnly: + case pmAudioOnlyBlack: + break; + case pmVideoOnly: + break; + case pmNone: + break; + case pmExtern_THIS_SHOULD_BE_AVOIDED: + dsyslog("[softhddev] play mode external\n"); + // FIXME: what if already suspended? + Suspend(1, 1, 0); + SuspendMode = SUSPEND_EXTERNAL; #ifdef USE_OPENGLOSD - dsyslog("[softhddev]stopping Ogl Thread pmExtern_THIS_SHOULD_BE_AVOIDED"); - cSoftOsdProvider::StopOpenGlThread(); + dsyslog("[softhddev]stopping Ogl Thread pmExtern_THIS_SHOULD_BE_AVOIDED"); + cSoftOsdProvider::StopOpenGlThread(); #endif - return true; - default: - dsyslog("[softhddev] playmode not implemented... %d\n", play_mode); - break; + return true; + default: + dsyslog("[softhddev] playmode not implemented... %d\n", play_mode); + break; } if (SuspendMode != NOT_SUSPENDED) { - if (SuspendMode != SUSPEND_EXTERNAL) { - return false; - } - Resume(); - SuspendMode = NOT_SUSPENDED; + if (SuspendMode != SUSPEND_EXTERNAL) { + return false; + } + Resume(); + SuspendMode = NOT_SUSPENDED; } return::SetPlayMode(play_mode); @@ -2703,12 +2724,11 @@ void cSoftHdDevice::Mute(void) */ void cSoftHdDevice::StillPicture(const uchar * data, int length) { - dsyslog("[softhddev]%s: %s %p %d\n", __FUNCTION__, - data[0] == 0x47 ? "ts" : "pes", data, length); + dsyslog("[softhddev]%s: %s %p %d\n", __FUNCTION__, data[0] == 0x47 ? "ts" : "pes", data, length); - if (data[0] == 0x47) { // ts sync - cDevice::StillPicture(data, length); - return; + if (data[0] == 0x47) { // ts sync + cDevice::StillPicture(data, length); + return; } ::StillPicture(data, length); @@ -2723,8 +2743,7 @@ void cSoftHdDevice::StillPicture(const uchar * data, int length) ** @retval true if ready ** @retval false if busy */ -bool cSoftHdDevice::Poll( - __attribute__ ((unused)) cPoller & poller, int timeout_ms) +bool cSoftHdDevice::Poll( __attribute__((unused)) cPoller & poller, int timeout_ms) { //dsyslog("[softhddev]%s: %d\n", __FUNCTION__, timeout_ms); @@ -2749,8 +2768,7 @@ bool cSoftHdDevice::Flush(int timeout_ms) ** Sets the video display format to the given one (only useful if this ** device has an MPEG decoder). */ -void cSoftHdDevice:: SetVideoDisplayFormat(eVideoDisplayFormat - video_display_format) +void cSoftHdDevice::SetVideoDisplayFormat(eVideoDisplayFormat video_display_format) { dsyslog("[softhddev]%s: %d\n", __FUNCTION__, video_display_format); @@ -2760,10 +2778,10 @@ void cSoftHdDevice:: SetVideoDisplayFormat(eVideoDisplayFormat // called on every channel switch, no need to kill osd... if (last != video_display_format) { - last = video_display_format; + last = video_display_format; - ::VideoSetDisplayFormat(video_display_format); - cSoftOsd::Dirty = 1; + ::VideoSetDisplayFormat(video_display_format); + cSoftOsd::Dirty = 1; } #endif } @@ -2822,18 +2840,18 @@ int cSoftHdDevice::PlayAudio(const uchar * data, int length, uchar id) return::PlayAudio(data, length, id); } -void cSoftHdDevice::SetAudioTrackDevice( - __attribute__ ((unused)) eTrackType type) +void cSoftHdDevice::SetAudioTrackDevice( __attribute__((unused)) eTrackType type) { //dsyslog("[softhddev]%s:\n", __FUNCTION__); } -void cSoftHdDevice::SetDigitalAudioDevice( __attribute__ ((unused)) bool on) +void cSoftHdDevice::SetDigitalAudioDevice( __attribute__((unused)) + bool on) { //dsyslog("[softhddev]%s: %s\n", __FUNCTION__, on ? 
"true" : "false"); } -void cSoftHdDevice::SetAudioChannelDevice( __attribute__ ((unused)) +void cSoftHdDevice::SetAudioChannelDevice( __attribute__((unused)) int audio_channel) { //dsyslog("[softhddev]%s: %d\n", __FUNCTION__, audio_channel); @@ -2915,16 +2933,15 @@ int cSoftHdDevice::PlayTsAudio(const uchar * data, int length) ** @param width number of horizontal pixels in the frame ** @param height number of vertical pixels in the frame */ -uchar *cSoftHdDevice::GrabImage(int &size, bool jpeg, int quality, int width, - int height) +uchar *cSoftHdDevice::GrabImage(int &size, bool jpeg, int quality, int width, int height) { - dsyslog("[softhddev]%s: %d, %d, %d, %dx%d\n", __FUNCTION__, size, jpeg, quality, width, height); + dsyslog("[softhddev]%s: %d, %d, %d, %dx%d\n", __FUNCTION__, size, jpeg, quality, width, height); if (SuspendMode != NOT_SUSPENDED) { - return NULL; + return NULL; } - if (quality < 0) { // caller should care, but fix it - quality = 95; + if (quality < 0) { // caller should care, but fix it + quality = 95; } return::GrabImage(&size, jpeg, quality, width, height); @@ -2939,8 +2956,8 @@ uchar *cSoftHdDevice::GrabImage(int &size, bool jpeg, int quality, int width, ** ** @returns the real rectangle or cRect:Null if invalid. */ -cRect cSoftHdDevice::CanScaleVideo(const cRect & rect, - __attribute__ ((unused)) int alignment) +cRect cSoftHdDevice::CanScaleVideo(const cRect & rect, __attribute__((unused)) + int alignment) { return rect; } @@ -2953,8 +2970,7 @@ cRect cSoftHdDevice::CanScaleVideo(const cRect & rect, void cSoftHdDevice::ScaleVideo(const cRect & rect) { #ifdef OSD_DEBUG - dsyslog("[softhddev]%s: %dx%d%+d%+d\n", __FUNCTION__, rect.Width(), - rect.Height(), rect.X(), rect.Y()); + dsyslog("[softhddev]%s: %dx%d%+d%+d\n", __FUNCTION__, rect.Width(), rect.Height(), rect.X(), rect.Y()); #endif ::ScaleVideo(rect.X(), rect.Y(), rect.Width(), rect.Height()); } @@ -2964,14 +2980,13 @@ void cSoftHdDevice::ScaleVideo(const cRect & rect) /** ** Call rgb to jpeg for C Plugin. 
*/ -extern "C" uint8_t * CreateJpeg(uint8_t * image, int *size, int quality, - int width, int height) +extern "C" uint8_t * CreateJpeg(uint8_t * image, int *size, int quality, int width, int height) { - return (uint8_t *) RgbToJpeg((uchar *) image, width, height, *size, quality); + return (uint8_t *) RgbToJpeg((uchar *) image, width, height, *size, quality); } ////////////////////////////////////////////////////////////////////////////// -// cPlugin +// cPlugin ////////////////////////////////////////////////////////////////////////////// class cPluginSoftHdDevice:public cPlugin @@ -3083,29 +3098,27 @@ bool cPluginSoftHdDevice::Start(void) //dsyslog("[softhddev]%s:\n", __FUNCTION__); if (!MyDevice->IsPrimaryDevice()) { - isyslog("[softhddev] softhddevice %d is not the primary device!", - MyDevice->DeviceNumber()); - if (ConfigMakePrimary) { - // Must be done in the main thread - dsyslog("[softhddev] makeing softhddevice %d the primary device!", - MyDevice->DeviceNumber()); - DoMakePrimary = MyDevice->DeviceNumber() + 1; - } + isyslog("[softhddev] softhddevice %d is not the primary device!", MyDevice->DeviceNumber()); + if (ConfigMakePrimary) { + // Must be done in the main thread + dsyslog("[softhddev] makeing softhddevice %d the primary device!", MyDevice->DeviceNumber()); + DoMakePrimary = MyDevice->DeviceNumber() + 1; + } } switch (::Start()) { - case 1: - //cControl::Launch(new cSoftHdControl); - //cControl::Attach(); - // FIXME: VDR overwrites the control - SuspendMode = SUSPEND_NORMAL; - break; - case -1: - SuspendMode = SUSPEND_DETACHED; - break; - case 0: - default: - break; + case 1: + //cControl::Launch(new cSoftHdControl); + //cControl::Attach(); + // FIXME: VDR overwrites the control + SuspendMode = SUSPEND_NORMAL; + break; + case -1: + SuspendMode = SUSPEND_DETACHED; + break; + case 0: + default: + break; } return true; @@ -3132,14 +3145,14 @@ void cPluginSoftHdDevice::Housekeeping(void) // check if user is inactive, automatic enter suspend mode // FIXME: cControl prevents shutdown, disable this until fixed if (0 && SuspendMode == NOT_SUSPENDED && ShutdownHandler.IsUserInactive()) { - // don't overwrite already suspended suspend mode - cControl::Launch(new cSoftHdControl); - cControl::Attach(); - Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11); - SuspendMode = SUSPEND_NORMAL; + // don't overwrite already suspended suspend mode + cControl::Launch(new cSoftHdControl); + cControl::Attach(); + Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11); + SuspendMode = SUSPEND_NORMAL; #ifdef USE_OPENGLOSD - dsyslog("[softhddev]stopping Ogl Thread Housekeeping"); - cSoftOsdProvider::StopOpenGlThread(); + dsyslog("[softhddev]stopping Ogl Thread Housekeeping"); + cSoftOsdProvider::StopOpenGlThread(); #endif } @@ -3175,10 +3188,9 @@ void cPluginSoftHdDevice::MainThreadHook(void) //dsyslog("[softhddev]%s:\n", __FUNCTION__); if (DoMakePrimary) { - dsyslog("[softhddev]%s: switching primary device to %d\n", - __FUNCTION__, DoMakePrimary); - cDevice::SetPrimaryDevice(DoMakePrimary); - DoMakePrimary = 0; + dsyslog("[softhddev]%s: switching primary device to %d\n", __FUNCTION__, DoMakePrimary); + cDevice::SetPrimaryDevice(DoMakePrimary); + DoMakePrimary = 0; } ::MainThreadHook(); @@ -3209,335 +3221,335 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value) //dsyslog("[softhddev]%s: '%s' = '%s'\n", __FUNCTION__, name, value); if (!strcasecmp(name, "MakePrimary")) { - ConfigMakePrimary = atoi(value); - return true; + ConfigMakePrimary = 
atoi(value); + return true; } if (!strcasecmp(name, "HideMainMenuEntry")) { - ConfigHideMainMenuEntry = atoi(value); - return true; + ConfigHideMainMenuEntry = atoi(value); + return true; } if (!strcasecmp(name, "DetachFromMainMenu")) { - ConfigDetachFromMainMenu = atoi(value); - return true; + ConfigDetachFromMainMenu = atoi(value); + return true; } if (!strcasecmp(name, "Osd.Width")) { - ConfigOsdWidth = atoi(value); - VideoSetOsdSize(ConfigOsdWidth, ConfigOsdHeight); - return true; + ConfigOsdWidth = atoi(value); + VideoSetOsdSize(ConfigOsdWidth, ConfigOsdHeight); + return true; } if (!strcasecmp(name, "Osd.Height")) { - ConfigOsdHeight = atoi(value); - VideoSetOsdSize(ConfigOsdWidth, ConfigOsdHeight); - return true; + ConfigOsdHeight = atoi(value); + VideoSetOsdSize(ConfigOsdWidth, ConfigOsdHeight); + return true; } if (!strcasecmp(name, "Suspend.Close")) { - ConfigSuspendClose = atoi(value); - return true; + ConfigSuspendClose = atoi(value); + return true; } if (!strcasecmp(name, "Suspend.X11")) { - ConfigSuspendX11 = atoi(value); - return true; + ConfigSuspendX11 = atoi(value); + return true; } if (!strcasecmp(name, "Video4to3DisplayFormat")) { - Config4to3DisplayFormat = atoi(value); - VideoSet4to3DisplayFormat(Config4to3DisplayFormat); - return true; + Config4to3DisplayFormat = atoi(value); + VideoSet4to3DisplayFormat(Config4to3DisplayFormat); + return true; } if (!strcasecmp(name, "VideoOtherDisplayFormat")) { - ConfigOtherDisplayFormat = atoi(value); - VideoSetOtherDisplayFormat(ConfigOtherDisplayFormat); - return true; + ConfigOtherDisplayFormat = atoi(value); + VideoSetOtherDisplayFormat(ConfigOtherDisplayFormat); + return true; } if (!strcasecmp(name, "Background")) { - VideoSetBackground(ConfigVideoBackground = strtoul(value, NULL, 0)); - return true; + VideoSetBackground(ConfigVideoBackground = strtoul(value, NULL, 0)); + return true; } if (!strcasecmp(name, "StudioLevels")) { - VideoSetStudioLevels(ConfigVideoStudioLevels = atoi(value)); - return true; + VideoSetStudioLevels(ConfigVideoStudioLevels = atoi(value)); + return true; } if (!strcasecmp(name, "60HzMode")) { - VideoSet60HzMode(ConfigVideo60HzMode = atoi(value)); - return true; + VideoSet60HzMode(ConfigVideo60HzMode = atoi(value)); + return true; } if (!strcasecmp(name, "SoftStartSync")) { - VideoSetSoftStartSync(ConfigVideoSoftStartSync = atoi(value)); - return true; + VideoSetSoftStartSync(ConfigVideoSoftStartSync = atoi(value)); + return true; } if (!strcasecmp(name, "BlackPicture")) { - VideoSetBlackPicture(ConfigVideoBlackPicture = atoi(value)); - return true; + VideoSetBlackPicture(ConfigVideoBlackPicture = atoi(value)); + return true; } if (!strcasecmp(name, "ClearOnSwitch")) { - ConfigVideoClearOnSwitch = atoi(value); - return true; + ConfigVideoClearOnSwitch = atoi(value); + return true; } if (!strcasecmp(name, "Brightness")) { - int i; - i = atoi(value); - ConfigVideoBrightness = i>100?100:i; - VideoSetBrightness(ConfigVideoBrightness); - return true; + int i; + + i = atoi(value); + ConfigVideoBrightness = i > 100 ? 100 : i; + VideoSetBrightness(ConfigVideoBrightness); + return true; } if (!strcasecmp(name, "Contrast")) { - int i; - i = atoi(value); - ConfigVideoContrast = i>100?100:i; - VideoSetContrast(ConfigVideoContrast); - return true; + int i; + + i = atoi(value); + ConfigVideoContrast = i > 100 ? 
100 : i; + VideoSetContrast(ConfigVideoContrast); + return true; } if (!strcasecmp(name, "Saturation")) { - int i; - i = atoi(value); - ConfigVideoSaturation = i>100?100:i; - VideoSetSaturation(ConfigVideoSaturation); - return true; + int i; + + i = atoi(value); + ConfigVideoSaturation = i > 100 ? 100 : i; + VideoSetSaturation(ConfigVideoSaturation); + return true; } - if (!strcasecmp(name, "Gamma")) { - int i; - i = atoi(value); - ConfigGamma = i>100?100:i; - VideoSetGamma(ConfigGamma); - return true; + if (!strcasecmp(name, "Gamma")) { + int i; + + i = atoi(value); + ConfigGamma = i > 100 ? 100 : i; + VideoSetGamma(ConfigGamma); + return true; } - if (!strcasecmp(name, "TargetColorSpace")) { - VideoSetTargetColor(ConfigTargetColorSpace = atoi(value)); - return true; + if (!strcasecmp(name, "TargetColorSpace")) { + VideoSetTargetColor(ConfigTargetColorSpace = atoi(value)); + return true; } if (!strcasecmp(name, "Hue")) { - VideoSetHue(ConfigVideoHue = atoi(value)); - return true; + VideoSetHue(ConfigVideoHue = atoi(value)); + return true; } - if (!strcasecmp(name, "CBlindness")) { - VideoSetColorBlindness(ConfigColorBlindness = atoi(value)); - return true; + if (!strcasecmp(name, "CBlindness")) { + VideoSetColorBlindness(ConfigColorBlindness = atoi(value)); + return true; } - if (!strcasecmp(name, "CBlindnessFaktor")) { - VideoSetColorBlindnessFaktor(ConfigColorBlindnessFaktor = atoi(value)); - return true; + if (!strcasecmp(name, "CBlindnessFaktor")) { + VideoSetColorBlindnessFaktor(ConfigColorBlindnessFaktor = atoi(value)); + return true; } #if 0 - if (!strcasecmp(name, "ScalerTest")) { - VideoSetScalerTest(ConfigScalerTest = atoi(value)); - return true; + if (!strcasecmp(name, "ScalerTest")) { + VideoSetScalerTest(ConfigScalerTest = atoi(value)); + return true; } #endif for (i = 0; i < RESOLUTIONS; ++i) { - char buf[128]; + char buf[128]; - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Scaling"); - if (!strcasecmp(name, buf)) { - ConfigVideoScaling[i] = atoi(value); - VideoSetScaling(ConfigVideoScaling); - return true; - } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Deinterlace"); - if (!strcasecmp(name, buf)) { - ConfigVideoDeinterlace[i] = atoi(value); - VideoSetDeinterlace(ConfigVideoDeinterlace); - return true; - } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], - "SkipChromaDeinterlace"); - if (!strcasecmp(name, buf)) { - ConfigVideoSkipChromaDeinterlace[i] = atoi(value); - VideoSetSkipChromaDeinterlace(ConfigVideoSkipChromaDeinterlace); - return true; - } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "InverseTelecine"); - if (!strcasecmp(name, buf)) { - ConfigVideoInverseTelecine[i] = atoi(value); - VideoSetInverseTelecine(ConfigVideoInverseTelecine); - return true; - } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Denoise"); - if (!strcasecmp(name, buf)) { - ConfigVideoDenoise[i] = atoi(value); - VideoSetDenoise(ConfigVideoDenoise); - return true; - } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Sharpen"); - if (!strcasecmp(name, buf)) { - ConfigVideoSharpen[i] = atoi(value); - VideoSetSharpen(ConfigVideoSharpen); - return true; - } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Scaling"); + if (!strcasecmp(name, buf)) { + ConfigVideoScaling[i] = atoi(value); + VideoSetScaling(ConfigVideoScaling); + return true; + } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Deinterlace"); + if (!strcasecmp(name, buf)) { + ConfigVideoDeinterlace[i] = atoi(value); + VideoSetDeinterlace(ConfigVideoDeinterlace); + return true; + } + 
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "SkipChromaDeinterlace"); + if (!strcasecmp(name, buf)) { + ConfigVideoSkipChromaDeinterlace[i] = atoi(value); + VideoSetSkipChromaDeinterlace(ConfigVideoSkipChromaDeinterlace); + return true; + } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "InverseTelecine"); + if (!strcasecmp(name, buf)) { + ConfigVideoInverseTelecine[i] = atoi(value); + VideoSetInverseTelecine(ConfigVideoInverseTelecine); + return true; + } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Denoise"); + if (!strcasecmp(name, buf)) { + ConfigVideoDenoise[i] = atoi(value); + VideoSetDenoise(ConfigVideoDenoise); + return true; + } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Sharpen"); + if (!strcasecmp(name, buf)) { + ConfigVideoSharpen[i] = atoi(value); + VideoSetSharpen(ConfigVideoSharpen); + return true; + } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutTopBottom"); - if (!strcasecmp(name, buf)) { - ConfigVideoCutTopBottom[i] = atoi(value); - VideoSetCutTopBottom(ConfigVideoCutTopBottom); - return true; - } - snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutLeftRight"); - if (!strcasecmp(name, buf)) { - ConfigVideoCutLeftRight[i] = atoi(value); - VideoSetCutLeftRight(ConfigVideoCutLeftRight); - return true; - } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutTopBottom"); + if (!strcasecmp(name, buf)) { + ConfigVideoCutTopBottom[i] = atoi(value); + VideoSetCutTopBottom(ConfigVideoCutTopBottom); + return true; + } + snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "CutLeftRight"); + if (!strcasecmp(name, buf)) { + ConfigVideoCutLeftRight[i] = atoi(value); + VideoSetCutLeftRight(ConfigVideoCutLeftRight); + return true; + } } if (!strcasecmp(name, "AutoCrop.Interval")) { - VideoSetAutoCrop(ConfigAutoCropInterval = - atoi(value), ConfigAutoCropDelay, ConfigAutoCropTolerance); - ConfigAutoCropEnabled = ConfigAutoCropInterval != 0; - return true; + VideoSetAutoCrop(ConfigAutoCropInterval = atoi(value), ConfigAutoCropDelay, ConfigAutoCropTolerance); + ConfigAutoCropEnabled = ConfigAutoCropInterval != 0; + return true; } if (!strcasecmp(name, "AutoCrop.Delay")) { - VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay = - atoi(value), ConfigAutoCropTolerance); - return true; + VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay = atoi(value), ConfigAutoCropTolerance); + return true; } if (!strcasecmp(name, "AutoCrop.Tolerance")) { - VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, - ConfigAutoCropTolerance = atoi(value)); - return true; + VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, ConfigAutoCropTolerance = atoi(value)); + return true; } if (!strcasecmp(name, "AudioDelay")) { - VideoSetAudioDelay(ConfigVideoAudioDelay = atoi(value)); - return true; + VideoSetAudioDelay(ConfigVideoAudioDelay = atoi(value)); + return true; } if (!strcasecmp(name, "AudioDrift")) { - CodecSetAudioDrift(ConfigAudioDrift = atoi(value)); - return true; + CodecSetAudioDrift(ConfigAudioDrift = atoi(value)); + return true; } if (!strcasecmp(name, "AudioPassthrough")) { - int i; + int i; - i = atoi(value); - AudioPassthroughState = i > 0; - ConfigAudioPassthrough = abs(i); - if (AudioPassthroughState) { - CodecSetAudioPassthrough(ConfigAudioPassthrough); - } else { - CodecSetAudioPassthrough(0); - } - return true; + i = atoi(value); + AudioPassthroughState = i > 0; + ConfigAudioPassthrough = abs(i); + if (AudioPassthroughState) { + CodecSetAudioPassthrough(ConfigAudioPassthrough); + } else { + 
CodecSetAudioPassthrough(0); + } + return true; } if (!strcasecmp(name, "AudioDownmix")) { - CodecSetAudioDownmix(ConfigAudioDownmix = atoi(value)); - return true; + CodecSetAudioDownmix(ConfigAudioDownmix = atoi(value)); + return true; } if (!strcasecmp(name, "AudioSoftvol")) { - AudioSetSoftvol(ConfigAudioSoftvol = atoi(value)); - return true; + AudioSetSoftvol(ConfigAudioSoftvol = atoi(value)); + return true; } if (!strcasecmp(name, "AudioNormalize")) { - ConfigAudioNormalize = atoi(value); - AudioSetNormalize(ConfigAudioNormalize, ConfigAudioMaxNormalize); - return true; + ConfigAudioNormalize = atoi(value); + AudioSetNormalize(ConfigAudioNormalize, ConfigAudioMaxNormalize); + return true; } if (!strcasecmp(name, "AudioMaxNormalize")) { - ConfigAudioMaxNormalize = atoi(value); - AudioSetNormalize(ConfigAudioNormalize, ConfigAudioMaxNormalize); - return true; + ConfigAudioMaxNormalize = atoi(value); + AudioSetNormalize(ConfigAudioNormalize, ConfigAudioMaxNormalize); + return true; } if (!strcasecmp(name, "AudioCompression")) { - ConfigAudioCompression = atoi(value); - AudioSetCompression(ConfigAudioCompression, ConfigAudioMaxCompression); - return true; + ConfigAudioCompression = atoi(value); + AudioSetCompression(ConfigAudioCompression, ConfigAudioMaxCompression); + return true; } if (!strcasecmp(name, "AudioMaxCompression")) { - ConfigAudioMaxCompression = atoi(value); - AudioSetCompression(ConfigAudioCompression, ConfigAudioMaxCompression); - return true; + ConfigAudioMaxCompression = atoi(value); + AudioSetCompression(ConfigAudioCompression, ConfigAudioMaxCompression); + return true; } if (!strcasecmp(name, "AudioStereoDescent")) { - ConfigAudioStereoDescent = atoi(value); - AudioSetStereoDescent(ConfigAudioStereoDescent); - return true; + ConfigAudioStereoDescent = atoi(value); + AudioSetStereoDescent(ConfigAudioStereoDescent); + return true; } if (!strcasecmp(name, "AudioBufferTime")) { - ConfigAudioBufferTime = atoi(value); - AudioSetBufferTime(ConfigAudioBufferTime); - return true; + ConfigAudioBufferTime = atoi(value); + AudioSetBufferTime(ConfigAudioBufferTime); + return true; } if (!strcasecmp(name, "AudioAutoAES")) { - ConfigAudioAutoAES = atoi(value); - AudioSetAutoAES(ConfigAudioAutoAES); - return true; + ConfigAudioAutoAES = atoi(value); + AudioSetAutoAES(ConfigAudioAutoAES); + return true; } #ifdef USE_PIP if (!strcasecmp(name, "pip.X")) { - ConfigPipX = atoi(value); - return true; + ConfigPipX = atoi(value); + return true; } if (!strcasecmp(name, "pip.Y")) { - ConfigPipY = atoi(value); - return true; + ConfigPipY = atoi(value); + return true; } if (!strcasecmp(name, "pip.Width")) { - ConfigPipWidth = atoi(value); - return true; + ConfigPipWidth = atoi(value); + return true; } if (!strcasecmp(name, "pip.Height")) { - ConfigPipHeight = atoi(value); - return true; + ConfigPipHeight = atoi(value); + return true; } if (!strcasecmp(name, "pip.VideoX")) { - ConfigPipVideoX = atoi(value); - return true; + ConfigPipVideoX = atoi(value); + return true; } if (!strcasecmp(name, "pip.VideoY")) { - ConfigPipVideoY = atoi(value); - return true; + ConfigPipVideoY = atoi(value); + return true; } if (!strcasecmp(name, "pip.VideoWidth")) { - ConfigPipVideoWidth = atoi(value); - return true; + ConfigPipVideoWidth = atoi(value); + return true; } if (!strcasecmp(name, "pip.VideoHeight")) { - ConfigPipVideoHeight = atoi(value); - return true; + ConfigPipVideoHeight = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.X")) { - ConfigPipAltX = atoi(value); - return true; + 
ConfigPipAltX = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.Y")) { - ConfigPipAltY = atoi(value); - return true; + ConfigPipAltY = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.Width")) { - ConfigPipAltWidth = atoi(value); - return true; + ConfigPipAltWidth = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.Height")) { - ConfigPipAltHeight = atoi(value); - return true; + ConfigPipAltHeight = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.VideoX")) { - ConfigPipAltVideoX = atoi(value); - return true; + ConfigPipAltVideoX = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.VideoY")) { - ConfigPipAltVideoY = atoi(value); - return true; + ConfigPipAltVideoY = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.VideoWidth")) { - ConfigPipAltVideoWidth = atoi(value); - return true; + ConfigPipAltVideoWidth = atoi(value); + return true; } if (!strcasecmp(name, "pip.Alt.VideoHeight")) { - ConfigPipAltVideoHeight = atoi(value); - return true; + ConfigPipAltVideoHeight = atoi(value); + return true; } #endif #ifdef USE_SCREENSAVER - if (!strcasecmp(name, "EnableDPMSatBlackScreen")) { - ConfigEnableDPMSatBlackScreen = atoi(value); - SetDPMSatBlackScreen(ConfigEnableDPMSatBlackScreen); - return true; + if (!strcasecmp(name, "EnableDPMSatBlackScreen")) { + ConfigEnableDPMSatBlackScreen = atoi(value); + SetDPMSatBlackScreen(ConfigEnableDPMSatBlackScreen); + return true; } #endif #ifdef USE_OPENGLOSD if (!strcasecmp(name, "MaxSizeGPUImageCache")) { - ConfigMaxSizeGPUImageCache = atoi(value); - return true; + ConfigMaxSizeGPUImageCache = atoi(value); + return true; } #endif @@ -3555,70 +3567,69 @@ bool cPluginSoftHdDevice::Service(const char *id, void *data) { //dsyslog("[softhddev]%s: id %s\n", __FUNCTION__, id); if (strcmp(id, OSD_3DMODE_SERVICE) == 0) { - SoftHDDevice_Osd3DModeService_v1_0_t *r; + SoftHDDevice_Osd3DModeService_v1_0_t *r; - r = (SoftHDDevice_Osd3DModeService_v1_0_t *) data; - VideoSetOsd3DMode(r->Mode); - return true; + r = (SoftHDDevice_Osd3DModeService_v1_0_t *) data; + VideoSetOsd3DMode(r->Mode); + return true; } if (strcmp(id, ATMO_GRAB_SERVICE) == 0) { - int width; - int height; + int width; + int height; - if (data == NULL) { - return true; - } + if (data == NULL) { + return true; + } - if (SuspendMode != NOT_SUSPENDED) { - return false; - } + if (SuspendMode != NOT_SUSPENDED) { + return false; + } - SoftHDDevice_AtmoGrabService_v1_0_t *r = - (SoftHDDevice_AtmoGrabService_v1_0_t *) data; - if (r->structSize != sizeof(SoftHDDevice_AtmoGrabService_v1_0_t) - || r->analyseSize < 64 || r->analyseSize > 256 - || r->clippedOverscan < 0 || r->clippedOverscan > 200) { - return false; - } + SoftHDDevice_AtmoGrabService_v1_0_t *r = (SoftHDDevice_AtmoGrabService_v1_0_t *) data; - width = r->analyseSize * -1; // Internal marker for Atmo grab service - height = r->clippedOverscan; + if (r->structSize != sizeof(SoftHDDevice_AtmoGrabService_v1_0_t) + || r->analyseSize < 64 || r->analyseSize > 256 || r->clippedOverscan < 0 || r->clippedOverscan > 200) { + return false; + } - r->img = VideoGrabService(&r->imgSize, &width, &height); - if (r->img == NULL) { - return false; - } - r->imgType = GRAB_IMG_RGBA_FORMAT_B8G8R8A8; - r->width = width; - r->height = height; - return true; + width = r->analyseSize * -1; // Internal marker for Atmo grab service + height = r->clippedOverscan; + + r->img = VideoGrabService(&r->imgSize, &width, &height); + if (r->img == NULL) { + return false; + } + r->imgType = 
GRAB_IMG_RGBA_FORMAT_B8G8R8A8; + r->width = width; + r->height = height; + return true; } if (strcmp(id, ATMO1_GRAB_SERVICE) == 0) { - SoftHDDevice_AtmoGrabService_v1_1_t *r; + SoftHDDevice_AtmoGrabService_v1_1_t *r; - if (!data) { - return true; - } + if (!data) { + return true; + } - if (SuspendMode != NOT_SUSPENDED) { - return false; - } + if (SuspendMode != NOT_SUSPENDED) { + return false; + } - r = (SoftHDDevice_AtmoGrabService_v1_1_t *) data; - r->img = VideoGrabService(&r->size, &r->width, &r->height); - if (!r->img) { - return false; - } - return true; + r = (SoftHDDevice_AtmoGrabService_v1_1_t *) data; + r->img = VideoGrabService(&r->size, &r->width, &r->height); + if (!r->img) { + return false; + } + return true; } return false; } //---------------------------------------------------------------------------- -// cPlugin SVDRP +// cPlugin SVDRP //---------------------------------------------------------------------------- /** @@ -3626,59 +3637,42 @@ bool cPluginSoftHdDevice::Service(const char *id, void *data) ** FIXME: translation? */ static const char *SVDRPHelpText[] = { - "SUSP\n" "\040 Suspend plugin.\n\n" - " The plugin is suspended to save energie. Depending on the setup\n" - " 'softhddevice.Suspend.Close = 0' only the video and audio output\n" - " is stopped or with 'softhddevice.Suspend.Close = 1' the video\n" - " and audio devices are closed.\n" - " If 'softhddevice.Suspend.X11 = 1' is set and the X11 server was\n" - " started by the plugin, the X11 server would also be closed.\n" - " (Stopping X11 while suspended isn't supported yet)\n", - "RESU\n" "\040 Resume plugin.\n\n" - " Resume the suspended plugin. The plugin could be suspended by\n" - " the command line option '-s' or by a previous SUSP command.\n" - " If the x11 server was stopped by the plugin, it will be\n" - " restarted.", - "DETA\n" "\040 Detach plugin.\n\n" - " The plugin will be detached from the audio, video and DVB\n" - " devices. Other programs or plugins can use them now.\n", + "SUSP\n" "\040 Suspend plugin.\n\n" " The plugin is suspended to save energie. Depending on the setup\n" + " 'softhddevice.Suspend.Close = 0' only the video and audio output\n" + " is stopped or with 'softhddevice.Suspend.Close = 1' the video\n" " and audio devices are closed.\n" + " If 'softhddevice.Suspend.X11 = 1' is set and the X11 server was\n" + " started by the plugin, the X11 server would also be closed.\n" + " (Stopping X11 while suspended isn't supported yet)\n", + "RESU\n" "\040 Resume plugin.\n\n" " Resume the suspended plugin. The plugin could be suspended by\n" + " the command line option '-s' or by a previous SUSP command.\n" + " If the x11 server was stopped by the plugin, it will be\n" " restarted.", + "DETA\n" "\040 Detach plugin.\n\n" " The plugin will be detached from the audio, video and DVB\n" + " devices. Other programs or plugins can use them now.\n", "ATTA <-d display> <-a audio> <-p pass>\n" " Attach plugin.\n\n" - " Attach the plugin to audio, video and DVB devices. Use:\n" - " -d display\tdisplay of x11 server (fe. :0.0)\n" - " -a audio\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n" - " -p pass\t\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n", + " Attach the plugin to audio, video and DVB devices. Use:\n" + " -d display\tdisplay of x11 server (fe. :0.0)\n" + " -a audio\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n" + " -p pass\t\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n", "PRIM \n" " Make the primary device.\n\n" - " is the number of device. 
Without number softhddevice becomes\n" - " the primary device. If becoming primary, the plugin is attached\n" - " to the devices. If loosing primary, the plugin is detached from\n" - " the devices.", - "HOTK key\n" " Execute hotkey.\n\n" - " key is the hotkey number, following are supported:\n" - " 10: disable audio pass-through\n" - " 11: enable audio pass-through\n" - " 12: toggle audio pass-through\n" - " 13: decrease audio delay by 10ms\n" - " 14: increase audio delay by 10ms\n" " 15: toggle ac3 mixdown\n" - " 20: disable fullscreen\n\040 21: enable fullscreen\n" - " 22: toggle fullscreen\n" - " 23: disable auto-crop\n\040 24: enable auto-crop\n" - " 25: toggle auto-crop\n" - " 30: stretch 4:3 to display\n\040 31: pillar box 4:3 in display\n" - " 32: center cut-out 4:3 to display\n" - " 39: rotate 4:3 to display zoom mode\n" - " 40: stretch other aspect ratios to display\n" - " 41: letter box other aspect ratios in display\n" - " 42: center cut-out other aspect ratios to display\n" - " 49: rotate other aspect ratios to display zoom mode\n", - "STAT\n" "\040 Display SuspendMode of the plugin.\n\n" - " reply code is 910 + SuspendMode\n" - " SUSPEND_EXTERNAL == -1 (909)\n" - " NOT_SUSPENDED == 0 (910)\n" - " SUSPEND_NORMAL == 1 (911)\n" - " SUSPEND_DETACHED == 2 (912)\n", - "RAIS\n" "\040 Raise softhddevice window\n\n" - " If Xserver is not started by softhddevice, the window which\n" - " contains the softhddevice frontend will be raised to the front.\n", + " is the number of device. Without number softhddevice becomes\n" + " the primary device. If becoming primary, the plugin is attached\n" + " to the devices. If loosing primary, the plugin is detached from\n" " the devices.", + "HOTK key\n" " Execute hotkey.\n\n" " key is the hotkey number, following are supported:\n" + " 10: disable audio pass-through\n" " 11: enable audio pass-through\n" + " 12: toggle audio pass-through\n" " 13: decrease audio delay by 10ms\n" + " 14: increase audio delay by 10ms\n" " 15: toggle ac3 mixdown\n" + " 20: disable fullscreen\n\040 21: enable fullscreen\n" " 22: toggle fullscreen\n" + " 23: disable auto-crop\n\040 24: enable auto-crop\n" " 25: toggle auto-crop\n" + " 30: stretch 4:3 to display\n\040 31: pillar box 4:3 in display\n" + " 32: center cut-out 4:3 to display\n" " 39: rotate 4:3 to display zoom mode\n" + " 40: stretch other aspect ratios to display\n" " 41: letter box other aspect ratios in display\n" + " 42: center cut-out other aspect ratios to display\n" + " 49: rotate other aspect ratios to display zoom mode\n", + "STAT\n" "\040 Display SuspendMode of the plugin.\n\n" " reply code is 910 + SuspendMode\n" + " SUSPEND_EXTERNAL == -1 (909)\n" " NOT_SUSPENDED == 0 (910)\n" + " SUSPEND_NORMAL == 1 (911)\n" " SUSPEND_DETACHED == 2 (912)\n", + "RAIS\n" "\040 Raise softhddevice window\n\n" " If Xserver is not started by softhddevice, the window which\n" + " contains the softhddevice frontend will be raised to the front.\n", NULL }; @@ -3700,183 +3694,183 @@ const char **cPluginSoftHdDevice::SVDRPHelpPages(void) ** @param option all command arguments ** @param reply_code reply code */ -cString cPluginSoftHdDevice::SVDRPCommand(const char *command, - const char *option, __attribute__ ((unused)) int &reply_code) +cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *option, __attribute__((unused)) + int &reply_code) { if (!strcasecmp(command, "STAT")) { - reply_code = 910 + SuspendMode; - switch (SuspendMode) { - case SUSPEND_EXTERNAL: - return "SuspendMode is SUSPEND_EXTERNAL"; - 
case NOT_SUSPENDED: - return "SuspendMode is NOT_SUSPENDED"; - case SUSPEND_NORMAL: - return "SuspendMode is SUSPEND_NORMAL"; - case SUSPEND_DETACHED: - return "SuspendMode is SUSPEND_DETACHED"; - } + reply_code = 910 + SuspendMode; + switch (SuspendMode) { + case SUSPEND_EXTERNAL: + return "SuspendMode is SUSPEND_EXTERNAL"; + case NOT_SUSPENDED: + return "SuspendMode is NOT_SUSPENDED"; + case SUSPEND_NORMAL: + return "SuspendMode is SUSPEND_NORMAL"; + case SUSPEND_DETACHED: + return "SuspendMode is SUSPEND_DETACHED"; + } } if (!strcasecmp(command, "SUSP")) { - if (cSoftHdControl::Player) { // already suspended - return "SoftHdDevice already suspended"; - } - if (SuspendMode != NOT_SUSPENDED) { - return "SoftHdDevice already detached"; - } + if (cSoftHdControl::Player) { // already suspended + return "SoftHdDevice already suspended"; + } + if (SuspendMode != NOT_SUSPENDED) { + return "SoftHdDevice already detached"; + } #ifdef USE_OPENGLOSD - dsyslog("[softhddev]stopping Ogl Thread svdrp STAT"); - cSoftOsdProvider::StopOpenGlThread(); + dsyslog("[softhddev]stopping Ogl Thread svdrp STAT"); + cSoftOsdProvider::StopOpenGlThread(); #endif - cControl::Launch(new cSoftHdControl); - cControl::Attach(); - Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11); - SuspendMode = SUSPEND_NORMAL; - return "SoftHdDevice is suspended"; + cControl::Launch(new cSoftHdControl); + cControl::Attach(); + Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11); + SuspendMode = SUSPEND_NORMAL; + return "SoftHdDevice is suspended"; } if (!strcasecmp(command, "RESU")) { - if (SuspendMode == NOT_SUSPENDED) { - return "SoftHdDevice already resumed"; - } - if (SuspendMode != SUSPEND_NORMAL) { - return "can't resume SoftHdDevice"; - } - if (ShutdownHandler.GetUserInactiveTime()) { - ShutdownHandler.SetUserInactiveTimeout(); - } - if (cSoftHdControl::Player) { // suspended - cControl::Shutdown(); // not need, if not suspended - } - Resume(); - SuspendMode = NOT_SUSPENDED; - return "SoftHdDevice is resumed"; + if (SuspendMode == NOT_SUSPENDED) { + return "SoftHdDevice already resumed"; + } + if (SuspendMode != SUSPEND_NORMAL) { + return "can't resume SoftHdDevice"; + } + if (ShutdownHandler.GetUserInactiveTime()) { + ShutdownHandler.SetUserInactiveTimeout(); + } + if (cSoftHdControl::Player) { // suspended + cControl::Shutdown(); // not need, if not suspended + } + Resume(); + SuspendMode = NOT_SUSPENDED; + return "SoftHdDevice is resumed"; } if (!strcasecmp(command, "DETA")) { - if (SuspendMode == SUSPEND_DETACHED) { - return "SoftHdDevice already detached"; - } - if (cSoftHdControl::Player) { // already suspended - return "can't suspend SoftHdDevice already suspended"; - } + if (SuspendMode == SUSPEND_DETACHED) { + return "SoftHdDevice already detached"; + } + if (cSoftHdControl::Player) { // already suspended + return "can't suspend SoftHdDevice already suspended"; + } #ifdef USE_OPENGLOSD - dsyslog("[softhddev]stopping Ogl Thread svdrp DETA"); - cSoftOsdProvider::StopOpenGlThread(); + dsyslog("[softhddev]stopping Ogl Thread svdrp DETA"); + cSoftOsdProvider::StopOpenGlThread(); #endif - cControl::Launch(new cSoftHdControl); - cControl::Attach(); - Suspend(1, 1, 0); - SuspendMode = SUSPEND_DETACHED; - return "SoftHdDevice is detached"; + cControl::Launch(new cSoftHdControl); + cControl::Attach(); + Suspend(1, 1, 0); + SuspendMode = SUSPEND_DETACHED; + return "SoftHdDevice is detached"; } if (!strcasecmp(command, "ATTA")) { - char *tmp; - char *t; - char *s; - char *o; + char *tmp; + char *t; + 
char *s; + char *o; - if (SuspendMode != SUSPEND_DETACHED) { - return "can't attach SoftHdDevice not detached"; - } - if (!(tmp = strdup(option))) { - return "out of memory"; - } - t = tmp; - while ((s = strsep(&t, " \t\n\r"))) { - if (!strcmp(s, "-d")) { - if (!(o = strsep(&t, " \t\n\r"))) { - free(tmp); - return "missing option argument"; - } - free(ConfigX11Display); - ConfigX11Display = strdup(o); - X11DisplayName = ConfigX11Display; - } else if (!strncmp(s, "-d", 2)) { - free(ConfigX11Display); - ConfigX11Display = strdup(s + 2); - X11DisplayName = ConfigX11Display; + if (SuspendMode != SUSPEND_DETACHED) { + return "can't attach SoftHdDevice not detached"; + } + if (!(tmp = strdup(option))) { + return "out of memory"; + } + t = tmp; + while ((s = strsep(&t, " \t\n\r"))) { + if (!strcmp(s, "-d")) { + if (!(o = strsep(&t, " \t\n\r"))) { + free(tmp); + return "missing option argument"; + } + free(ConfigX11Display); + ConfigX11Display = strdup(o); + X11DisplayName = ConfigX11Display; + } else if (!strncmp(s, "-d", 2)) { + free(ConfigX11Display); + ConfigX11Display = strdup(s + 2); + X11DisplayName = ConfigX11Display; - } else if (!strcmp(s, "-a")) { - if (!(o = strsep(&t, " \t\n\r"))) { - free(tmp); - return "missing option argument"; - } - free(ConfigAudioDevice); - ConfigAudioDevice = strdup(o); - AudioSetDevice(ConfigAudioDevice); - } else if (!strncmp(s, "-a", 2)) { - free(ConfigAudioDevice); - ConfigAudioDevice = strdup(s + 2); - AudioSetDevice(ConfigAudioDevice); + } else if (!strcmp(s, "-a")) { + if (!(o = strsep(&t, " \t\n\r"))) { + free(tmp); + return "missing option argument"; + } + free(ConfigAudioDevice); + ConfigAudioDevice = strdup(o); + AudioSetDevice(ConfigAudioDevice); + } else if (!strncmp(s, "-a", 2)) { + free(ConfigAudioDevice); + ConfigAudioDevice = strdup(s + 2); + AudioSetDevice(ConfigAudioDevice); - } else if (!strcmp(s, "-p")) { - if (!(o = strsep(&t, " \t\n\r"))) { - free(tmp); - return "missing option argument"; - } - free(ConfigPassthroughDevice); - ConfigPassthroughDevice = strdup(o); - AudioSetPassthroughDevice(ConfigPassthroughDevice); - } else if (!strncmp(s, "-p", 2)) { - free(ConfigPassthroughDevice); - ConfigPassthroughDevice = strdup(s + 2); - AudioSetPassthroughDevice(ConfigPassthroughDevice); + } else if (!strcmp(s, "-p")) { + if (!(o = strsep(&t, " \t\n\r"))) { + free(tmp); + return "missing option argument"; + } + free(ConfigPassthroughDevice); + ConfigPassthroughDevice = strdup(o); + AudioSetPassthroughDevice(ConfigPassthroughDevice); + } else if (!strncmp(s, "-p", 2)) { + free(ConfigPassthroughDevice); + ConfigPassthroughDevice = strdup(s + 2); + AudioSetPassthroughDevice(ConfigPassthroughDevice); - } else if (*s) { - free(tmp); - return "unsupported option"; - } - } - free(tmp); - if (ShutdownHandler.GetUserInactiveTime()) { - ShutdownHandler.SetUserInactiveTimeout(); - } - if (cSoftHdControl::Player) { // suspended - cControl::Shutdown(); // not need, if not suspended - } - Resume(); - SuspendMode = NOT_SUSPENDED; - return "SoftHdDevice is attached"; + } else if (*s) { + free(tmp); + return "unsupported option"; + } + } + free(tmp); + if (ShutdownHandler.GetUserInactiveTime()) { + ShutdownHandler.SetUserInactiveTimeout(); + } + if (cSoftHdControl::Player) { // suspended + cControl::Shutdown(); // not need, if not suspended + } + Resume(); + SuspendMode = NOT_SUSPENDED; + return "SoftHdDevice is attached"; } if (!strcasecmp(command, "HOTK")) { - int hotk; + int hotk; - hotk = strtol(option, NULL, 0); - HandleHotkey(hotk); - return "hot-key 
executed"; + hotk = strtol(option, NULL, 0); + HandleHotkey(hotk); + return "hot-key executed"; } if (!strcasecmp(command, "PRIM")) { - int primary; + int primary; - primary = strtol(option, NULL, 0); - if (!primary && MyDevice) { - primary = MyDevice->DeviceNumber() + 1; - } - dsyslog("[softhddev] switching primary device to %d\n", primary); - DoMakePrimary = primary; - return "switching primary device requested"; + primary = strtol(option, NULL, 0); + if (!primary && MyDevice) { + primary = MyDevice->DeviceNumber() + 1; + } + dsyslog("[softhddev] switching primary device to %d\n", primary); + DoMakePrimary = primary; + return "switching primary device requested"; } if (!strcasecmp(command, "3DOF")) { - VideoSetOsd3DMode(0); - return "3d off"; + VideoSetOsd3DMode(0); + return "3d off"; } if (!strcasecmp(command, "3DSB")) { - VideoSetOsd3DMode(1); - return "3d sbs"; + VideoSetOsd3DMode(1); + return "3d sbs"; } if (!strcasecmp(command, "3DTB")) { - VideoSetOsd3DMode(2); - return "3d tb"; + VideoSetOsd3DMode(2); + return "3d tb"; } if (!strcasecmp(command, "RAIS")) { - if (!ConfigStartX11Server) { - VideoRaiseWindow(); - } else { - return "Raise not possible"; - } - return "Window raised"; + if (!ConfigStartX11Server) { + VideoRaiseWindow(); + } else { + return "Raise not possible"; + } + return "Window raised"; } return NULL; } -VDRPLUGINCREATOR(cPluginSoftHdDevice); // Don't touch this! +VDRPLUGINCREATOR(cPluginSoftHdDevice); // Don't touch this! diff --git a/softhddev.c b/softhddev.c index a38e9d2..5c9fef6 100644 --- a/softhddev.c +++ b/softhddev.c @@ -1,28 +1,28 @@ /// -/// @file softhddev.c @brief A software HD device plugin for VDR. +/// @file softhddev.c @brief A software HD device plugin for VDR. /// -/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: 8881600a16f475cba7db8911ad88ce2234f72d14 $ +/// $Id: 8881600a16f475cba7db8911ad88ce2234f72d14 $ ////////////////////////////////////////////////////////////////////////////// -#define noUSE_SOFTLIMIT ///< add soft buffer limits to Play.. -#define noUSE_PIP ///< include PIP support + new API -#define noDUMP_TRICKSPEED ///< dump raw trickspeed packets +#define noUSE_SOFTLIMIT ///< add soft buffer limits to Play.. 
+#define noUSE_PIP ///< include PIP support + new API +#define noDUMP_TRICKSPEED ///< dump raw trickspeed packets #include #include @@ -39,8 +39,8 @@ #include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #include #include @@ -50,7 +50,7 @@ #endif #include -#include "iatomic.h" // portable atomic_t +#include "iatomic.h" // portable atomic_t #include "misc.h" #include "softhddev.h" @@ -64,126 +64,126 @@ static void DumpMpeg(const uint8_t * data, int size); #endif ////////////////////////////////////////////////////////////////////////////// -// Variables +// Variables ////////////////////////////////////////////////////////////////////////////// -extern int ConfigAudioBufferTime; ///< config size ms of audio buffer -extern int ConfigVideoClearOnSwitch; //< clear decoder on channel switch -char ConfigStartX11Server; ///< flag start the x11 server -static signed char ConfigStartSuspended; ///< flag to start in suspend mode -static char ConfigFullscreen; ///< fullscreen modus -static const char *X11ServerArguments; ///< default command arguments -static char ConfigStillDecoder; ///< hw/sw decoder for still picture +extern int ConfigAudioBufferTime; ///< config size ms of audio buffer +extern int ConfigVideoClearOnSwitch; //< clear decoder on channel switch +char ConfigStartX11Server; ///< flag start the x11 server +static signed char ConfigStartSuspended; ///< flag to start in suspend mode +static char ConfigFullscreen; ///< fullscreen modus +static const char *X11ServerArguments; ///< default command arguments +static char ConfigStillDecoder; ///< hw/sw decoder for still picture -static pthread_mutex_t SuspendLockMutex; ///< suspend lock mutex +static pthread_mutex_t SuspendLockMutex; ///< suspend lock mutex -static volatile char StreamFreezed; ///< stream freezed +static volatile char StreamFreezed; ///< stream freezed ////////////////////////////////////////////////////////////////////////////// -// Audio +// Audio ////////////////////////////////////////////////////////////////////////////// -static volatile char NewAudioStream; ///< new audio stream -static volatile char SkipAudio; ///< skip audio stream -static AudioDecoder *MyAudioDecoder; ///< audio decoder -static enum AVCodecID AudioCodecID; ///< current codec id -static int AudioChannelID; ///< current audio channel id -static VideoStream *AudioSyncStream; ///< video stream for audio/video sync +static volatile char NewAudioStream; ///< new audio stream +static volatile char SkipAudio; ///< skip audio stream +static AudioDecoder *MyAudioDecoder; ///< audio decoder +static enum AVCodecID AudioCodecID; ///< current codec id +static int AudioChannelID; ///< current audio channel id +static VideoStream *AudioSyncStream; ///< video stream for audio/video sync /// Minimum free space in audio buffer 8 packets for 8 channels #define AUDIO_MIN_BUFFER_FREE (3072 * 8 * 8) -#define AUDIO_BUFFER_SIZE (512 * 1024) ///< audio PES buffer default size -static AVPacket AudioAvPkt[1]; ///< audio a/v packet +#define AUDIO_BUFFER_SIZE (512 * 1024) ///< audio PES buffer default size +static AVPacket AudioAvPkt[1]; ///< audio a/v packet int AudioDelay = 0; ////////////////////////////////////////////////////////////////////////////// -// Audio codec parser +// Audio codec parser ////////////////////////////////////////////////////////////////////////////// /// -/// Mpeg bitrate table. 
+/// Mpeg bitrate table. /// -/// BitRateTable[Version][Layer][Index] +/// BitRateTable[Version][Layer][Index] /// static const uint16_t BitRateTable[2][4][16] = { // MPEG Version 1 {{}, - {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, - 0}, - {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0}, - {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0}}, + {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, + 0}, + {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0}, + {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0}}, // MPEG Version 2 & 2.5 {{}, - {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0}, - {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, - {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0} - } + {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0}, + {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, + {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0} + } }; /// -/// Mpeg samperate table. +/// Mpeg samperate table. /// static const uint16_t SampleRateTable[4] = { 44100, 48000, 32000, 0 }; /// -/// Fast check for Mpeg audio. +/// Fast check for Mpeg audio. /// -/// 4 bytes 0xFFExxxxx Mpeg audio +/// 4 bytes 0xFFExxxxx Mpeg audio /// static inline int FastMpegCheck(const uint8_t * p) { - if (p[0] != 0xFF) { // 11bit frame sync - return 0; + if (p[0] != 0xFF) { // 11bit frame sync + return 0; } if ((p[1] & 0xE0) != 0xE0) { - return 0; + return 0; } - if ((p[1] & 0x18) == 0x08) { // version ID - 01 reserved - return 0; + if ((p[1] & 0x18) == 0x08) { // version ID - 01 reserved + return 0; } - if (!(p[1] & 0x06)) { // layer description - 00 reserved - return 0; + if (!(p[1] & 0x06)) { // layer description - 00 reserved + return 0; } - if ((p[2] & 0xF0) == 0xF0) { // bitrate index - 1111 reserved - return 0; + if ((p[2] & 0xF0) == 0xF0) { // bitrate index - 1111 reserved + return 0; } - if ((p[2] & 0x0C) == 0x0C) { // sampling rate index - 11 reserved - return 0; + if ((p[2] & 0x0C) == 0x0C) { // sampling rate index - 11 reserved + return 0; } return 1; } /// -/// Check for Mpeg audio. +/// Check for Mpeg audio. /// -/// 0xFFEx already checked. +/// 0xFFEx already checked. /// -/// @param data incomplete PES packet -/// @param size number of bytes +/// @param data incomplete PES packet +/// @param size number of bytes /// -/// @retval <0 possible mpeg audio, but need more data -/// @retval 0 no valid mpeg audio -/// @retval >0 valid mpeg audio +/// @retval <0 possible mpeg audio, but need more data +/// @retval 0 no valid mpeg audio +/// @retval >0 valid mpeg audio /// -/// From: http://www.mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm +/// From: http://www.mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm /// -/// AAAAAAAA AAABBCCD EEEEFFGH IIJJKLMM +/// AAAAAAAA AAABBCCD EEEEFFGH IIJJKLMM /// -/// o a 11x Frame sync -/// o b 2x Mpeg audio version (2.5, reserved, 2, 1) -/// o c 2x Layer (reserved, III, II, I) -/// o e 2x BitRate index -/// o f 2x SampleRate index (4100, 48000, 32000, 0) -/// o g 1x Paddding bit -/// o .. Doesn't care +/// o a 11x Frame sync +/// o b 2x Mpeg audio version (2.5, reserved, 2, 1) +/// o c 2x Layer (reserved, III, II, I) +/// o e 2x BitRate index +/// o f 2x SampleRate index (4100, 48000, 32000, 0) +/// o g 1x Paddding bit +/// o .. 
Doesn't care /// -/// frame length: -/// Layer I: -/// FrameLengthInBytes = (12 * BitRate / SampleRate + Padding) * 4 -/// Layer II & III: -/// FrameLengthInBytes = 144 * BitRate / SampleRate + Padding +/// frame length: +/// Layer I: +/// FrameLengthInBytes = (12 * BitRate / SampleRate + Padding) * 4 +/// Layer II & III: +/// FrameLengthInBytes = 144 * BitRate / SampleRate + Padding /// static int MpegCheck(const uint8_t * data, int size) { @@ -205,77 +205,75 @@ static int MpegCheck(const uint8_t * data, int size) padding = (data[2] >> 1) & 0x01; sample_rate = SampleRateTable[sample_rate_index]; - if (!sample_rate) { // no valid sample rate try next - // moved into fast check - abort(); - return 0; + if (!sample_rate) { // no valid sample rate try next + // moved into fast check + abort(); + return 0; } - sample_rate >>= mpeg2; // mpeg 2 half rate - sample_rate >>= mpeg25; // mpeg 2.5 quarter rate + sample_rate >>= mpeg2; // mpeg 2 half rate + sample_rate >>= mpeg25; // mpeg 2.5 quarter rate bit_rate = BitRateTable[mpeg2 | mpeg25][layer][bit_rate_index]; - if (!bit_rate) { // no valid bit-rate try next - // FIXME: move into fast check? - return 0; + if (!bit_rate) { // no valid bit-rate try next + // FIXME: move into fast check? + return 0; } bit_rate *= 1000; switch (layer) { - case 1: - frame_size = (12 * bit_rate) / sample_rate; - frame_size = (frame_size + padding) * 4; - break; - case 2: - case 3: - default: - frame_size = (144 * bit_rate) / sample_rate; - frame_size = frame_size + padding; - break; + case 1: + frame_size = (12 * bit_rate) / sample_rate; + frame_size = (frame_size + padding) * 4; + break; + case 2: + case 3: + default: + frame_size = (144 * bit_rate) / sample_rate; + frame_size = frame_size + padding; + break; } if (0) { - Debug(3, - "pesdemux: mpeg%s layer%d bitrate=%d samplerate=%d %d bytes\n", - mpeg25 ? "2.5" : mpeg2 ? "2" : "1", layer, bit_rate, sample_rate, - frame_size); + Debug(3, "pesdemux: mpeg%s layer%d bitrate=%d samplerate=%d %d bytes\n", mpeg25 ? "2.5" : mpeg2 ? "2" : "1", + layer, bit_rate, sample_rate, frame_size); } if (frame_size + 4 > size) { - return -frame_size - 4; + return -frame_size - 4; } // check if after this frame a new mpeg frame starts if (FastMpegCheck(data + frame_size)) { - return frame_size; + return frame_size; } return 0; } /// -/// Fast check for AAC LATM audio. +/// Fast check for AAC LATM audio. /// -/// 3 bytes 0x56Exxx AAC LATM audio +/// 3 bytes 0x56Exxx AAC LATM audio /// static inline int FastLatmCheck(const uint8_t * p) { - if (p[0] != 0x56) { // 11bit sync - return 0; + if (p[0] != 0x56) { // 11bit sync + return 0; } if ((p[1] & 0xE0) != 0xE0) { - return 0; + return 0; } return 1; } /// -/// Check for AAC LATM audio. +/// Check for AAC LATM audio. /// -/// 0x56Exxx already checked. +/// 0x56Exxx already checked. 
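The frame-length formulas quoted in the comment above are easy to sanity-check in isolation; as a hypothetical example, an MPEG-1 Layer II frame at 192 kbit/s and 48 kHz with no padding comes out to 144 * 192000 / 48000 = 576 bytes. A minimal sketch of the same arithmetic, not part of this change:

/* illustrative only: mirrors the Layer I vs. Layer II/III formulas above */
#include <stdio.h>

static int MpegFrameBytes(int layer, int bit_rate, int sample_rate, int padding)
{
    if (layer == 1) {                   // Layer I counts 4-byte slots
        return (12 * bit_rate / sample_rate + padding) * 4;
    }
    return 144 * bit_rate / sample_rate + padding;  // Layer II & III
}

int main(void)
{
    printf("%d\n", MpegFrameBytes(2, 192000, 48000, 0));   // prints 576
    return 0;
}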
/// -/// @param data incomplete PES packet -/// @param size number of bytes +/// @param data incomplete PES packet +/// @param size number of bytes /// -/// @retval <0 possible AAC LATM audio, but need more data -/// @retval 0 no valid AAC LATM audio -/// @retval >0 valid AAC LATM audio +/// @retval <0 possible AAC LATM audio, but need more data +/// @retval 0 no valid AAC LATM audio +/// @retval >0 valid AAC LATM audio /// static int LatmCheck(const uint8_t * data, int size) { @@ -286,20 +284,20 @@ static int LatmCheck(const uint8_t * data, int size) frame_size += 3; if (frame_size + 2 > size) { - return -frame_size - 2; + return -frame_size - 2; } // check if after this frame a new AAC LATM frame starts if (FastLatmCheck(data + frame_size)) { - return frame_size; + return frame_size; } return 0; } /// -/// Possible AC-3 frame sizes. +/// Possible AC-3 frame sizes. /// -/// from ATSC A/52 table 5.18 frame size code table. +/// from ATSC A/52 table 5.18 frame size code table. /// const uint16_t Ac3FrameSizeTable[38][3] = { {64, 69, 96}, {64, 70, 96}, {80, 87, 120}, {80, 88, 120}, @@ -315,144 +313,144 @@ const uint16_t Ac3FrameSizeTable[38][3] = { }; /// -/// Fast check for (E-)AC-3 audio. +/// Fast check for (E-)AC-3 audio. /// -/// 5 bytes 0x0B77xxxxxx AC-3 audio +/// 5 bytes 0x0B77xxxxxx AC-3 audio /// static inline int FastAc3Check(const uint8_t * p) { - if (p[0] != 0x0B) { // 16bit sync - return 0; + if (p[0] != 0x0B) { // 16bit sync + return 0; } if (p[1] != 0x77) { - return 0; + return 0; } return 1; } /// -/// Check for (E-)AC-3 audio. +/// Check for (E-)AC-3 audio. /// -/// 0x0B77xxxxxx already checked. +/// 0x0B77xxxxxx already checked. /// -/// @param data incomplete PES packet -/// @param size number of bytes +/// @param data incomplete PES packet +/// @param size number of bytes /// -/// @retval <0 possible AC-3 audio, but need more data -/// @retval 0 no valid AC-3 audio -/// @retval >0 valid AC-3 audio +/// @retval <0 possible AC-3 audio, but need more data +/// @retval 0 no valid AC-3 audio +/// @retval >0 valid AC-3 audio /// -/// o AC-3 Header -/// AAAAAAAA AAAAAAAA BBBBBBBB BBBBBBBB CCDDDDDD EEEEEFFF +/// o AC-3 Header +/// AAAAAAAA AAAAAAAA BBBBBBBB BBBBBBBB CCDDDDDD EEEEEFFF /// -/// o a 16x Frame sync, always 0x0B77 -/// o b 16x CRC 16 -/// o c 2x Samplerate -/// o d 6x Framesize code -/// o e 5x Bitstream ID -/// o f 3x Bitstream mode +/// o a 16x Frame sync, always 0x0B77 +/// o b 16x CRC 16 +/// o c 2x Samplerate +/// o d 6x Framesize code +/// o e 5x Bitstream ID +/// o f 3x Bitstream mode /// -/// o E-AC-3 Header -/// AAAAAAAA AAAAAAAA BBCCCDDD DDDDDDDD EEFFGGGH IIIII... +/// o E-AC-3 Header +/// AAAAAAAA AAAAAAAA BBCCCDDD DDDDDDDD EEFFGGGH IIIII... 
/// -/// o a 16x Frame sync, always 0x0B77 -/// o b 2x Frame type -/// o c 3x Sub stream ID -/// o d 10x Framesize - 1 in words -/// o e 2x Framesize code -/// o f 2x Framesize code 2 +/// o a 16x Frame sync, always 0x0B77 +/// o b 2x Frame type +/// o c 3x Sub stream ID +/// o d 10x Framesize - 1 in words +/// o e 2x Framesize code +/// o f 2x Framesize code 2 /// static int Ac3Check(const uint8_t * data, int size) { int frame_size; - if (size < 5) { // need 5 bytes to see if AC-3/E-AC-3 - return -5; + if (size < 5) { // need 5 bytes to see if AC-3/E-AC-3 + return -5; } - if (data[5] > (10 << 3)) { // E-AC-3 - if ((data[4] & 0xF0) == 0xF0) { // invalid fscod fscod2 - return 0; - } - frame_size = ((data[2] & 0x03) << 8) + data[3] + 1; - frame_size *= 2; - } else { // AC-3 - int fscod; - int frmsizcod; + if (data[5] > (10 << 3)) { // E-AC-3 + if ((data[4] & 0xF0) == 0xF0) { // invalid fscod fscod2 + return 0; + } + frame_size = ((data[2] & 0x03) << 8) + data[3] + 1; + frame_size *= 2; + } else { // AC-3 + int fscod; + int frmsizcod; - // crc1 crc1 fscod|frmsizcod - fscod = data[4] >> 6; - if (fscod == 0x03) { // invalid sample rate - return 0; - } - frmsizcod = data[4] & 0x3F; - if (frmsizcod > 37) { // invalid frame size - return 0; - } - // invalid is checked above - frame_size = Ac3FrameSizeTable[frmsizcod][fscod] * 2; + // crc1 crc1 fscod|frmsizcod + fscod = data[4] >> 6; + if (fscod == 0x03) { // invalid sample rate + return 0; + } + frmsizcod = data[4] & 0x3F; + if (frmsizcod > 37) { // invalid frame size + return 0; + } + // invalid is checked above + frame_size = Ac3FrameSizeTable[frmsizcod][fscod] * 2; } if (frame_size + 5 > size) { - return -frame_size - 5; + return -frame_size - 5; } // FIXME: relaxed checks if codec is already detected // check if after this frame a new AC-3 frame starts if (FastAc3Check(data + frame_size)) { - return frame_size; + return frame_size; } return 0; } /// -/// Fast check for ADTS Audio Data Transport Stream. +/// Fast check for ADTS Audio Data Transport Stream. /// -/// 7/9 bytes 0xFFFxxxxxxxxxxx(xxxx) ADTS audio +/// 7/9 bytes 0xFFFxxxxxxxxxxx(xxxx) ADTS audio /// static inline int FastAdtsCheck(const uint8_t * p) { - if (p[0] != 0xFF) { // 12bit sync - return 0; + if (p[0] != 0xFF) { // 12bit sync + return 0; } - if ((p[1] & 0xF6) != 0xF0) { // sync + layer must be 0 - return 0; + if ((p[1] & 0xF6) != 0xF0) { // sync + layer must be 0 + return 0; } - if ((p[2] & 0x3C) == 0x3C) { // sampling frequency index != 15 - return 0; + if ((p[2] & 0x3C) == 0x3C) { // sampling frequency index != 15 + return 0; } return 1; } /// -/// Check for ADTS Audio Data Transport Stream. +/// Check for ADTS Audio Data Transport Stream. /// -/// 0xFFF already checked. +/// 0xFFF already checked. /// -/// @param data incomplete PES packet -/// @param size number of bytes +/// @param data incomplete PES packet +/// @param size number of bytes /// -/// @retval <0 possible ADTS audio, but need more data -/// @retval 0 no valid ADTS audio -/// @retval >0 valid AC-3 audio +/// @retval <0 possible ADTS audio, but need more data +/// @retval 0 no valid ADTS audio +/// @retval >0 valid AC-3 audio /// -/// AAAAAAAA AAAABCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP -/// (QQQQQQQQ QQQQQQQ) +/// AAAAAAAA AAAABCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP +/// (QQQQQQQQ QQQQQQQ) /// -/// o A*12 syncword 0xFFF -/// o B*1 MPEG Version: 0 for MPEG-4, 1 for MPEG-2 -/// o C*2 layer: always 0 -/// o .. -/// o F*4 sampling frequency index (15 is invalid) -/// o .. 
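As an aside to Ac3Check() above, the size computation for the two bitstream generations reads more easily in isolation. A sketch under the byte layouts quoted above, reusing the Ac3FrameSizeTable declared earlier in this file (illustrative only, not part of this change):

/* illustrative only: frame size in bytes, byte offsets as in Ac3Check() above */
#include <stdint.h>

extern const uint16_t Ac3FrameSizeTable[38][3]; // defined earlier in this file

static int Ac3FrameBytes(const uint8_t *p)
{
    int fscod;
    int frmsizcod;

    if (p[5] > (10 << 3)) {             // bitstream id above 10: E-AC-3
        // frame size field from bytes 2..3, stored as words - 1
        return (((p[2] & 0x03) << 8) + p[3] + 1) * 2;
    }
    // AC-3: fscod (2 bits) and frmsizcod (6 bits) share byte 4
    fscod = p[4] >> 6;
    frmsizcod = p[4] & 0x3F;
    if (fscod == 0x03 || frmsizcod > 37) {
        return 0;                       // reserved sample rate / frame size code
    }
    return Ac3FrameSizeTable[frmsizcod][fscod] * 2; // table entries are 16-bit words
}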
-/// o M*13 frame length +/// o A*12 syncword 0xFFF +/// o B*1 MPEG Version: 0 for MPEG-4, 1 for MPEG-2 +/// o C*2 layer: always 0 +/// o .. +/// o F*4 sampling frequency index (15 is invalid) +/// o .. +/// o M*13 frame length /// static int AdtsCheck(const uint8_t * data, int size) { int frame_size; if (size < 6) { - return -6; + return -6; } frame_size = (data[3] & 0x03) << 11; @@ -460,28 +458,28 @@ static int AdtsCheck(const uint8_t * data, int size) frame_size |= (data[5] & 0xE0) >> 5; if (frame_size + 3 > size) { - return -frame_size - 3; + return -frame_size - 3; } // check if after this frame a new ADTS frame starts if (FastAdtsCheck(data + frame_size)) { - return frame_size; + return frame_size; } return 0; } ////////////////////////////////////////////////////////////////////////////// -// PES Demux +// PES Demux ////////////////////////////////////////////////////////////////////////////// /// -/// PES type. +/// PES type. /// enum { PES_PROG_STREAM_MAP = 0xBC, PES_PRIVATE_STREAM1 = 0xBD, - PES_PADDING_STREAM = 0xBE, ///< filler, padding stream + PES_PADDING_STREAM = 0xBE, ///< filler, padding stream PES_PRIVATE_STREAM2 = 0xBF, PES_AUDIO_STREAM_S = 0xC0, PES_AUDIO_STREAM_E = 0xDF, @@ -491,60 +489,60 @@ enum PES_EMM_STREAM = 0xF1, PES_DSM_CC_STREAM = 0xF2, PES_ISO13522_STREAM = 0xF3, - PES_TYPE_E_STREAM = 0xF8, ///< ITU-T rec. h.222.1 type E stream + PES_TYPE_E_STREAM = 0xF8, ///< ITU-T rec. h.222.1 type E stream PES_PROG_STREAM_DIR = 0xFF, }; #ifndef NO_TS_AUDIO /// -/// PES parser state. +/// PES parser state. /// enum { - PES_INIT, ///< unknown codec + PES_INIT, ///< unknown codec - PES_SKIP, ///< skip packet - PES_SYNC, ///< search packet sync byte - PES_HEADER, ///< copy header - PES_START, ///< pes packet start found - PES_PAYLOAD, ///< copy payload + PES_SKIP, ///< skip packet + PES_SYNC, ///< search packet sync byte + PES_HEADER, ///< copy header + PES_START, ///< pes packet start found + PES_PAYLOAD, ///< copy payload - PES_LPCM_HEADER, ///< copy lcpm header - PES_LPCM_PAYLOAD, ///< copy lcpm payload + PES_LPCM_HEADER, ///< copy lcpm header + PES_LPCM_PAYLOAD, ///< copy lcpm payload }; -#define PES_START_CODE_SIZE 6 ///< size of pes start code with length -#define PES_HEADER_SIZE 9 ///< size of pes header -#define PES_MAX_HEADER_SIZE (PES_HEADER_SIZE + 256) ///< maximal header size -#define PES_MAX_PAYLOAD (512 * 1024) ///< max pay load size +#define PES_START_CODE_SIZE 6 ///< size of pes start code with length +#define PES_HEADER_SIZE 9 ///< size of pes header +#define PES_MAX_HEADER_SIZE (PES_HEADER_SIZE + 256) ///< maximal header size +#define PES_MAX_PAYLOAD (512 * 1024) ///< max pay load size /// -/// PES demuxer. +/// PES demuxer. 
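The start-code ids above partition the PES id space; as a small illustration of how they are typically used, a hypothetical classifier over the constants defined above (not part of this change):

/* illustrative only: classify a PES start code id using the enum above */
#include <stdint.h>

static const char *PesIdKind(uint8_t id)
{
    if (id >= PES_AUDIO_STREAM_S && id <= PES_AUDIO_STREAM_E) {
        return "mpeg audio";            // 0xC0 .. 0xDF
    }
    if (id == PES_PRIVATE_STREAM1) {
        return "private stream 1";      // carries AC-3 / LPCM sub streams
    }
    if (id == PES_PADDING_STREAM) {
        return "padding";               // filler, may be dropped
    }
    return "other";
}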
/// typedef struct _pes_demux_ { - //int Pid; ///< packet id - //int PcrPid; ///< program clock reference pid - //int StreamType; ///< stream type + //int Pid; ///< packet id + //int PcrPid; ///< program clock reference pid + //int StreamType; ///< stream type - int State; ///< parsing state - uint8_t Header[PES_MAX_HEADER_SIZE]; ///< buffer for pes header - int HeaderIndex; ///< header index - int HeaderSize; ///< size of pes header - uint8_t *Buffer; ///< payload buffer - int Index; ///< buffer index - int Skip; ///< buffer skip - int Size; ///< size of payload buffer + int State; ///< parsing state + uint8_t Header[PES_MAX_HEADER_SIZE]; ///< buffer for pes header + int HeaderIndex; ///< header index + int HeaderSize; ///< size of pes header + uint8_t *Buffer; ///< payload buffer + int Index; ///< buffer index + int Skip; ///< buffer skip + int Size; ///< size of payload buffer - uint8_t StartCode; ///< pes packet start code + uint8_t StartCode; ///< pes packet start code - int64_t PTS; ///< presentation time stamp - int64_t DTS; ///< decode time stamp + int64_t PTS; ///< presentation time stamp + int64_t DTS; ///< decode time stamp } PesDemux; /// -/// Reset packetized elementary stream demuxer. +/// Reset packetized elementary stream demuxer. /// static void PesReset(PesDemux * pesdx) { @@ -557,9 +555,9 @@ static void PesReset(PesDemux * pesdx) } /// -/// Initialize a packetized elementary stream demuxer. +/// Initialize a packetized elementary stream demuxer. /// -/// @param pesdx packetized elementary stream demuxer +/// @param pesdx packetized elementary stream demuxer /// static void PesInit(PesDemux * pesdx) { @@ -567,333 +565,323 @@ static void PesInit(PesDemux * pesdx) pesdx->Size = PES_MAX_PAYLOAD; pesdx->Buffer = av_malloc(PES_MAX_PAYLOAD + AV_INPUT_BUFFER_PADDING_SIZE); if (!pesdx->Buffer) { - Fatal(_("pesdemux: out of memory\n")); + Fatal(_("pesdemux: out of memory\n")); } PesReset(pesdx); } /// -/// Parse packetized elementary stream. +/// Parse packetized elementary stream. 
/// -/// @param pesdx packetized elementary stream demuxer -/// @param data payload data of transport stream -/// @param size number of payload data bytes -/// @param is_start flag, start of pes packet +/// @param pesdx packetized elementary stream demuxer +/// @param data payload data of transport stream +/// @param size number of payload data bytes +/// @param is_start flag, start of pes packet /// -static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, - int is_start) +static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_start) { const uint8_t *p; const uint8_t *q; - if (is_start) { // start of pes packet - if (pesdx->Index && pesdx->Skip) { - // copy remaining bytes down - pesdx->Index -= pesdx->Skip; - memmove(pesdx->Buffer, pesdx->Buffer + pesdx->Skip, pesdx->Index); - pesdx->Skip = 0; - } - pesdx->State = PES_SYNC; - pesdx->HeaderIndex = 0; - pesdx->PTS = AV_NOPTS_VALUE; // reset if not yet used - pesdx->DTS = AV_NOPTS_VALUE; + if (is_start) { // start of pes packet + if (pesdx->Index && pesdx->Skip) { + // copy remaining bytes down + pesdx->Index -= pesdx->Skip; + memmove(pesdx->Buffer, pesdx->Buffer + pesdx->Skip, pesdx->Index); + pesdx->Skip = 0; + } + pesdx->State = PES_SYNC; + pesdx->HeaderIndex = 0; + pesdx->PTS = AV_NOPTS_VALUE; // reset if not yet used + pesdx->DTS = AV_NOPTS_VALUE; } // cleanup, if too much cruft if (pesdx->Skip > PES_MAX_PAYLOAD / 2) { - // copy remaining bytes down - pesdx->Index -= pesdx->Skip; - memmove(pesdx->Buffer, pesdx->Buffer + pesdx->Skip, pesdx->Index); - pesdx->Skip = 0; + // copy remaining bytes down + pesdx->Index -= pesdx->Skip; + memmove(pesdx->Buffer, pesdx->Buffer + pesdx->Skip, pesdx->Index); + pesdx->Skip = 0; } p = data; do { - int n; + int n; - switch (pesdx->State) { - case PES_SKIP: // skip this packet - return; + switch (pesdx->State) { + case PES_SKIP: // skip this packet + return; - case PES_START: // at start of pes packet payload + case PES_START: // at start of pes packet payload #if 0 - // Played with PlayAudio - // FIXME: need 0x80 -- 0xA0 state - if (AudioCodecID == AV_CODEC_ID_NONE) { - if ((*p & 0xF0) == 0x80) { // AC-3 & DTS - Debug(3, "pesdemux: dvd ac-3\n"); - } else if ((*p & 0xFF) == 0xA0) { // LPCM - Debug(3, "pesdemux: dvd lpcm\n"); - pesdx->State = PES_LPCM_HEADER; - pesdx->HeaderIndex = 0; - pesdx->HeaderSize = 7; - // FIXME: need harder LPCM check - //break; - } - } + // Played with PlayAudio + // FIXME: need 0x80 -- 0xA0 state + if (AudioCodecID == AV_CODEC_ID_NONE) { + if ((*p & 0xF0) == 0x80) { // AC-3 & DTS + Debug(3, "pesdemux: dvd ac-3\n"); + } else if ((*p & 0xFF) == 0xA0) { // LPCM + Debug(3, "pesdemux: dvd lpcm\n"); + pesdx->State = PES_LPCM_HEADER; + pesdx->HeaderIndex = 0; + pesdx->HeaderSize = 7; + // FIXME: need harder LPCM check + //break; + } + } #endif - case PES_INIT: // find start of audio packet - // FIXME: increase if needed the buffer + case PES_INIT: // find start of audio packet + // FIXME: increase if needed the buffer - // fill buffer - n = pesdx->Size - pesdx->Index; - if (n > size) { - n = size; - } - memcpy(pesdx->Buffer + pesdx->Index, p, n); - pesdx->Index += n; - p += n; - size -= n; + // fill buffer + n = pesdx->Size - pesdx->Index; + if (n > size) { + n = size; + } + memcpy(pesdx->Buffer + pesdx->Index, p, n); + pesdx->Index += n; + p += n; + size -= n; - q = pesdx->Buffer + pesdx->Skip; - n = pesdx->Index - pesdx->Skip; - while (n >= 5) { - int r; - unsigned codec_id = AV_CODEC_ID_NONE; + q = pesdx->Buffer + pesdx->Skip; + n = 
pesdx->Index - pesdx->Skip; + while (n >= 5) { + int r; + unsigned codec_id = AV_CODEC_ID_NONE; - // 4 bytes 0xFFExxxxx Mpeg audio - // 5 bytes 0x0B77xxxxxx AC-3 audio - // 6 bytes 0x0B77xxxxxxxx E-AC-3 audio - // 3 bytes 0x56Exxx AAC LATM audio - // 7/9 bytes 0xFFFxxxxxxxxxxx ADTS audio - // PCM audio can't be found - // FIXME: simple+faster detection, if codec already known - r = 0; - if (!r && FastMpegCheck(q)) { - r = MpegCheck(q, n); - codec_id = AV_CODEC_ID_MP2; - } - if (!r && FastAc3Check(q)) { - r = Ac3Check(q, n); - codec_id = AV_CODEC_ID_AC3; - if (r > 0 && q[5] > (10 << 3)) { - codec_id = AV_CODEC_ID_EAC3; - } - } - if (!r && FastLatmCheck(q)) { - r = LatmCheck(q, n); - codec_id = AV_CODEC_ID_AAC_LATM; - } - if (!r && FastAdtsCheck(q)) { - r = AdtsCheck(q, n); - codec_id = AV_CODEC_ID_AAC; - } - if (r < 0) { // need more bytes - break; - } - if (r > 0) { - AVPacket avpkt[1]; + // 4 bytes 0xFFExxxxx Mpeg audio + // 5 bytes 0x0B77xxxxxx AC-3 audio + // 6 bytes 0x0B77xxxxxxxx E-AC-3 audio + // 3 bytes 0x56Exxx AAC LATM audio + // 7/9 bytes 0xFFFxxxxxxxxxxx ADTS audio + // PCM audio can't be found + // FIXME: simple+faster detection, if codec already known + r = 0; + if (!r && FastMpegCheck(q)) { + r = MpegCheck(q, n); + codec_id = AV_CODEC_ID_MP2; + } + if (!r && FastAc3Check(q)) { + r = Ac3Check(q, n); + codec_id = AV_CODEC_ID_AC3; + if (r > 0 && q[5] > (10 << 3)) { + codec_id = AV_CODEC_ID_EAC3; + } + } + if (!r && FastLatmCheck(q)) { + r = LatmCheck(q, n); + codec_id = AV_CODEC_ID_AAC_LATM; + } + if (!r && FastAdtsCheck(q)) { + r = AdtsCheck(q, n); + codec_id = AV_CODEC_ID_AAC; + } + if (r < 0) { // need more bytes + break; + } + if (r > 0) { + AVPacket avpkt[1]; - // new codec id, close and open new - if (AudioCodecID != codec_id) { - Debug(3, "pesdemux: new codec %#06x -> %#06x\n", - AudioCodecID, codec_id); - CodecAudioClose(MyAudioDecoder); - CodecAudioOpen(MyAudioDecoder, codec_id); - AudioCodecID = codec_id; - } - av_init_packet(avpkt); - avpkt->data = (void *)q; - avpkt->size = r; - avpkt->pts = pesdx->PTS; - avpkt->dts = pesdx->DTS; - // FIXME: not aligned for ffmpeg - CodecAudioDecode(MyAudioDecoder, avpkt); - pesdx->PTS = AV_NOPTS_VALUE; - pesdx->DTS = AV_NOPTS_VALUE; - pesdx->Skip += r; - // FIXME: switch to decoder state - //pesdx->State = PES_MPEG_DECODE; - break; - } - if (AudioCodecID != AV_CODEC_ID_NONE) { - // shouldn't happen after we have a vaild codec - // detected - Debug(4, "pesdemux: skip @%d %02x\n", pesdx->Skip,q[0]); - } - // try next byte - ++pesdx->Skip; - ++q; - --n; - } - break; + // new codec id, close and open new + if (AudioCodecID != codec_id) { + Debug(3, "pesdemux: new codec %#06x -> %#06x\n", AudioCodecID, codec_id); + CodecAudioClose(MyAudioDecoder); + CodecAudioOpen(MyAudioDecoder, codec_id); + AudioCodecID = codec_id; + } + av_init_packet(avpkt); + avpkt->data = (void *)q; + avpkt->size = r; + avpkt->pts = pesdx->PTS; + avpkt->dts = pesdx->DTS; + // FIXME: not aligned for ffmpeg + CodecAudioDecode(MyAudioDecoder, avpkt); + pesdx->PTS = AV_NOPTS_VALUE; + pesdx->DTS = AV_NOPTS_VALUE; + pesdx->Skip += r; + // FIXME: switch to decoder state + //pesdx->State = PES_MPEG_DECODE; + break; + } + if (AudioCodecID != AV_CODEC_ID_NONE) { + // shouldn't happen after we have a vaild codec + // detected + Debug(4, "pesdemux: skip @%d %02x\n", pesdx->Skip, q[0]); + } + // try next byte + ++pesdx->Skip; + ++q; + --n; + } + break; - case PES_SYNC: // wait for pes sync - n = PES_START_CODE_SIZE - pesdx->HeaderIndex; - if (n > size) { - n = size; - } - 
memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); - pesdx->HeaderIndex += n; - p += n; - size -= n; + case PES_SYNC: // wait for pes sync + n = PES_START_CODE_SIZE - pesdx->HeaderIndex; + if (n > size) { + n = size; + } + memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); + pesdx->HeaderIndex += n; + p += n; + size -= n; - // have complete packet start code - if (pesdx->HeaderIndex >= PES_START_CODE_SIZE) { - unsigned code; + // have complete packet start code + if (pesdx->HeaderIndex >= PES_START_CODE_SIZE) { + unsigned code; - // bad mpeg pes packet start code prefix 0x00001xx - if (pesdx->Header[0] || pesdx->Header[1] - || pesdx->Header[2] != 0x01) { - Debug(3, "pesdemux: bad pes packet\n"); - pesdx->State = PES_SKIP; - return; - } - code = pesdx->Header[3]; - if (code != pesdx->StartCode) { - Debug(3, "pesdemux: pes start code id %#02x\n", code); - // FIXME: need to save start code id? - pesdx->StartCode = code; - // we could have already detect a valid stream type - // don't switch to codec 'none' - } + // bad mpeg pes packet start code prefix 0x00001xx + if (pesdx->Header[0] || pesdx->Header[1] + || pesdx->Header[2] != 0x01) { + Debug(3, "pesdemux: bad pes packet\n"); + pesdx->State = PES_SKIP; + return; + } + code = pesdx->Header[3]; + if (code != pesdx->StartCode) { + Debug(3, "pesdemux: pes start code id %#02x\n", code); + // FIXME: need to save start code id? + pesdx->StartCode = code; + // we could have already detect a valid stream type + // don't switch to codec 'none' + } - pesdx->State = PES_HEADER; - pesdx->HeaderSize = PES_HEADER_SIZE; - } - break; + pesdx->State = PES_HEADER; + pesdx->HeaderSize = PES_HEADER_SIZE; + } + break; - case PES_HEADER: // parse PES header - n = pesdx->HeaderSize - pesdx->HeaderIndex; - if (n > size) { - n = size; - } - memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); - pesdx->HeaderIndex += n; - p += n; - size -= n; + case PES_HEADER: // parse PES header + n = pesdx->HeaderSize - pesdx->HeaderIndex; + if (n > size) { + n = size; + } + memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); + pesdx->HeaderIndex += n; + p += n; + size -= n; - // have header upto size bits - if (pesdx->HeaderIndex == PES_HEADER_SIZE) { - if ((pesdx->Header[6] & 0xC0) != 0x80) { - Error(_("pesdemux: mpeg1 pes packet unsupported\n")); - pesdx->State = PES_SKIP; - return; - } - // have pes extension - if (!pesdx->Header[8]) { - goto empty_header; - } - pesdx->HeaderSize += pesdx->Header[8]; - // have complete header - } else if (pesdx->HeaderIndex == pesdx->HeaderSize) { - int64_t pts; - int64_t dts; + // have header upto size bits + if (pesdx->HeaderIndex == PES_HEADER_SIZE) { + if ((pesdx->Header[6] & 0xC0) != 0x80) { + Error(_("pesdemux: mpeg1 pes packet unsupported\n")); + pesdx->State = PES_SKIP; + return; + } + // have pes extension + if (!pesdx->Header[8]) { + goto empty_header; + } + pesdx->HeaderSize += pesdx->Header[8]; + // have complete header + } else if (pesdx->HeaderIndex == pesdx->HeaderSize) { + int64_t pts; + int64_t dts; - if ((pesdx->Header[7] & 0xC0) == 0x80) { - pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | - (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] - & 0xFE) >> 1; - pesdx->PTS = pts; - pesdx->DTS = AV_NOPTS_VALUE; - } else if ((pesdx->Header[7] & 0xC0) == 0xC0) { - pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | - (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] - & 0xFE) >> 1; - pesdx->PTS = pts; - dts = - (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 - | (data[16] & 0xFE) << 14 | data[17] << 7 | - 
(data[18] & 0xFE) >> 1; - pesdx->DTS = dts; - Debug(4,"pesdemux: pts %#012" PRIx64 " %#012" PRIx64 "\n", pts, dts); - } - empty_header: - pesdx->State = PES_INIT; - if (pesdx->StartCode == PES_PRIVATE_STREAM1) { - // only private stream 1, has sub streams - pesdx->State = PES_START; - } - } - break; + if ((pesdx->Header[7] & 0xC0) == 0x80) { + pts = + (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 + | (data[13] + & 0xFE) >> 1; + pesdx->PTS = pts; + pesdx->DTS = AV_NOPTS_VALUE; + } else if ((pesdx->Header[7] & 0xC0) == 0xC0) { + pts = + (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 + | (data[13] + & 0xFE) >> 1; + pesdx->PTS = pts; + dts = + (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] << + 7 | (data[18] & 0xFE) >> 1; + pesdx->DTS = dts; + Debug(4, "pesdemux: pts %#012" PRIx64 " %#012" PRIx64 "\n", pts, dts); + } + empty_header: + pesdx->State = PES_INIT; + if (pesdx->StartCode == PES_PRIVATE_STREAM1) { + // only private stream 1, has sub streams + pesdx->State = PES_START; + } + } + break; #if 0 - // Played with PlayAudio - case PES_LPCM_HEADER: // lpcm header - n = pesdx->HeaderSize - pesdx->HeaderIndex; - if (n > size) { - n = size; - } - memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); - pesdx->HeaderIndex += n; - p += n; - size -= n; + // Played with PlayAudio + case PES_LPCM_HEADER: // lpcm header + n = pesdx->HeaderSize - pesdx->HeaderIndex; + if (n > size) { + n = size; + } + memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); + pesdx->HeaderIndex += n; + p += n; + size -= n; - if (pesdx->HeaderIndex == pesdx->HeaderSize) { - static int samplerates[] = { 48000, 96000, 44100, 32000 }; - int samplerate; - int channels; - int bits_per_sample; - const uint8_t *q; + if (pesdx->HeaderIndex == pesdx->HeaderSize) { + static int samplerates[] = { 48000, 96000, 44100, 32000 }; + int samplerate; + int channels; + int bits_per_sample; + const uint8_t *q; - if (AudioCodecID != AV_CODEC_ID_PCM_DVD) { + if (AudioCodecID != AV_CODEC_ID_PCM_DVD) { - q = pesdx->Header; - Debug(3, "pesdemux: LPCM %d sr:%d bits:%d chan:%d\n", - q[0], q[5] >> 4, (((q[5] >> 6) & 0x3) + 4) * 4, - (q[5] & 0x7) + 1); - CodecAudioClose(MyAudioDecoder); + q = pesdx->Header; + Debug(3, "pesdemux: LPCM %d sr:%d bits:%d chan:%d\n", q[0], q[5] >> 4, + (((q[5] >> 6) & 0x3) + 4) * 4, (q[5] & 0x7) + 1); + CodecAudioClose(MyAudioDecoder); - bits_per_sample = (((q[5] >> 6) & 0x3) + 4) * 4; - if (bits_per_sample != 16) { - Error(_ - ("softhddev: LPCM %d bits per sample aren't supported\n"), - bits_per_sample); - // FIXME: handle unsupported formats. - } - samplerate = samplerates[q[5] >> 4]; - channels = (q[5] & 0x7) + 1; - AudioSetup(&samplerate, &channels, 0); - if (samplerate != samplerates[q[5] >> 4]) { - Error(_ - ("softhddev: LPCM %d sample-rate is unsupported\n"), - samplerates[q[5] >> 4]); - // FIXME: support resample - } - if (channels != (q[5] & 0x7) + 1) { - Error(_ - ("softhddev: LPCM %d channels are unsupported\n"), - (q[5] & 0x7) + 1); - // FIXME: support resample - } - //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); - AudioCodecID = AV_CODEC_ID_PCM_DVD; - } - pesdx->State = PES_LPCM_PAYLOAD; - pesdx->Index = 0; - pesdx->Skip = 0; - } - break; + bits_per_sample = (((q[5] >> 6) & 0x3) + 4) * 4; + if (bits_per_sample != 16) { + Error(_("softhddev: LPCM %d bits per sample aren't supported\n"), bits_per_sample); + // FIXME: handle unsupported formats. 
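The PTS/DTS expressions in the header parser above reassemble a 33-bit clock value from five header bytes with interleaved marker bits. A standalone sketch of the same unpacking, assuming d points at the first of the five timestamp bytes (data + 9 in the parser above; not part of this change):

/* illustrative only: 33-bit PES timestamp, marker bits masked away */
#include <stdint.h>

static int64_t PesTimestamp(const uint8_t *d)
{
    return (int64_t)(d[0] & 0x0E) << 29 // bits 32..30
        | d[1] << 22                    // bits 29..22
        | (d[2] & 0xFE) << 14           // bits 21..15
        | d[3] << 7                     // bits 14..7
        | (d[4] & 0xFE) >> 1;           // bits 6..0
}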
+ } + samplerate = samplerates[q[5] >> 4]; + channels = (q[5] & 0x7) + 1; + AudioSetup(&samplerate, &channels, 0); + if (samplerate != samplerates[q[5] >> 4]) { + Error(_("softhddev: LPCM %d sample-rate is unsupported\n"), samplerates[q[5] >> 4]); + // FIXME: support resample + } + if (channels != (q[5] & 0x7) + 1) { + Error(_("softhddev: LPCM %d channels are unsupported\n"), (q[5] & 0x7) + 1); + // FIXME: support resample + } + //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); + AudioCodecID = AV_CODEC_ID_PCM_DVD; + } + pesdx->State = PES_LPCM_PAYLOAD; + pesdx->Index = 0; + pesdx->Skip = 0; + } + break; - case PES_LPCM_PAYLOAD: // lpcm payload - // fill buffer - n = pesdx->Size - pesdx->Index; - if (n > size) { - n = size; - } - memcpy(pesdx->Buffer + pesdx->Index, p, n); - pesdx->Index += n; - p += n; - size -= n; + case PES_LPCM_PAYLOAD: // lpcm payload + // fill buffer + n = pesdx->Size - pesdx->Index; + if (n > size) { + n = size; + } + memcpy(pesdx->Buffer + pesdx->Index, p, n); + pesdx->Index += n; + p += n; + size -= n; - if (pesdx->PTS != (int64_t) AV_NOPTS_VALUE) { - // FIXME: needs bigger buffer - AudioSetClock(pesdx->PTS); - pesdx->PTS = AV_NOPTS_VALUE; - } - swab(pesdx->Buffer, pesdx->Buffer, pesdx->Index); - AudioEnqueue(pesdx->Buffer, pesdx->Index); - pesdx->Index = 0; - break; + if (pesdx->PTS != (int64_t) AV_NOPTS_VALUE) { + // FIXME: needs bigger buffer + AudioSetClock(pesdx->PTS); + pesdx->PTS = AV_NOPTS_VALUE; + } + swab(pesdx->Buffer, pesdx->Buffer, pesdx->Index); + AudioEnqueue(pesdx->Buffer, pesdx->Index); + pesdx->Index = 0; + break; #endif - } + } } while (size > 0); } ////////////////////////////////////////////////////////////////////////////// -// Transport stream demux +// Transport stream demux ////////////////////////////////////////////////////////////////////////////// /// Transport stream packet size @@ -902,28 +890,28 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, #define TS_PACKET_SYNC 0x47 /// -/// transport stream demuxer typedef. +/// transport stream demuxer typedef. /// typedef struct _ts_demux_ TsDemux; /// -/// transport stream demuxer structure. +/// transport stream demuxer structure. /// struct _ts_demux_ { - int Packets; ///< packets between PCR + int Packets; ///< packets between PCR }; -static PesDemux PesDemuxAudio[1]; ///< audio demuxer +static PesDemux PesDemuxAudio[1]; ///< audio demuxer /// -/// Transport stream demuxer. +/// Transport stream demuxer. /// -/// @param tsdx transport stream demuxer -/// @param data buffer of transport stream packets -/// @param size size of buffer +/// @param tsdx transport stream demuxer +/// @param data buffer of transport stream packets +/// @param size size of buffer /// -/// @returns number of bytes consumed from buffer. +/// @returns number of bytes consumed from buffer. 
/// static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) { @@ -932,61 +920,60 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) p = data; while (size >= TS_PACKET_SIZE) { #ifdef DEBUG - int pid; + int pid; #endif - int payload; + int payload; - if (p[0] != TS_PACKET_SYNC) { - Error(_("tsdemux: transport stream out of sync\n")); - // FIXME: kill all buffers - return size; - } - ++tsdx->Packets; - if (p[1] & 0x80) { // error indicator - Debug(3, "tsdemux: transport error\n"); - // FIXME: kill all buffers - goto next_packet; - } + if (p[0] != TS_PACKET_SYNC) { + Error(_("tsdemux: transport stream out of sync\n")); + // FIXME: kill all buffers + return size; + } + ++tsdx->Packets; + if (p[1] & 0x80) { // error indicator + Debug(3, "tsdemux: transport error\n"); + // FIXME: kill all buffers + goto next_packet; + } #ifdef DEBUG - pid = (p[1] & 0x1F) << 8 | p[2]; - Debug(4, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "", - p[3] & 0x10 ? " payload" : ""); + pid = (p[1] & 0x1F) << 8 | p[2]; + Debug(4, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "", p[3] & 0x10 ? " payload" : ""); #endif - // skip adaptation field - switch (p[3] & 0x30) { // adaption field - case 0x00: // reserved - case 0x20: // adaptation field only - default: - goto next_packet; - case 0x10: // only payload - payload = 4; - break; - case 0x30: // skip adapation field - payload = 5 + p[4]; - // illegal length, ignore packet - if (payload >= TS_PACKET_SIZE) { - Debug(3, "tsdemux: illegal adaption field length\n"); - goto next_packet; - } - break; - } + // skip adaptation field + switch (p[3] & 0x30) { // adaption field + case 0x00: // reserved + case 0x20: // adaptation field only + default: + goto next_packet; + case 0x10: // only payload + payload = 4; + break; + case 0x30: // skip adapation field + payload = 5 + p[4]; + // illegal length, ignore packet + if (payload >= TS_PACKET_SIZE) { + Debug(3, "tsdemux: illegal adaption field length\n"); + goto next_packet; + } + break; + } - PesParse(PesDemuxAudio, p + payload, TS_PACKET_SIZE - payload, p[1] & 0x40); + PesParse(PesDemuxAudio, p + payload, TS_PACKET_SIZE - payload, p[1] & 0x40); #if 0 - int tmp; + int tmp; - // check continuity - tmp = p[3] & 0x0F; // continuity counter - if (((tsdx->CC + 1) & 0x0F) != tmp) { - Debug(3, "tsdemux: OUT OF SYNC: %d %d\n", tmp, tsdx->CC); - //TS discontinuity (received 8, expected 0) for PID - } - tsdx->CC = tmp; + // check continuity + tmp = p[3] & 0x0F; // continuity counter + if (((tsdx->CC + 1) & 0x0F) != tmp) { + Debug(3, "tsdemux: OUT OF SYNC: %d %d\n", tmp, tsdx->CC); + //TS discontinuity (received 8, expected 0) for PID + } + tsdx->CC = tmp; #endif - next_packet: - p += TS_PACKET_SIZE; - size -= TS_PACKET_SIZE; + next_packet: + p += TS_PACKET_SIZE; + size -= TS_PACKET_SIZE; } return p - data; @@ -1008,36 +995,35 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) // channel switch: SetAudioChannelDevice: SetDigitalAudioDevice: - if (SkipAudio || !MyAudioDecoder) { // skip audio - return size; + if (SkipAudio || !MyAudioDecoder) { // skip audio + return size; } - if (StreamFreezed ) { // stream freezed - return 0; + if (StreamFreezed) { // stream freezed + return 0; + } + if (AudioDelay) { + Debug(3, "AudioDelay %dms\n", AudioDelay); + usleep(AudioDelay / 90); + AudioDelay = 0; + return 0; } - if (AudioDelay) { - Debug(3,"AudioDelay %dms\n",AudioDelay); - usleep(AudioDelay/90); - AudioDelay = 0; - return 0; - } if (NewAudioStream) { - // this clears the audio 
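TsDemuxer() above walks fixed 188-byte packets and only looks at a handful of header fields. A sketch of just that header parsing, under the same field layout (illustrative only, not part of this change):

/* illustrative only: the TS header fields inspected by TsDemuxer() above */
#include <stdint.h>

typedef struct {
    int pid;                            ///< 13-bit packet id
    int error;                          ///< transport_error_indicator
    int start;                          ///< payload_unit_start_indicator
    int payload;                        ///< payload offset in packet, 0 = none
} TsHeader;

static int TsParseHeader(const uint8_t *p, TsHeader *h)
{
    if (p[0] != 0x47) {                 // TS_PACKET_SYNC
        return -1;                      // out of sync
    }
    h->error = !!(p[1] & 0x80);
    h->start = !!(p[1] & 0x40);
    h->pid = (p[1] & 0x1F) << 8 | p[2];
    switch (p[3] & 0x30) {              // adaptation_field_control
        case 0x10:                      // payload only
            h->payload = 4;
            break;
        case 0x30:                      // adaptation field followed by payload
            h->payload = 5 + p[4];
            if (h->payload >= 188) {    // illegal adaptation field length
                h->payload = 0;
            }
            break;
        default:                        // reserved or adaptation field only
            h->payload = 0;
            break;
    }
    return 0;
}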
ringbuffer indirect, open and setup does it - CodecAudioClose(MyAudioDecoder); - AudioFlushBuffers(); - AudioSetBufferTime(ConfigAudioBufferTime); - AudioCodecID = AV_CODEC_ID_NONE; - AudioChannelID = -1; - NewAudioStream = 0; + // this clears the audio ringbuffer indirect, open and setup does it + CodecAudioClose(MyAudioDecoder); + AudioFlushBuffers(); + AudioSetBufferTime(ConfigAudioBufferTime); + AudioCodecID = AV_CODEC_ID_NONE; + AudioChannelID = -1; + NewAudioStream = 0; } // hard limit buffer full: don't overrun audio buffers on replay if (AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE) { - return 0; + return 0; } #ifdef USE_SOFTLIMIT // soft limit buffer full - if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3 - && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { - return 0; + if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3 && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { + return 0; } #endif // PES header 0x00 0x00 0x01 ID @@ -1045,108 +1031,102 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) // must be a PES start code if (size < 9 || !data || data[0] || data[1] || data[2] != 0x01) { - Error(_("[softhddev] invalid PES audio packet\n")); - return size; + Error(_("[softhddev] invalid PES audio packet\n")); + return size; } - n = data[8]; // header size + n = data[8]; // header size - if (size < 9 + n + 4) { // wrong size - if (size == 9 + n) { - Warning(_("[softhddev] empty audio packet\n")); - } else { - Error(_("[softhddev] invalid audio packet %d bytes\n"), size); - } - return size; + if (size < 9 + n + 4) { // wrong size + if (size == 9 + n) { + Warning(_("[softhddev] empty audio packet\n")); + } else { + Error(_("[softhddev] invalid audio packet %d bytes\n"), size); + } + return size; } if (data[7] & 0x80 && n >= 5) { - AudioAvPkt->pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & - 0xFE) << 14 | data[12] << 7 | (data[13] & 0xFE) >> 1; - //Debug(3, "audio: pts %#012" PRIx64 "\n", AudioAvPkt->pts); + AudioAvPkt->pts = + (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] & + 0xFE) >> 1; + //Debug(3, "audio: pts %#012" PRIx64 "\n", AudioAvPkt->pts); } - if (0) { // dts is unused - if (data[7] & 0x40) { - AudioAvPkt->dts = - (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] - & 0xFE) << 14 | data[17] << 7 | (data[18] & 0xFE) >> 1; - Debug(3, "audio: dts %#012" PRIx64 "\n", AudioAvPkt->dts); - } + if (0) { // dts is unused + if (data[7] & 0x40) { + AudioAvPkt->dts = (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] + & 0xFE) << 14 | data[17] << 7 | (data[18] & 0xFE) >> 1; + Debug(3, "audio: dts %#012" PRIx64 "\n", AudioAvPkt->dts); + } } p = data + 9 + n; - n = size - 9 - n; // skip pes header + n = size - 9 - n; // skip pes header if (n + AudioAvPkt->stream_index > AudioAvPkt->size) { - Fatal(_("[softhddev] audio buffer too small\n")); - AudioAvPkt->stream_index = 0; + Fatal(_("[softhddev] audio buffer too small\n")); + AudioAvPkt->stream_index = 0; } - if (AudioChannelID != id) { // id changed audio track changed - AudioChannelID = id; - AudioCodecID = AV_CODEC_ID_NONE; - Debug(3, "audio/demux: new channel id\n"); + if (AudioChannelID != id) { // id changed audio track changed + AudioChannelID = id; + AudioCodecID = AV_CODEC_ID_NONE; + Debug(3, "audio/demux: new channel id\n"); } // Private stream + LPCM ID if ((id & 0xF0) == 0xA0) { - if (n < 7) { - Error(_("[softhddev] invalid LPCM audio packet %d bytes\n"), size); - return size; - } - if 
(AudioCodecID != AV_CODEC_ID_PCM_DVD) { - static int samplerates[] = { 48000, 96000, 44100, 32000 }; - int samplerate; - int channels; - int bits_per_sample; + if (n < 7) { + Error(_("[softhddev] invalid LPCM audio packet %d bytes\n"), size); + return size; + } + if (AudioCodecID != AV_CODEC_ID_PCM_DVD) { + static int samplerates[] = { 48000, 96000, 44100, 32000 }; + int samplerate; + int channels; + int bits_per_sample; - Debug(3, "[softhddev]%s: LPCM %d sr:%d bits:%d chan:%d\n", - __FUNCTION__, id, p[5] >> 4, (((p[5] >> 6) & 0x3) + 4) * 4, - (p[5] & 0x7) + 1); - CodecAudioClose(MyAudioDecoder); + Debug(3, "[softhddev]%s: LPCM %d sr:%d bits:%d chan:%d\n", __FUNCTION__, id, p[5] >> 4, + (((p[5] >> 6) & 0x3) + 4) * 4, (p[5] & 0x7) + 1); + CodecAudioClose(MyAudioDecoder); - bits_per_sample = (((p[5] >> 6) & 0x3) + 4) * 4; - if (bits_per_sample != 16) { - Error(_ - ("[softhddev] LPCM %d bits per sample aren't supported\n"), - bits_per_sample); - // FIXME: handle unsupported formats. - } - samplerate = samplerates[p[5] >> 4]; - channels = (p[5] & 0x7) + 1; + bits_per_sample = (((p[5] >> 6) & 0x3) + 4) * 4; + if (bits_per_sample != 16) { + Error(_("[softhddev] LPCM %d bits per sample aren't supported\n"), bits_per_sample); + // FIXME: handle unsupported formats. + } + samplerate = samplerates[p[5] >> 4]; + channels = (p[5] & 0x7) + 1; - // FIXME: ConfigAudioBufferTime + x - AudioSetBufferTime(400); - AudioSetup(&samplerate, &channels, 0); - if (samplerate != samplerates[p[5] >> 4]) { - Error(_("[softhddev] LPCM %d sample-rate is unsupported\n"), - samplerates[p[5] >> 4]); - // FIXME: support resample - } - if (channels != (p[5] & 0x7) + 1) { - Error(_("[softhddev] LPCM %d channels are unsupported\n"), - (p[5] & 0x7) + 1); - // FIXME: support resample - } - //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); - AudioCodecID = AV_CODEC_ID_PCM_DVD; - } + // FIXME: ConfigAudioBufferTime + x + AudioSetBufferTime(400); + AudioSetup(&samplerate, &channels, 0); + if (samplerate != samplerates[p[5] >> 4]) { + Error(_("[softhddev] LPCM %d sample-rate is unsupported\n"), samplerates[p[5] >> 4]); + // FIXME: support resample + } + if (channels != (p[5] & 0x7) + 1) { + Error(_("[softhddev] LPCM %d channels are unsupported\n"), (p[5] & 0x7) + 1); + // FIXME: support resample + } + //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); + AudioCodecID = AV_CODEC_ID_PCM_DVD; + } - if (AudioAvPkt->pts != (int64_t) AV_NOPTS_VALUE) { - AudioSetClock(AudioAvPkt->pts); - AudioAvPkt->pts = AV_NOPTS_VALUE; - } - swab(p + 7, AudioAvPkt->data, n - 7); - AudioEnqueue(AudioAvPkt->data, n - 7); + if (AudioAvPkt->pts != (int64_t) AV_NOPTS_VALUE) { + AudioSetClock(AudioAvPkt->pts); + AudioAvPkt->pts = AV_NOPTS_VALUE; + } + swab(p + 7, AudioAvPkt->data, n - 7); + AudioEnqueue(AudioAvPkt->data, n - 7); - return size; + return size; } // DVD track header if ((id & 0xF0) == 0x80 && (p[0] & 0xF0) == 0x80) { - p += 4; - n -= 4; // skip track header - if (AudioCodecID == AV_CODEC_ID_NONE) { - // FIXME: ConfigAudioBufferTime + x - AudioSetBufferTime(400); - } + p += 4; + n -= 4; // skip track header + if (AudioCodecID == AV_CODEC_ID_NONE) { + // FIXME: ConfigAudioBufferTime + x + AudioSetBufferTime(400); + } } // append new packet, to partial old data memcpy(AudioAvPkt->data + AudioAvPkt->stream_index, p, n); @@ -1155,73 +1135,73 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) n = AudioAvPkt->stream_index; p = AudioAvPkt->data; while (n >= 5) { - int r; - unsigned codec_id; + int r; + unsigned codec_id; - // 4 bytes 
0xFFExxxxx Mpeg audio - // 3 bytes 0x56Exxx AAC LATM audio - // 5 bytes 0x0B77xxxxxx AC-3 audio - // 6 bytes 0x0B77xxxxxxxx E-AC-3 audio - // 7/9 bytes 0xFFFxxxxxxxxxxx ADTS audio - // PCM audio can't be found - r = 0; - codec_id = AV_CODEC_ID_NONE; // keep compiler happy - if (id != 0xbd && FastMpegCheck(p)) { - r = MpegCheck(p, n); - codec_id = AV_CODEC_ID_MP2; - } - if (id != 0xbd && !r && FastLatmCheck(p)) { - r = LatmCheck(p, n); - codec_id = AV_CODEC_ID_AAC_LATM; - } - if ((id == 0xbd || (id & 0xF0) == 0x80) && !r && FastAc3Check(p)) { - r = Ac3Check(p, n); - codec_id = AV_CODEC_ID_AC3; - if (r > 0 && p[5] > (10 << 3)) { - codec_id = AV_CODEC_ID_EAC3; - } - /* faster ac3 detection at end of pes packet (no improvemnts) - if (AudioCodecID == codec_id && -r - 2 == n) { - r = n; - } - */ - } - if (id != 0xbd && !r && FastAdtsCheck(p)) { - r = AdtsCheck(p, n); - codec_id = AV_CODEC_ID_AAC; - } - if (r < 0) { // need more bytes - break; - } - if (r > 0) { - AVPacket avpkt[1]; + // 4 bytes 0xFFExxxxx Mpeg audio + // 3 bytes 0x56Exxx AAC LATM audio + // 5 bytes 0x0B77xxxxxx AC-3 audio + // 6 bytes 0x0B77xxxxxxxx E-AC-3 audio + // 7/9 bytes 0xFFFxxxxxxxxxxx ADTS audio + // PCM audio can't be found + r = 0; + codec_id = AV_CODEC_ID_NONE; // keep compiler happy + if (id != 0xbd && FastMpegCheck(p)) { + r = MpegCheck(p, n); + codec_id = AV_CODEC_ID_MP2; + } + if (id != 0xbd && !r && FastLatmCheck(p)) { + r = LatmCheck(p, n); + codec_id = AV_CODEC_ID_AAC_LATM; + } + if ((id == 0xbd || (id & 0xF0) == 0x80) && !r && FastAc3Check(p)) { + r = Ac3Check(p, n); + codec_id = AV_CODEC_ID_AC3; + if (r > 0 && p[5] > (10 << 3)) { + codec_id = AV_CODEC_ID_EAC3; + } + /* faster ac3 detection at end of pes packet (no improvemnts) + if (AudioCodecID == codec_id && -r - 2 == n) { + r = n; + } + */ + } + if (id != 0xbd && !r && FastAdtsCheck(p)) { + r = AdtsCheck(p, n); + codec_id = AV_CODEC_ID_AAC; + } + if (r < 0) { // need more bytes + break; + } + if (r > 0) { + AVPacket avpkt[1]; - // new codec id, close and open new - if (AudioCodecID != codec_id) { - CodecAudioClose(MyAudioDecoder); - CodecAudioOpen(MyAudioDecoder, codec_id); - AudioCodecID = codec_id; - } - av_init_packet(avpkt); - avpkt->data = (void *)p; - avpkt->size = r; - avpkt->pts = AudioAvPkt->pts; - avpkt->dts = AudioAvPkt->dts; - // FIXME: not aligned for ffmpeg - CodecAudioDecode(MyAudioDecoder, avpkt); - AudioAvPkt->pts = AV_NOPTS_VALUE; - AudioAvPkt->dts = AV_NOPTS_VALUE; - p += r; - n -= r; - continue; - } - ++p; - --n; + // new codec id, close and open new + if (AudioCodecID != codec_id) { + CodecAudioClose(MyAudioDecoder); + CodecAudioOpen(MyAudioDecoder, codec_id); + AudioCodecID = codec_id; + } + av_init_packet(avpkt); + avpkt->data = (void *)p; + avpkt->size = r; + avpkt->pts = AudioAvPkt->pts; + avpkt->dts = AudioAvPkt->dts; + // FIXME: not aligned for ffmpeg + CodecAudioDecode(MyAudioDecoder, avpkt); + AudioAvPkt->pts = AV_NOPTS_VALUE; + AudioAvPkt->dts = AV_NOPTS_VALUE; + p += r; + n -= r; + continue; + } + ++p; + --n; } // copy remaining bytes to start of packet if (n) { - memmove(AudioAvPkt->data, p, n); + memmove(AudioAvPkt->data, p, n); } AudioAvPkt->stream_index = n; @@ -1244,42 +1224,41 @@ int PlayTsAudio(const uint8_t * data, int size) { static TsDemux tsdx[1]; - if (SkipAudio || !MyAudioDecoder) { // skip audio - return size; + if (SkipAudio || !MyAudioDecoder) { // skip audio + return size; } - if (StreamFreezed) { // stream freezed - return 0; + if (StreamFreezed) { // stream freezed + return 0; } if (NewAudioStream) { - // 
this clears the audio ringbuffer indirect, open and setup does it - CodecAudioClose(MyAudioDecoder); - AudioFlushBuffers(); - // max time between audio packets 200ms + 24ms hw buffer - AudioSetBufferTime(ConfigAudioBufferTime); - AudioCodecID = AV_CODEC_ID_NONE; - AudioChannelID = -1; - NewAudioStream = 0; - PesReset(PesDemuxAudio); + // this clears the audio ringbuffer indirect, open and setup does it + CodecAudioClose(MyAudioDecoder); + AudioFlushBuffers(); + // max time between audio packets 200ms + 24ms hw buffer + AudioSetBufferTime(ConfigAudioBufferTime); + AudioCodecID = AV_CODEC_ID_NONE; + AudioChannelID = -1; + NewAudioStream = 0; + PesReset(PesDemuxAudio); } // hard limit buffer full: don't overrun audio buffers on replay if (AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE) { - return 0; + return 0; } #ifdef USE_SOFTLIMIT // soft limit buffer full - if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3 - && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { - return 0; + if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3 && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { + return 0; } #endif - if (AudioDelay) { - Debug(3,"AudioDelay %dms\n",AudioDelay); - usleep(AudioDelay*1000); - AudioDelay = 0; -// TsDemuxer(tsdx, data, size); // insert dummy audio - - } + if (AudioDelay) { + Debug(3, "AudioDelay %dms\n", AudioDelay); + usleep(AudioDelay * 1000); + AudioDelay = 0; +// TsDemuxer(tsdx, data, size); // insert dummy audio + + } return TsDemuxer(tsdx, data, size); } @@ -1305,62 +1284,62 @@ void ResetChannelId(void) } ////////////////////////////////////////////////////////////////////////////// -// Video +// Video ////////////////////////////////////////////////////////////////////////////// -#define VIDEO_BUFFER_SIZE (1024 * 1024) ///< video PES buffer default size 512 * 1024 -#define VIDEO_PACKET_MAX 256 ///< max number of video packets 192 +#define VIDEO_BUFFER_SIZE (1024 * 1024) ///< video PES buffer default size 512 * 1024 +#define VIDEO_PACKET_MAX 256 ///< max number of video packets 192 /** ** Video output stream device structure. Parser, decoder, display. 
*/ struct __video_stream__ { - VideoHwDecoder *HwDecoder; ///< video hardware decoder - VideoDecoder *Decoder; ///< video decoder - pthread_mutex_t DecoderLockMutex; ///< video decoder lock mutex + VideoHwDecoder *HwDecoder; ///< video hardware decoder + VideoDecoder *Decoder; ///< video decoder + pthread_mutex_t DecoderLockMutex; ///< video decoder lock mutex - enum AVCodecID CodecID; ///< current codec id - enum AVCodecID LastCodecID; ///< last codec id + enum AVCodecID CodecID; ///< current codec id + enum AVCodecID LastCodecID; ///< last codec id - volatile char NewStream; ///< flag new video stream - volatile char ClosingStream; ///< flag closing video stream - volatile char SkipStream; ///< skip video stream - volatile char Freezed; ///< stream freezed + volatile char NewStream; ///< flag new video stream + volatile char ClosingStream; ///< flag closing video stream + volatile char SkipStream; ///< skip video stream + volatile char Freezed; ///< stream freezed - volatile char TrickSpeed; ///< current trick speed - volatile char Close; ///< command close video stream - volatile char ClearBuffers; ///< command clear video buffers - volatile char ClearClose; ///< clear video buffers for close + volatile char TrickSpeed; ///< current trick speed + volatile char Close; ///< command close video stream + volatile char ClearBuffers; ///< command clear video buffers + volatile char ClearClose; ///< clear video buffers for close - int InvalidPesCounter; ///< counter of invalid PES packets + int InvalidPesCounter; ///< counter of invalid PES packets - enum AVCodecID CodecIDRb[VIDEO_PACKET_MAX]; ///< codec ids in ring buffer - AVPacket PacketRb[VIDEO_PACKET_MAX]; ///< PES packet ring buffer - int StartCodeState; ///< last three bytes start code state + enum AVCodecID CodecIDRb[VIDEO_PACKET_MAX]; ///< codec ids in ring buffer + AVPacket PacketRb[VIDEO_PACKET_MAX]; ///< PES packet ring buffer + int StartCodeState; ///< last three bytes start code state - int PacketWrite; ///< ring buffer write pointer - int PacketRead; ///< ring buffer read pointer - atomic_t PacketsFilled; ///< how many of the ring buffer is used + int PacketWrite; ///< ring buffer write pointer + int PacketRead; ///< ring buffer read pointer + atomic_t PacketsFilled; ///< how many of the ring buffer is used }; -static VideoStream MyVideoStream[1]; ///< normal video stream +static VideoStream MyVideoStream[1]; ///< normal video stream #ifdef USE_PIP -static VideoStream PipVideoStream[1]; ///< pip video stream +static VideoStream PipVideoStream[1]; ///< pip video stream #endif #ifdef DEBUG -uint32_t VideoSwitch; ///< debug video switch ticks -static int VideoMaxPacketSize; ///< biggest used packet buffer +uint32_t VideoSwitch; ///< debug video switch ticks +static int VideoMaxPacketSize; ///< biggest used packet buffer #endif //#define STILL_DEBUG 2 #ifdef STILL_DEBUG -static char InStillPicture; ///< flag still picture +static char InStillPicture; ///< flag still picture #endif -const char *X11DisplayName; ///< x11 display name -static volatile char Usr1Signal; ///< true got usr1 signal +const char *X11DisplayName; ///< x11 display name +static volatile char Usr1Signal; ///< true got usr1 signal ////////////////////////////////////////////////////////////////////////////// @@ -1374,13 +1353,13 @@ static void VideoPacketInit(VideoStream * stream) int i; for (i = 0; i < VIDEO_PACKET_MAX; ++i) { - AVPacket *avpkt; + AVPacket *avpkt; - avpkt = &stream->PacketRb[i]; - // build a clean ffmpeg av packet - if (av_new_packet(avpkt, 
VIDEO_BUFFER_SIZE)) { - Fatal(_("[softhddev] out of memory\n")); - } + avpkt = &stream->PacketRb[i]; + // build a clean ffmpeg av packet + if (av_new_packet(avpkt, VIDEO_BUFFER_SIZE)) { + Fatal(_("[softhddev] out of memory\n")); + } } atomic_set(&stream->PacketsFilled, 0); @@ -1399,7 +1378,7 @@ static void VideoPacketExit(VideoStream * stream) atomic_set(&stream->PacketsFilled, 0); for (i = 0; i < VIDEO_PACKET_MAX; ++i) { - av_packet_unref(&stream->PacketRb[i]); + av_packet_unref(&stream->PacketRb[i]); } } @@ -1411,34 +1390,33 @@ static void VideoPacketExit(VideoStream * stream) ** @param data data of pes packet ** @param size size of pes packet */ -static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const void *data, - int size) +static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const void *data, int size) { AVPacket *avpkt; - // Debug(3, "video: enqueue %d\n", size); + // Debug(3, "video: enqueue %d\n", size); avpkt = &stream->PacketRb[stream->PacketWrite]; - if (!avpkt->stream_index) { // add pts only for first added - avpkt->pts = pts; - avpkt->dts = dts; + if (!avpkt->stream_index) { // add pts only for first added + avpkt->pts = pts; + avpkt->dts = dts; } if (avpkt->stream_index + size >= avpkt->size) { -// Warning(_("video: packet buffer too small for %d\n"), -// avpkt->stream_index + size); +// Warning(_("video: packet buffer too small for %d\n"), +// avpkt->stream_index + size); - // new + grow reserves FF_INPUT_BUFFER_PADDING_SIZE - av_grow_packet(avpkt, ((size + VIDEO_BUFFER_SIZE / 2) - / (VIDEO_BUFFER_SIZE / 2)) * (VIDEO_BUFFER_SIZE / 2)); - // FIXME: out of memory! + // new + grow reserves FF_INPUT_BUFFER_PADDING_SIZE + av_grow_packet(avpkt, ((size + VIDEO_BUFFER_SIZE / 2) + / (VIDEO_BUFFER_SIZE / 2)) * (VIDEO_BUFFER_SIZE / 2)); + // FIXME: out of memory! 
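The expression handed to av_grow_packet() above rounds the extra space up to whole chunks of half a video buffer: with VIDEO_BUFFER_SIZE at 1 MiB, a 70 KiB overflow grows the packet by one 512 KiB chunk and a 600 KiB overflow by two. A sketch of that arithmetic (not part of this change):

/* illustrative only: the round-up used for av_grow_packet() in VideoEnqueue() */
static int GrowBy(int size)
{
    int half = VIDEO_BUFFER_SIZE / 2;   // VIDEO_BUFFER_SIZE is defined above

    return ((size + half) / half) * half;   // whole half-buffer chunks, at least one
}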
#ifdef DEBUG - if (avpkt->size <= avpkt->stream_index + size) { - fprintf(stderr, "%d %d %d\n", avpkt->size, avpkt->stream_index, size); - fflush(stderr); - abort(); - } + if (avpkt->size <= avpkt->stream_index + size) { + fprintf(stderr, "%d %d %d\n", avpkt->size, avpkt->stream_index, size); + fflush(stderr); + abort(); + } #endif } @@ -1446,8 +1424,8 @@ static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const v avpkt->stream_index += size; #ifdef DEBUG if (avpkt->stream_index > VideoMaxPacketSize) { - VideoMaxPacketSize = avpkt->stream_index; - Debug(4, "video: max used PES packet size: %d\n", VideoMaxPacketSize); + VideoMaxPacketSize = avpkt->stream_index; + Debug(4, "video: max used PES packet size: %d\n", VideoMaxPacketSize); } #endif } @@ -1461,7 +1439,7 @@ static void VideoResetPacket(VideoStream * stream) { AVPacket *avpkt; - stream->StartCodeState = 0; // reset start code state + stream->StartCodeState = 0; // reset start code state stream->CodecIDRb[stream->PacketWrite] = AV_CODEC_ID_NONE; avpkt = &stream->PacketRb[stream->PacketWrite]; @@ -1481,21 +1459,21 @@ static void VideoNextPacket(VideoStream * stream, int codec_id) AVPacket *avpkt; avpkt = &stream->PacketRb[stream->PacketWrite]; - if (!avpkt->stream_index) { // ignore empty packets - if (codec_id != AV_CODEC_ID_NONE) { - return; - } - Debug(3, "video: possible stream change loss\n"); + if (!avpkt->stream_index) { // ignore empty packets + if (codec_id != AV_CODEC_ID_NONE) { + return; + } + Debug(3, "video: possible stream change loss\n"); } if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 1) { - // no free slot available drop last packet - Error(_("video: no empty slot in packet ringbuffer\n")); - avpkt->stream_index = 0; - if (codec_id == AV_CODEC_ID_NONE) { - Debug(3, "video: possible stream change loss\n"); - } - return; + // no free slot available drop last packet + Error(_("video: no empty slot in packet ringbuffer\n")); + avpkt->stream_index = 0; + if (codec_id == AV_CODEC_ID_NONE) { + Debug(3, "video: possible stream change loss\n"); + } + return; } // clear area for decoder, always enough space allocated memset(avpkt->data + avpkt->stream_index, 0, AV_INPUT_BUFFER_PADDING_SIZE); @@ -1528,14 +1506,13 @@ static void VideoNextPacket(VideoStream * stream, int codec_id) ** @param data data of pes packet ** @param size size of pes packet */ -static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, - const uint8_t * data, int size) +static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const uint8_t * data, int size) { static const char startcode[3] = { 0x00, 0x00, 0x01 }; const uint8_t *p; int n; int first; - + // first scan first = !stream->PacketRb[stream->PacketWrite].stream_index; p = data; @@ -1543,129 +1520,129 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, #ifdef DEBUG if (n < 4) { - // is a problem with the pes start code detection - Error(_("[softhddev] too short PES video packet\n")); - fprintf(stderr, "[softhddev] too short PES video packet\n"); + // is a problem with the pes start code detection + Error(_("[softhddev] too short PES video packet\n")); + fprintf(stderr, "[softhddev] too short PES video packet\n"); } #endif - switch (stream->StartCodeState) { // prefix starting in last packet - case 3: // 0x00 0x00 0x01 seen + switch (stream->StartCodeState) { // prefix starting in last packet + case 3: // 0x00 0x00 0x01 seen #ifdef DEBUG - fprintf(stderr, "last: %d\n", stream->StartCodeState); + 
fprintf(stderr, "last: %d\n", stream->StartCodeState); #endif - if (!p[0] || p[0] == 0xb3) { + if (!p[0] || p[0] == 0xb3) { #ifdef DEBUG - printf("last: %d start aspect %02x\n", stream->StartCodeState,p[4]); + printf("last: %d start aspect %02x\n", stream->StartCodeState, p[4]); #endif - stream->PacketRb[stream->PacketWrite].stream_index -= 3; - VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); - VideoEnqueue(stream, pts, dts, startcode, 3); - first = p[0] == 0xb3; - p++; - n--; - pts = AV_NOPTS_VALUE; - } + stream->PacketRb[stream->PacketWrite].stream_index -= 3; + VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); + VideoEnqueue(stream, pts, dts, startcode, 3); + first = p[0] == 0xb3; + p++; + n--; + pts = AV_NOPTS_VALUE; + } - break; - case 2: // 0x00 0x00 seen + break; + case 2: // 0x00 0x00 seen #ifdef DEBUG - fprintf(stderr, "last: %d\n", stream->StartCodeState); + fprintf(stderr, "last: %d\n", stream->StartCodeState); #endif - if (p[0] == 0x01 && (!p[1] || p[1] == 0xb3)) { + if (p[0] == 0x01 && (!p[1] || p[1] == 0xb3)) { #ifdef DEBUG - printf( "last: %d start aspect %02x\n", stream->StartCodeState,p[5]); + printf("last: %d start aspect %02x\n", stream->StartCodeState, p[5]); #endif - stream->PacketRb[stream->PacketWrite].stream_index -= 2; - VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); - VideoEnqueue(stream, pts, dts, startcode, 2); - first = p[1] == 0xb3; - p += 2; - n -= 2; - pts = AV_NOPTS_VALUE; - } - break; - case 1: // 0x00 seen + stream->PacketRb[stream->PacketWrite].stream_index -= 2; + VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); + VideoEnqueue(stream, pts, dts, startcode, 2); + first = p[1] == 0xb3; + p += 2; + n -= 2; + pts = AV_NOPTS_VALUE; + } + break; + case 1: // 0x00 seen #ifdef DEBUG - fprintf(stderr, "last: %d\n", stream->StartCodeState); + fprintf(stderr, "last: %d\n", stream->StartCodeState); #endif - if (!p[0] && p[1] == 0x01 && (!p[2] || p[2] == 0xb3)) { + if (!p[0] && p[1] == 0x01 && (!p[2] || p[2] == 0xb3)) { #ifdef DEBUG - printf( "last: %d start aspect %02x\n", stream->StartCodeState,p[6]); + printf("last: %d start aspect %02x\n", stream->StartCodeState, p[6]); #endif - stream->PacketRb[stream->PacketWrite].stream_index -= 1; - VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); - VideoEnqueue(stream, pts, dts, startcode, 1); - first = p[2] == 0xb3; - p += 3; - n -= 3; - pts = AV_NOPTS_VALUE; - } - case 0: - break; + stream->PacketRb[stream->PacketWrite].stream_index -= 1; + VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); + VideoEnqueue(stream, pts, dts, startcode, 1); + first = p[2] == 0xb3; + p += 3; + n -= 3; + pts = AV_NOPTS_VALUE; + } + case 0: + break; } // b3 b4 b8 00 b5 ... 00 b5 ... 
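The StartCodeState cases above exist because a 00 00 01 start code can straddle two PES packets; the state itself is derived from the packet tail by the border switch further below. A sketch mirroring that classification (not part of this change):

/* illustrative only: start code prefix state carried into the next packet,
   p points at the first of the n (0..3) unscanned tail bytes */
#include <stdint.h>

static int StartCodeTail(const uint8_t *p, int n)
{
    switch (n) {
        case 3:
            return (!p[0] && !p[1] && p[2] == 0x01) ? 3 : 0;    // 0x00 0x00 0x01 seen
        case 2:
            return (!p[0] && !p[1]) ? 2 : 0;                    // 0x00 0x00 seen
        case 1:
            return !p[0] ? 1 : 0;                               // 0x00 seen
        default:
            return 0;
    }
}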
while (n > 3) { - if (0 && !p[0] && !p[1] && p[2] == 0x01) { - fprintf(stderr, " %02x", p[3]); - } - // scan for picture header 0x00000100 - // FIXME: not perfect, must split at 0xb3 also - if (!p[0] && !p[1] && p[2] == 0x01 && !p[3]) { - if (first) { - first = 0; - n -= 4; - p += 4; - continue; - } - // packet has already an picture header - /* - fprintf(stderr, "\nfix:%9d,%02x%02x%02x %02x ", n, - p[0], p[1], p[2], p[3]); - */ - // first packet goes only upto picture header - VideoEnqueue(stream, pts, dts, data, p - data); - VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); + if (0 && !p[0] && !p[1] && p[2] == 0x01) { + fprintf(stderr, " %02x", p[3]); + } + // scan for picture header 0x00000100 + // FIXME: not perfect, must split at 0xb3 also + if (!p[0] && !p[1] && p[2] == 0x01 && !p[3]) { + if (first) { + first = 0; + n -= 4; + p += 4; + continue; + } + // packet has already an picture header + /* + fprintf(stderr, "\nfix:%9d,%02x%02x%02x %02x ", n, + p[0], p[1], p[2], p[3]); + */ + // first packet goes only upto picture header + VideoEnqueue(stream, pts, dts, data, p - data); + VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); #ifdef DEBUG - fprintf(stderr, "fix\r"); + fprintf(stderr, "fix\r"); #endif - data = p; - size = n; + data = p; + size = n; - // time-stamp only valid for first packet - pts = AV_NOPTS_VALUE; - n -= 4; - p += 4; - continue; - } - if (!p[0] && !p[1] && p[2] == 0x01 && p[3] == 0xb3) { -// printf("aspectratio %02x\n",p[7]>>4); - } - --n; - ++p; + // time-stamp only valid for first packet + pts = AV_NOPTS_VALUE; + n -= 4; + p += 4; + continue; + } + if (!p[0] && !p[1] && p[2] == 0x01 && p[3] == 0xb3) { +// printf("aspectratio %02x\n",p[7]>>4); + } + --n; + ++p; } stream->StartCodeState = 0; - switch (n) { // handle packet border start code - case 3: - if (!p[0] && !p[1] && p[2] == 0x01) { - stream->StartCodeState = 3; - } - break; - case 2: - if (!p[0] && !p[1]) { - stream->StartCodeState = 2; - } - break; - case 1: - if (!p[0]) { - stream->StartCodeState = 1; - } - break; - case 0: - break; + switch (n) { // handle packet border start code + case 3: + if (!p[0] && !p[1] && p[2] == 0x01) { + stream->StartCodeState = 3; + } + break; + case 2: + if (!p[0] && !p[1]) { + stream->StartCodeState = 2; + } + break; + case 1: + if (!p[0]) { + stream->StartCodeState = 1; + } + break; + case 0: + break; } VideoEnqueue(stream, pts, dts, data, size); } @@ -1703,54 +1680,53 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt) first = 1; #if STILL_DEBUG>1 if (InStillPicture) { - fprintf(stderr, "fix(%d): ", n); + fprintf(stderr, "fix(%d): ", n); } #endif while (n > 3) { #if STILL_DEBUG>1 - if (InStillPicture && !p[0] && !p[1] && p[2] == 0x01) { - fprintf(stderr, " %02x", p[3]); - } + if (InStillPicture && !p[0] && !p[1] && p[2] == 0x01) { + fprintf(stderr, " %02x", p[3]); + } #endif - // scan for picture header 0x00000100 - if (!p[0] && !p[1] && p[2] == 0x01 && !p[3]) { - if (first) { - first = 0; - n -= 4; - p += 4; - continue; - } - // packet has already an picture header - tmp->size = p - tmp->data; + // scan for picture header 0x00000100 + if (!p[0] && !p[1] && p[2] == 0x01 && !p[3]) { + if (first) { + first = 0; + n -= 4; + p += 4; + continue; + } + // packet has already an picture header + tmp->size = p - tmp->data; #if STILL_DEBUG>1 - if (InStillPicture) { - fprintf(stderr, "\nfix:%9d,%02x %02x %02x %02x\n", tmp->size, - tmp->data[0], tmp->data[1], tmp->data[2], tmp->data[3]); - } + if (InStillPicture) { + fprintf(stderr, "\nfix:%9d,%02x %02x %02x 
%02x\n", tmp->size, tmp->data[0], tmp->data[1], tmp->data[2], + tmp->data[3]); + } #endif - CodecVideoDecode(vdecoder, tmp); - // time-stamp only valid for first packet - tmp->pts = AV_NOPTS_VALUE; - tmp->dts = AV_NOPTS_VALUE; - tmp->data = p; - tmp->size = n; - } - --n; - ++p; + CodecVideoDecode(vdecoder, tmp); + // time-stamp only valid for first packet + tmp->pts = AV_NOPTS_VALUE; + tmp->dts = AV_NOPTS_VALUE; + tmp->data = p; + tmp->size = n; + } + --n; + ++p; } #if STILL_DEBUG>1 if (InStillPicture) { - fprintf(stderr, "\nfix:%9d.%02x %02x %02x %02x\n", tmp->size, - tmp->data[0], tmp->data[1], tmp->data[2], tmp->data[3]); + fprintf(stderr, "\nfix:%9d.%02x %02x %02x %02x\n", tmp->size, tmp->data[0], tmp->data[1], tmp->data[2], + tmp->data[3]); } #endif CodecVideoDecode(vdecoder, tmp); } #endif - /** ** Open video stream. ** @@ -1762,9 +1738,9 @@ static void VideoStreamOpen(VideoStream * stream) stream->CodecID = AV_CODEC_ID_NONE; stream->LastCodecID = AV_CODEC_ID_NONE; if ((stream->HwDecoder = VideoNewHwDecoder(stream))) { - stream->Decoder = CodecVideoNewDecoder(stream->HwDecoder); - VideoPacketInit(stream); - stream->SkipStream = 0; + stream->Decoder = CodecVideoNewDecoder(stream->HwDecoder); + VideoPacketInit(stream); + stream->SkipStream = 0; } } @@ -1781,22 +1757,23 @@ static void VideoStreamClose(VideoStream * stream, int delhw) { stream->SkipStream = 1; if (stream->Decoder) { - VideoDecoder *decoder; -Debug(3,"VideoStreamClose"); - decoder = stream->Decoder; - // FIXME: remove this lock for main stream close - pthread_mutex_lock(&stream->DecoderLockMutex); - stream->Decoder = NULL; // lock read thread - pthread_mutex_unlock(&stream->DecoderLockMutex); - CodecVideoClose(decoder); - CodecVideoDelDecoder(decoder); + VideoDecoder *decoder; + + Debug(3, "VideoStreamClose"); + decoder = stream->Decoder; + // FIXME: remove this lock for main stream close + pthread_mutex_lock(&stream->DecoderLockMutex); + stream->Decoder = NULL; // lock read thread + pthread_mutex_unlock(&stream->DecoderLockMutex); + CodecVideoClose(decoder); + CodecVideoDelDecoder(decoder); } if (stream->HwDecoder) { - if (delhw) { - VideoDelHwDecoder(stream->HwDecoder); - } - stream->HwDecoder = NULL; - // FIXME: CodecVideoClose calls/uses hw decoder + if (delhw) { + VideoDelHwDecoder(stream->HwDecoder); + } + stream->HwDecoder = NULL; + // FIXME: CodecVideoClose calls/uses hw decoder } VideoPacketExit(stream); @@ -1816,32 +1793,32 @@ Debug(3,"VideoStreamClose"); */ int VideoPollInput(VideoStream * stream) { - if (!stream->Decoder) { // closing + if (!stream->Decoder) { // closing #ifdef DEBUG - fprintf(stderr, "no decoder\n"); + fprintf(stderr, "no decoder\n"); #endif - return -1; + return -1; } - if (stream->Close) { // close stream request - VideoStreamClose(stream, 1); - stream->Close = 0; - return 1; + if (stream->Close) { // close stream request + VideoStreamClose(stream, 1); + stream->Close = 0; + return 1; } - if (stream->ClearBuffers) { // clear buffer request - atomic_set(&stream->PacketsFilled, 0); - stream->PacketRead = stream->PacketWrite; - // FIXME: ->Decoder already checked - Debug(3,"Clear buffer request in Poll\n"); - if (stream->Decoder) { - CodecVideoFlushBuffers(stream->Decoder); - VideoResetStart(stream->HwDecoder); - } - stream->ClearBuffers = 0; - return 1; + if (stream->ClearBuffers) { // clear buffer request + atomic_set(&stream->PacketsFilled, 0); + stream->PacketRead = stream->PacketWrite; + // FIXME: ->Decoder already checked + Debug(3, "Clear buffer request in Poll\n"); + if 
(stream->Decoder) { + CodecVideoFlushBuffers(stream->Decoder); + VideoResetStart(stream->HwDecoder); + } + stream->ClearBuffers = 0; + return 1; } if (!atomic_read(&stream->PacketsFilled)) { - return -1; + return -1; } return 1; } @@ -1861,102 +1838,101 @@ int VideoDecodeInput(VideoStream * stream) AVPacket *avpkt; int saved_size; - if (!stream->Decoder) { // closing + if (!stream->Decoder) { // closing #ifdef DEBUG - fprintf(stderr, "no decoder\n"); + fprintf(stderr, "no decoder\n"); #endif - return -1; + return -1; } - if (stream->Close) { // close stream request - VideoStreamClose(stream, 1); - stream->Close = 0; - return 1; + if (stream->Close) { // close stream request + VideoStreamClose(stream, 1); + stream->Close = 0; + return 1; } - if (stream->ClearBuffers) { // clear buffer request - atomic_set(&stream->PacketsFilled, 0); - stream->PacketRead = stream->PacketWrite; - // FIXME: ->Decoder already checked - if (stream->Decoder) { - CodecVideoFlushBuffers(stream->Decoder); - Debug(3,"Clear buffer request in Decode\n"); - VideoResetStart(stream->HwDecoder); - } - stream->ClearBuffers = 0; - return 1; + if (stream->ClearBuffers) { // clear buffer request + atomic_set(&stream->PacketsFilled, 0); + stream->PacketRead = stream->PacketWrite; + // FIXME: ->Decoder already checked + if (stream->Decoder) { + CodecVideoFlushBuffers(stream->Decoder); + Debug(3, "Clear buffer request in Decode\n"); + VideoResetStart(stream->HwDecoder); + } + stream->ClearBuffers = 0; + return 1; } - if (stream->Freezed) { // stream freezed - // clear is called during freezed - return 1; + if (stream->Freezed) { // stream freezed + // clear is called during freezed + return 1; } filled = atomic_read(&stream->PacketsFilled); -// printf("Packets in Decode %d\n",filled); +// printf("Packets in Decode %d\n",filled); if (!filled) { - return -1; + return -1; } #if 0 // clearing for normal channel switch has no advantage - if (stream->ClearClose || stream->ClosingStream ) { - int f; + if (stream->ClearClose || stream->ClosingStream) { + int f; - // FIXME: during replay all packets are always checked + // FIXME: during replay all packets are always checked - // flush buffers, if close is in the queue - for (f = 0; f < filled; ++f) { - if (stream->CodecIDRb[(stream->PacketRead + f) % VIDEO_PACKET_MAX] == AV_CODEC_ID_NONE) { - if (f) { - Debug(3, "video: cleared upto close\n"); - atomic_sub(f, &stream->PacketsFilled); - stream->PacketRead = - (stream->PacketRead + f) % VIDEO_PACKET_MAX; - stream->ClearClose = 0; - } - break; - } - } - stream->ClosingStream = 0; + // flush buffers, if close is in the queue + for (f = 0; f < filled; ++f) { + if (stream->CodecIDRb[(stream->PacketRead + f) % VIDEO_PACKET_MAX] == AV_CODEC_ID_NONE) { + if (f) { + Debug(3, "video: cleared upto close\n"); + atomic_sub(f, &stream->PacketsFilled); + stream->PacketRead = (stream->PacketRead + f) % VIDEO_PACKET_MAX; + stream->ClearClose = 0; + } + break; + } + } + stream->ClosingStream = 0; } #endif // - // handle queued commands + // handle queued commands // avpkt = &stream->PacketRb[stream->PacketRead]; switch (stream->CodecIDRb[stream->PacketRead]) { - case AV_CODEC_ID_NONE: - stream->ClosingStream = 0; - if (stream->LastCodecID != AV_CODEC_ID_NONE) { - Debug(3,"in VideoDecode make close\n"); - stream->LastCodecID = AV_CODEC_ID_NONE; - CodecVideoClose(stream->Decoder); - // FIXME: CodecVideoClose calls/uses hw decoder - goto skip; - } - // FIXME: look if more close are in the queue - // size can be zero - goto skip; - case AV_CODEC_ID_MPEG2VIDEO: 
- if (stream->LastCodecID != AV_CODEC_ID_MPEG2VIDEO) { - stream->LastCodecID = AV_CODEC_ID_MPEG2VIDEO; - CodecVideoOpen(stream->Decoder, AV_CODEC_ID_MPEG2VIDEO); - } - break; - case AV_CODEC_ID_H264: - if (stream->LastCodecID != AV_CODEC_ID_H264) { - Debug(3,"CodecVideoOpen h264\n"); - stream->LastCodecID = AV_CODEC_ID_H264; - CodecVideoOpen(stream->Decoder, AV_CODEC_ID_H264); - } - break; - case AV_CODEC_ID_HEVC: - if (stream->LastCodecID != AV_CODEC_ID_HEVC) { - stream->LastCodecID = AV_CODEC_ID_HEVC; - CodecVideoOpen(stream->Decoder, AV_CODEC_ID_HEVC); - } - break; - default: - break; + case AV_CODEC_ID_NONE: + stream->ClosingStream = 0; + if (stream->LastCodecID != AV_CODEC_ID_NONE) { + Debug(3, "in VideoDecode make close\n"); + stream->LastCodecID = AV_CODEC_ID_NONE; + CodecVideoClose(stream->Decoder); + // FIXME: CodecVideoClose calls/uses hw decoder + goto skip; + } + // FIXME: look if more close are in the queue + // size can be zero + goto skip; + case AV_CODEC_ID_MPEG2VIDEO: + if (stream->LastCodecID != AV_CODEC_ID_MPEG2VIDEO) { + stream->LastCodecID = AV_CODEC_ID_MPEG2VIDEO; + CodecVideoOpen(stream->Decoder, AV_CODEC_ID_MPEG2VIDEO); + } + break; + case AV_CODEC_ID_H264: + if (stream->LastCodecID != AV_CODEC_ID_H264) { + Debug(3, "CodecVideoOpen h264\n"); + stream->LastCodecID = AV_CODEC_ID_H264; + CodecVideoOpen(stream->Decoder, AV_CODEC_ID_H264); + } + break; + case AV_CODEC_ID_HEVC: + if (stream->LastCodecID != AV_CODEC_ID_HEVC) { + stream->LastCodecID = AV_CODEC_ID_HEVC; + CodecVideoOpen(stream->Decoder, AV_CODEC_ID_HEVC); + } + break; + default: + break; } // avcodec_decode_video2 needs size @@ -1969,22 +1945,22 @@ int VideoDecodeInput(VideoStream * stream) //DumpMpeg(avpkt->data, avpkt->size); #ifdef STILL_DEBUG if (InStillPicture) { - DumpMpeg(avpkt->data, avpkt->size); + DumpMpeg(avpkt->data, avpkt->size); } #endif // lock decoder against close pthread_mutex_lock(&stream->DecoderLockMutex); if (stream->Decoder) { - CodecVideoDecode(stream->Decoder, avpkt); - } + CodecVideoDecode(stream->Decoder, avpkt); + } pthread_mutex_unlock(&stream->DecoderLockMutex); //fprintf(stderr, "]\n"); #else // old version if (stream->LastCodecID == AV_CODEC_ID_MPEG2VIDEO) { - FixPacketForFFMpeg(stream->Decoder, avpkt); + FixPacketForFFMpeg(stream->Decoder, avpkt); } else { - CodecVideoDecode(stream->Decoder, avpkt); + CodecVideoDecode(stream->Decoder, avpkt); } #endif @@ -2018,13 +1994,13 @@ static void StartVideo(void) VideoInit(X11DisplayName); if (ConfigFullscreen) { - // FIXME: not good looking, mapped and then resized. - VideoSetFullscreen(1); + // FIXME: not good looking, mapped and then resized. 
+ VideoSetFullscreen(1); } VideoOsdInit(); if (!MyVideoStream->Decoder) { - VideoStreamOpen(MyVideoStream); - AudioSyncStream = MyVideoStream; + VideoStreamOpen(MyVideoStream); + AudioSyncStream = MyVideoStream; } } @@ -2042,20 +2018,20 @@ static void StopVideo(void) #else MyVideoStream->SkipStream = 1; if (MyVideoStream->Decoder) { - VideoDecoder *decoder; + VideoDecoder *decoder; - decoder = MyVideoStream->Decoder; - pthread_mutex_lock(&MyVideoStream->DecoderLockMutex); - MyVideoStream->Decoder = NULL; // lock read thread - pthread_mutex_unlock(&MyVideoStream->DecoderLockMutex); - // FIXME: this can crash, hw decoder released by video exit - Debug(3,"in Stop Video"); - CodecVideoClose(decoder); - CodecVideoDelDecoder(decoder); + decoder = MyVideoStream->Decoder; + pthread_mutex_lock(&MyVideoStream->DecoderLockMutex); + MyVideoStream->Decoder = NULL; // lock read thread + pthread_mutex_unlock(&MyVideoStream->DecoderLockMutex); + // FIXME: this can crash, hw decoder released by video exit + Debug(3, "in Stop Video"); + CodecVideoClose(decoder); + CodecVideoDelDecoder(decoder); } if (MyVideoStream->HwDecoder) { - // done by exit: VideoDelHwDecoder(MyVideoStream->HwDecoder); - MyVideoStream->HwDecoder = NULL; + // done by exit: VideoDelHwDecoder(MyVideoStream->HwDecoder); + MyVideoStream->HwDecoder = NULL; } VideoPacketExit(MyVideoStream); @@ -2078,14 +2054,14 @@ static void DumpMpeg(const uint8_t * data, int size) // b3 b4 b8 00 b5 ... 00 b5 ... while (size > 3) { - if (!data[0] && !data[1] && data[2] == 0x01) { - fprintf(stderr, " %02x", data[3]); - size -= 4; - data += 4; - continue; - } - --size; - ++data; + if (!data[0] && !data[1] && data[2] == 0x01) { + fprintf(stderr, " %02x", data[3]); + size -= 4; + data += 4; + continue; + } + --size; + ++data; } fprintf(stderr, "\n"); } @@ -2099,15 +2075,15 @@ static int DumpH264(const uint8_t * data, int size) { printf("H264:"); do { - if (size < 4) { - printf("\n"); - return -1; - } - if (!data[0] && !data[1] && data[2] == 0x01) { - printf("%02x ", data[3]); - } - ++data; - --size; + if (size < 4) { + printf("\n"); + return -1; + } + if (!data[0] && !data[1] && data[2] == 0x01) { + printf("%02x ", data[3]); + } + ++data; + --size; } while (size); printf("\n"); @@ -2124,26 +2100,25 @@ static int ValidateMpeg(const uint8_t * data, int size) int pes_l; do { - if (size < 9) { - return -1; - } - if (data[0] || data[1] || data[2] != 0x01) { - printf("%02x: %02x %02x %02x %02x %02x\n", data[-1], data[0], - data[1], data[2], data[3], data[4]); - return -1; - } + if (size < 9) { + return -1; + } + if (data[0] || data[1] || data[2] != 0x01) { + printf("%02x: %02x %02x %02x %02x %02x\n", data[-1], data[0], data[1], data[2], data[3], data[4]); + return -1; + } - pes_l = (data[4] << 8) | data[5]; - if (!pes_l) { // contains unknown length - return 1; - } + pes_l = (data[4] << 8) | data[5]; + if (!pes_l) { // contains unknown length + return 1; + } - if (6 + pes_l > size) { - return -1; - } + if (6 + pes_l > size) { + return -1; + } - data += 6 + pes_l; - size -= 6 + pes_l; + data += 6 + pes_l; + size -= 6 + pes_l; } while (size); return 0; @@ -2163,199 +2138,200 @@ static int ValidateMpeg(const uint8_t * data, int size) int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) { const uint8_t *check; - int64_t pts,dts; + int64_t pts, dts; int n; int z; int l; - if (!stream->Decoder) { // no x11 video started - return size; + + if (!stream->Decoder) { // no x11 video started + return size; } - if (stream->SkipStream) { // skip video stream - 
return size; + if (stream->SkipStream) { // skip video stream + return size; } - if (stream->Freezed) { // stream freezed - return 0; + if (stream->Freezed) { // stream freezed + return 0; } - if (stream->NewStream) { // channel switched - Debug(3, "video: new stream %dms\n", GetMsTicks() - VideoSwitch); - if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 1) { - Debug(3, "video: new video stream lost\n"); - return 0; - } - VideoNextPacket(stream, AV_CODEC_ID_NONE); - stream->CodecID = AV_CODEC_ID_NONE; - stream->ClosingStream = 1; - stream->NewStream = 0; + if (stream->NewStream) { // channel switched + Debug(3, "video: new stream %dms\n", GetMsTicks() - VideoSwitch); + if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 1) { + Debug(3, "video: new video stream lost\n"); + return 0; + } + VideoNextPacket(stream, AV_CODEC_ID_NONE); + stream->CodecID = AV_CODEC_ID_NONE; + stream->ClosingStream = 1; + stream->NewStream = 0; } // must be a PES start code // FIXME: Valgrind-3.8.1 has a problem with this code if (size < 9 || !data || data[0] || data[1] || data[2] != 0x01) { - if (!stream->InvalidPesCounter++) { - Error(_("[softhddev] invalid PES video packet\n")); - } - return size; + if (!stream->InvalidPesCounter++) { + Error(_("[softhddev] invalid PES video packet\n")); + } + return size; } if (stream->InvalidPesCounter) { - if (stream->InvalidPesCounter > 1) { - Error(_("[softhddev] %d invalid PES video packet(s)\n"), - stream->InvalidPesCounter); - } - stream->InvalidPesCounter = 0; + if (stream->InvalidPesCounter > 1) { + Error(_("[softhddev] %d invalid PES video packet(s)\n"), stream->InvalidPesCounter); + } + stream->InvalidPesCounter = 0; } // 0xBE, filler, padding stream - if (data[3] == PES_PADDING_STREAM) { // from DVD plugin - return size; + if (data[3] == PES_PADDING_STREAM) { // from DVD plugin + return size; } - n = data[8]; // header size - if (size <= 9 + n) { // wrong size - if (size == 9 + n) { - Warning(_("[softhddev] empty video packet\n")); - } else { - Error(_("[softhddev] invalid video packet %d/%d bytes\n"), 9 + n, size); - } - return size; + n = data[8]; // header size + if (size <= 9 + n) { // wrong size + if (size == 9 + n) { + Warning(_("[softhddev] empty video packet\n")); + } else { + Error(_("[softhddev] invalid video packet %d/%d bytes\n"), 9 + n, size); + } + return size; } // hard limit buffer full: needed for replay if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 10) { -// Debug(3, "video: video buffer full\n"); - return 0; +// Debug(3, "video: video buffer full\n"); + return 0; } #ifdef USE_SOFTLIMIT // soft limit buffer full - if (AudioSyncStream == stream && atomic_read(&stream->PacketsFilled) > 3 && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { - return 0; + if (AudioSyncStream == stream && atomic_read(&stream->PacketsFilled) > 3 + && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { + return 0; } #endif // get pts/dts pts = AV_NOPTS_VALUE; - dts = AV_NOPTS_VALUE; + dts = AV_NOPTS_VALUE; if ((data[7] & 0xc0) == 0x80) { - pts = (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & - 0xFE) << 14 | data[12] << 7 | (data[13] & 0xFE) >> 1; + pts = + (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] & + 0xFE) >> 1; } - if ((data[7] & 0xC0) == 0xc0) { - pts = (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & - 0xFE) << 14 | data[12] << 7 | (data[13] & 0xFE) >> 1; - dts = (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & - 0xFE) << 14 | 
data[17] << 7 | (data[18] & 0xFE) >> 1; + if ((data[7] & 0xC0) == 0xc0) { + pts = + (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] & + 0xFE) >> 1; + dts = + (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] << 7 | (data[18] & + 0xFE) >> 1; } check = data + 9 + n; l = size - 9 - n; z = 0; - while (!*check) { // count leading zeros - if (l < 3) { -// Warning(_("[softhddev] empty video packet %d bytes\n"), size); - z = 0; - break; - } - --l; - ++check; - ++z; + while (!*check) { // count leading zeros + if (l < 3) { +// Warning(_("[softhddev] empty video packet %d bytes\n"), size); + z = 0; + break; + } + --l; + ++check; + ++z; } // H264 NAL AUD Access Unit Delimiter (0x00) 0x00 0x00 0x01 0x09 // and next start code if ((data[6] & 0xC0) == 0x80 && z >= 2 && check[0] == 0x01 && check[1] == 0x09 && !check[3] && !check[4]) { - // old PES HDTV recording z == 2 -> stronger check! - if (stream->CodecID == AV_CODEC_ID_H264) { + // old PES HDTV recording z == 2 -> stronger check! + if (stream->CodecID == AV_CODEC_ID_H264) { #ifdef DUMP_TRICKSPEED - if (stream->TrickSpeed) { - char buf[1024]; - int fd; - static int FrameCounter; + if (stream->TrickSpeed) { + char buf[1024]; + int fd; + static int FrameCounter; - snprintf(buf, sizeof(buf), "frame_%06d_%08d.raw", getpid(), - FrameCounter++); - if ((fd = - open(buf, O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, - 0666)) >= 0) { - if (write(fd, data + 9 + n, size - 9 - n)) { - // this construct is to remove the annoying warning - } - close(fd); - } - } + snprintf(buf, sizeof(buf), "frame_%06d_%08d.raw", getpid(), FrameCounter++); + if ((fd = open(buf, O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0666)) >= 0) { + if (write(fd, data + 9 + n, size - 9 - n)) { + // this construct is to remove the annoying warning + } + close(fd); + } + } #endif #ifdef H264_EOS_TRICKSPEED - // this should improve ffwd+frew, but produce crash in ffmpeg - // with some streams - if (stream->TrickSpeed && pts != (int64_t) AV_NOPTS_VALUE) { - // H264 NAL End of Sequence - static uint8_t seq_end_h264[] = { 0x00, 0x00, 0x00, 0x01, 0x0A }; + // this should improve ffwd+frew, but produce crash in ffmpeg + // with some streams + if (stream->TrickSpeed && pts != (int64_t) AV_NOPTS_VALUE) { + // H264 NAL End of Sequence + static uint8_t seq_end_h264[] = { 0x00, 0x00, 0x00, 0x01, 0x0A }; - // 1-5=SLICE 6=SEI 7=SPS 8=PPS - // NAL SPS sequence parameter set - if ((check[7] & 0x1F) == 0x07) { - VideoNextPacket(stream, AV_CODEC_ID_H264); - VideoEnqueue(stream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_h264, sizeof(seq_end_h264)); - } - } + // 1-5=SLICE 6=SEI 7=SPS 8=PPS + // NAL SPS sequence parameter set + if ((check[7] & 0x1F) == 0x07) { + VideoNextPacket(stream, AV_CODEC_ID_H264); + VideoEnqueue(stream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_h264, sizeof(seq_end_h264)); + } + } #endif - VideoNextPacket(stream, AV_CODEC_ID_H264); - } else { - Debug(3, "video: h264 detected\n"); - stream->CodecID = AV_CODEC_ID_H264; - } - // SKIP PES header (ffmpeg supports short start code) - VideoEnqueue(stream, pts, dts, check - 2, l + 2); - return size; + VideoNextPacket(stream, AV_CODEC_ID_H264); + } else { + Debug(3, "video: h264 detected\n"); + stream->CodecID = AV_CODEC_ID_H264; + } + // SKIP PES header (ffmpeg supports short start code) + VideoEnqueue(stream, pts, dts, check - 2, l + 2); + return size; } - // HEVC Codec + // HEVC Codec if ((data[6] & 0xC0) == 0x80 && z >= 2 && check[0] == 0x01 && check[1] == 0x46) 
{ - // old PES HDTV recording z == 2 -> stronger check! - if (stream->CodecID == AV_CODEC_ID_HEVC) { - VideoNextPacket(stream, AV_CODEC_ID_HEVC); - } else { - Debug(3, "video: hvec detected\n"); - stream->CodecID = AV_CODEC_ID_HEVC; - } - // SKIP PES header (ffmpeg supports short start code) - VideoEnqueue(stream, pts, dts, check - 2, l + 2); - return size; + // old PES HDTV recording z == 2 -> stronger check! + if (stream->CodecID == AV_CODEC_ID_HEVC) { + VideoNextPacket(stream, AV_CODEC_ID_HEVC); + } else { + Debug(3, "video: hvec detected\n"); + stream->CodecID = AV_CODEC_ID_HEVC; + } + // SKIP PES header (ffmpeg supports short start code) + VideoEnqueue(stream, pts, dts, check - 2, l + 2); + return size; } // PES start code 0x00 0x00 0x01 0x00|0xb3 if (z > 1 && check[0] == 0x01 && (!check[1] || check[1] == 0xb3)) { - if (stream->CodecID == AV_CODEC_ID_MPEG2VIDEO) { - VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); - } else { - Debug(3, "video: mpeg2 detected ID %02x\n", check[3]); - stream->CodecID = AV_CODEC_ID_MPEG2VIDEO; - } + if (stream->CodecID == AV_CODEC_ID_MPEG2VIDEO) { + VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); + } else { + Debug(3, "video: mpeg2 detected ID %02x\n", check[3]); + stream->CodecID = AV_CODEC_ID_MPEG2VIDEO; + } - // SKIP PES header, begin of start code + // SKIP PES header, begin of start code #ifdef USE_PIP - VideoMpegEnqueue(stream, pts, dts, check - 2, l + 2); + VideoMpegEnqueue(stream, pts, dts, check - 2, l + 2); #else - VideoEnqueue(stream, pts, dts, check - 2, l + 2); + VideoEnqueue(stream, pts, dts, check - 2, l + 2); #endif - return size; + return size; } // this happens when vdr sends incomplete packets if (stream->CodecID == AV_CODEC_ID_NONE) { - Debug(3, "video: not detected\n"); - return size; + Debug(3, "video: not detected\n"); + return size; } #ifdef USE_PIP if (stream->CodecID == AV_CODEC_ID_MPEG2VIDEO) { - // SKIP PES header - VideoMpegEnqueue(stream, pts, dts, data + 9 + n, size - 9 - n); + // SKIP PES header + VideoMpegEnqueue(stream, pts, dts, data + 9 + n, size - 9 - n); #ifndef USE_MPEG_COMPLETE - if (size < 65526) { - // mpeg codec supports incomplete packets - // waiting for a full complete packages, increases needed delays - // PES recordings sends incomplete packets - // incomplete packets breaks the decoder for some stations - // for the new USE_PIP code, this is only a very little improvement - VideoNextPacket(stream, stream->CodecID); - } + if (size < 65526) { + // mpeg codec supports incomplete packets + // waiting for a full complete packages, increases needed delays + // PES recordings sends incomplete packets + // incomplete packets breaks the decoder for some stations + // for the new USE_PIP code, this is only a very little improvement + VideoNextPacket(stream, stream->CodecID); + } #endif } else { - // SKIP PES header - VideoEnqueue(stream, pts, dts, data + 9 + n, size - 9 - n); + // SKIP PES header + VideoEnqueue(stream, pts, dts, data + 9 + n, size - 9 - n); } #else @@ -2366,9 +2342,9 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) // packet < 65526 is the last split packet, detect it here for // better latency if (size < 65526 && stream->CodecID == AV_CODEC_ID_MPEG2VIDEO) { - // mpeg codec supports incomplete packets - // waiting for a full complete packages, increases needed delays - VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); + // mpeg codec supports incomplete packets + // waiting for a full complete packages, increases needed delays + VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); 
} #endif @@ -2413,8 +2389,7 @@ extern uint8_t *CreateJpeg(uint8_t *, int *, int, int, int); ** ** @returns allocated jpeg image. */ -uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, - int width, int height) +uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, int width, int height) { struct jpeg_compress_struct cinfo; struct jpeg_error_mgr jerr; @@ -2440,8 +2415,8 @@ uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, row_stride = width * 3; while (cinfo.next_scanline < cinfo.image_height) { - row_ptr[0] = &image[cinfo.next_scanline * row_stride]; - jpeg_write_scanlines(&cinfo, row_ptr, 1); + row_ptr[0] = &image[cinfo.next_scanline * row_stride]; + jpeg_write_scanlines(&cinfo, row_ptr, 1); } jpeg_finish_compress(&cinfo); @@ -2465,20 +2440,20 @@ uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height) { if (jpeg) { - uint8_t *image; - int raw_size; + uint8_t *image; + int raw_size; - raw_size = 0; - image = VideoGrab(&raw_size, &width, &height, 0); - if (image) { // can fail, suspended, ... - uint8_t *jpg_image; + raw_size = 0; + image = VideoGrab(&raw_size, &width, &height, 0); + if (image) { // can fail, suspended, ... + uint8_t *jpg_image; - jpg_image = CreateJpeg(image, size, quality, width, height); + jpg_image = CreateJpeg(image, size, quality, width, height); - free(image); - return jpg_image; - } - return NULL; + free(image); + return jpg_image; + } + return NULL; } return VideoGrab(size, &width, &height, 1); } @@ -2492,48 +2467,48 @@ uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height) */ int SetPlayMode(int play_mode) { - Debug(3,"Set Playmode %d\n",play_mode); + Debug(3, "Set Playmode %d\n", play_mode); switch (play_mode) { - case 0: // audio/video from decoder - // tell video parser we get new stream - if (MyVideoStream->Decoder && !MyVideoStream->SkipStream) { - // clear buffers on close configured always or replay only - if (ConfigVideoClearOnSwitch || MyVideoStream->ClearClose) { - Clear(); // flush all buffers - MyVideoStream->ClearClose = 0; - } - if (MyVideoStream->CodecID != AV_CODEC_ID_NONE) { - MyVideoStream->NewStream = 1; - MyVideoStream->InvalidPesCounter = 0; - // tell hw decoder we are closing stream - VideoSetClosing(MyVideoStream->HwDecoder); - VideoResetStart(MyVideoStream->HwDecoder); + case 0: // audio/video from decoder + // tell video parser we get new stream + if (MyVideoStream->Decoder && !MyVideoStream->SkipStream) { + // clear buffers on close configured always or replay only + if (ConfigVideoClearOnSwitch || MyVideoStream->ClearClose) { + Clear(); // flush all buffers + MyVideoStream->ClearClose = 0; + } + if (MyVideoStream->CodecID != AV_CODEC_ID_NONE) { + MyVideoStream->NewStream = 1; + MyVideoStream->InvalidPesCounter = 0; + // tell hw decoder we are closing stream + VideoSetClosing(MyVideoStream->HwDecoder); + VideoResetStart(MyVideoStream->HwDecoder); #ifdef DEBUG - VideoSwitch = GetMsTicks(); - Debug(3, "video: new stream start\n"); + VideoSwitch = GetMsTicks(); + Debug(3, "video: new stream start\n"); #endif - } - } - if (MyAudioDecoder) { // tell audio parser we have new stream - if (AudioCodecID != AV_CODEC_ID_NONE) { - NewAudioStream = 1; - } - } - break; - case 1: // audio/video from player - VideoDisplayWakeup(); - Play(); - break; - case 2: // audio only from player, video from decoder - case 3: // audio only from player, no video (black screen) - 
Debug(3, "softhddev: FIXME: audio only, silence video errors\n"); - VideoDisplayWakeup(); - Play(); - break; - case 4: // video only from player, audio from decoder - VideoDisplayWakeup(); - Play(); - break; + } + } + if (MyAudioDecoder) { // tell audio parser we have new stream + if (AudioCodecID != AV_CODEC_ID_NONE) { + NewAudioStream = 1; + } + } + break; + case 1: // audio/video from player + VideoDisplayWakeup(); + Play(); + break; + case 2: // audio only from player, video from decoder + case 3: // audio only from player, no video (black screen) + Debug(3, "softhddev: FIXME: audio only, silence video errors\n"); + VideoDisplayWakeup(); + Play(); + break; + case 4: // video only from player, audio from decoder + VideoDisplayWakeup(); + Play(); + break; } return 1; } @@ -2545,7 +2520,7 @@ int SetPlayMode(int play_mode) int64_t GetSTC(void) { if (MyVideoStream->HwDecoder) { - return VideoGetClock(MyVideoStream->HwDecoder); + return VideoGetClock(MyVideoStream->HwDecoder); } // could happen during dettached Warning(_("softhddev: %s called without hw decoder\n"), __FUNCTION__); @@ -2569,21 +2544,19 @@ void GetVideoSize(int *width, int *height, double *aspect) int aspect_den; if (MyVideoStream->HwDecoder) { - VideoGetVideoSize(MyVideoStream->HwDecoder, width, height, &aspect_num, - &aspect_den); - *aspect = (double)aspect_num / (double)aspect_den; + VideoGetVideoSize(MyVideoStream->HwDecoder, width, height, &aspect_num, &aspect_den); + *aspect = (double)aspect_num / (double)aspect_den; } else { - *width = 0; - *height = 0; - *aspect = 1.0; // like default cDevice::GetVideoSize + *width = 0; + *height = 0; + *aspect = 1.0; // like default cDevice::GetVideoSize } #ifdef DEBUG if (done_width != *width || done_height != *height) { - Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height, - *aspect); - done_width = *width; - done_height = *height; + Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height, *aspect); + done_width = *width; + done_height = *height; } #endif } @@ -2600,10 +2573,10 @@ void TrickSpeed(int speed) { MyVideoStream->TrickSpeed = speed; if (MyVideoStream->HwDecoder) { - VideoSetTrickSpeed(MyVideoStream->HwDecoder, speed); + VideoSetTrickSpeed(MyVideoStream->HwDecoder, speed); } else { - // can happen, during startup - Debug(3, "softhddev: %s called without hw decoder\n", __FUNCTION__); + // can happen, during startup + Debug(3, "softhddev: %s called without hw decoder\n", __FUNCTION__); } StreamFreezed = 0; MyVideoStream->Freezed = 0; @@ -2616,21 +2589,20 @@ void Clear(void) { int i; - VideoResetPacket(MyVideoStream); // terminate work + VideoResetPacket(MyVideoStream); // terminate work MyVideoStream->ClearBuffers = 1; if (!SkipAudio) { - AudioFlushBuffers(); - //NewAudioStream = 1; + AudioFlushBuffers(); + //NewAudioStream = 1; } // FIXME: audio avcodec_flush_buffers, video is done by VideoClearBuffers // wait for empty buffers // FIXME: without softstart sync VideoDecode isn't called. 
for (i = 0; MyVideoStream->ClearBuffers && i < 20; ++i) { - usleep(1 * 100); + usleep(1 * 100); } - Debug(3, "[softhddev]%s: %dms buffers %d\n", __FUNCTION__, i, - VideoGetBuffers(MyVideoStream)); + Debug(3, "[softhddev]%s: %dms buffers %d\n", __FUNCTION__, i, VideoGetBuffers(MyVideoStream)); } /** @@ -2638,7 +2610,7 @@ void Clear(void) */ void Play(void) { - TrickSpeed(0); // normal play + TrickSpeed(0); // normal play SkipAudio = 0; AudioPlay(); } @@ -2680,12 +2652,12 @@ void StillPicture(const uint8_t * data, int size) // might be called in Suspended Mode if (!MyVideoStream->Decoder || MyVideoStream->SkipStream) { - return; + return; } // must be a PES start code if (size < 9 || !data || data[0] || data[1] || data[2] != 0x01) { - Error(_("[softhddev] invalid still video packet\n")); - return; + Error(_("[softhddev] invalid still video packet\n")); + return; } #ifdef STILL_DEBUG InStillPicture = 1; @@ -2693,83 +2665,82 @@ void StillPicture(const uint8_t * data, int size) VideoSetTrickSpeed(MyVideoStream->HwDecoder, 1); VideoResetPacket(MyVideoStream); - VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream - + VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream if (MyVideoStream->CodecID == AV_CODEC_ID_NONE) { - // FIXME: should detect codec, see PlayVideo - Error(_("[softhddev] no codec known for still picture\n")); + // FIXME: should detect codec, see PlayVideo + Error(_("[softhddev] no codec known for still picture\n")); } // FIXME: can check video backend, if a frame was produced. // output for max reference frames #ifdef STILL_DEBUG fprintf(stderr, "still-picture\n"); #endif - for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 12 : 12); ++i) { - const uint8_t *split; - int n; + for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 
12 : 12); ++i) { + const uint8_t *split; + int n; - // FIXME: vdr pes recordings sends mixed audio/video - if ((data[3] & 0xF0) == 0xE0) { // PES packet - split = data; - n = size; - // split the I-frame into single pes packets - do { - int len; + // FIXME: vdr pes recordings sends mixed audio/video + if ((data[3] & 0xF0) == 0xE0) { // PES packet + split = data; + n = size; + // split the I-frame into single pes packets + do { + int len; #ifdef DEBUG - if (split[0] || split[1] || split[2] != 0x01) { - Error(_("[softhddev] invalid still video packet\n")); - break; - } + if (split[0] || split[1] || split[2] != 0x01) { + Error(_("[softhddev] invalid still video packet\n")); + break; + } #endif - len = (split[4] << 8) + split[5]; - if (!len || len + 6 > n) { - if ((split[3] & 0xF0) == 0xE0) { - // video only - while (!PlayVideo3(MyVideoStream, split, n)) { // feed remaining bytes - } - } - break; - } - if ((split[3] & 0xF0) == 0xE0) { - // video only - while (!PlayVideo3(MyVideoStream, split, len + 6)) { // feed it - } - } - split += 6 + len; - n -= 6 + len; - } while (n > 6); + len = (split[4] << 8) + split[5]; + if (!len || len + 6 > n) { + if ((split[3] & 0xF0) == 0xE0) { + // video only + while (!PlayVideo3(MyVideoStream, split, n)) { // feed remaining bytes + } + } + break; + } + if ((split[3] & 0xF0) == 0xE0) { + // video only + while (!PlayVideo3(MyVideoStream, split, len + 6)) { // feed it + } + } + split += 6 + len; + n -= 6 + len; + } while (n > 6); - VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet - } else { // ES packet - if (MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) { - VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream - MyVideoStream->CodecID = AV_CODEC_ID_MPEG2VIDEO; - } - VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE,AV_NOPTS_VALUE, data, size); - } - if (MyVideoStream->CodecID == AV_CODEC_ID_H264) { - VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE,seq_end_h264,sizeof(seq_end_h264)); - } else if (MyVideoStream->CodecID == AV_CODEC_ID_HEVC) { - VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE,seq_end_h265,sizeof(seq_end_h265)); - } else { - VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_mpeg, sizeof(seq_end_mpeg)); - } - VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet + VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet + } else { // ES packet + if (MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) { + VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream + MyVideoStream->CodecID = AV_CODEC_ID_MPEG2VIDEO; + } + VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, data, size); + } + if (MyVideoStream->CodecID == AV_CODEC_ID_H264) { + VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_h264, sizeof(seq_end_h264)); + } else if (MyVideoStream->CodecID == AV_CODEC_ID_HEVC) { + VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_h265, sizeof(seq_end_h265)); + } else { + VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_mpeg, sizeof(seq_end_mpeg)); + } + VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet } // wait for empty buffers for (i = 0; VideoGetBuffers(MyVideoStream) && i < 50; ++i) { - usleep(10 * 1000); + usleep(10 * 1000); } - Debug(3, "[softhddev]%s: buffers %d %dms\n", __FUNCTION__, VideoGetBuffers(MyVideoStream), i * 10); + Debug(3, "[softhddev]%s: buffers %d %dms\n", __FUNCTION__, 
VideoGetBuffers(MyVideoStream), i * 10); #ifdef STILL_DEBUG InStillPicture = 0; #endif - VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream + VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream VideoSetTrickSpeed(MyVideoStream->HwDecoder, 0); } @@ -2792,29 +2763,28 @@ int Poll(int timeout) // poll is only called during replay, flush buffers after replay MyVideoStream->ClearClose = 1; for (;;) { - int full; - int t; - int used; - int filled; + int full; + int t; + int used; + int filled; - used = AudioUsedBytes(); - // FIXME: no video! - filled = atomic_read(&MyVideoStream->PacketsFilled); - // soft limit + hard limit - full = (used > AUDIO_MIN_BUFFER_FREE && filled > 3) - || AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE - || filled >= VIDEO_PACKET_MAX - 10; + used = AudioUsedBytes(); + // FIXME: no video! + filled = atomic_read(&MyVideoStream->PacketsFilled); + // soft limit + hard limit + full = (used > AUDIO_MIN_BUFFER_FREE && filled > 3) + || AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE || filled >= VIDEO_PACKET_MAX - 10; - if (!full || !timeout) { - return !full; - } + if (!full || !timeout) { + return !full; + } - t = 15; - if (timeout < t) { - t = timeout; - } - usleep(t * 1000); // let display thread work - timeout -= t; + t = 15; + if (timeout < t) { + t = timeout; + } + usleep(t * 1000); // let display thread work + timeout -= t; } } @@ -2826,16 +2796,16 @@ int Poll(int timeout) int Flush(int timeout) { if (atomic_read(&MyVideoStream->PacketsFilled)) { - if (timeout) { // let display thread work - usleep(timeout * 1000); - } - return !atomic_read(&MyVideoStream->PacketsFilled); + if (timeout) { // let display thread work + usleep(timeout * 1000); + } + return !atomic_read(&MyVideoStream->PacketsFilled); } return 1; } ////////////////////////////////////////////////////////////////////////////// -// OSD +// OSD ////////////////////////////////////////////////////////////////////////////// /** @@ -2857,10 +2827,9 @@ void GetOsdSize(int *width, int *height, double *aspect) #ifdef DEBUG if (done_width != *width || done_height != *height) { - Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height, - *aspect); - done_width = *width; - done_height = *height; + Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height, *aspect); + done_width = *width; + done_height = *height; } #endif } @@ -2885,8 +2854,7 @@ void OsdClose(void) ** @param x x-coordinate on screen of argb image ** @param y y-coordinate on screen of argb image */ -void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, - const uint8_t * argb, int x, int y) +void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t * argb, int x, int y) { // wakeup display for showing remote learning dialog VideoDisplayWakeup(); @@ -2901,26 +2869,21 @@ void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const char *CommandLineHelp(void) { return " -a device\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n" - " -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n" - " -c channel\taudio mixer channel name (fe. PCM)\n" - " -d display\tdisplay of x11 server (fe. :0.0)\n" - " -f\t\tstart with fullscreen window (only with window manager)\n" - " -g geometry\tx11 window geometry wxh+x+y\n" - " -v device\tvideo driver device (cuvid)\n" - " -s\t\tstart in suspended mode\n" - " -x\t\tstart x11 server, with -xx try to connect, if this fails\n" - " -X args\tX11 server arguments (f.e. 
-nocursor)\n" - " -w workaround\tenable/disable workarounds\n" - "\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n" - "\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n" - "\tstill-hw-decoder\tenable hardware decoder for still-pictures\n" - "\tstill-h264-hw-decoder\tenable h264 hw decoder for still-pictures\n" - "\talsa-driver-broken\tdisable broken alsa driver message\n" - "\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n" - "\talsa-close-open-delay\tenable close open delay to fix no sound bug\n" - "\tignore-repeat-pict\tdisable repeat pict message\n" - "\tuse-possible-defect-frames prefer faster channel switch\n" - " -D\t\tstart in detached mode\n"; + " -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n" + " -c channel\taudio mixer channel name (fe. PCM)\n" " -d display\tdisplay of x11 server (fe. :0.0)\n" + " -f\t\tstart with fullscreen window (only with window manager)\n" + " -g geometry\tx11 window geometry wxh+x+y\n" " -v device\tvideo driver device (cuvid)\n" + " -s\t\tstart in suspended mode\n" " -x\t\tstart x11 server, with -xx try to connect, if this fails\n" + " -X args\tX11 server arguments (f.e. -nocursor)\n" " -w workaround\tenable/disable workarounds\n" + "\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n" + "\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n" + "\tstill-hw-decoder\tenable hardware decoder for still-pictures\n" + "\tstill-h264-hw-decoder\tenable h264 hw decoder for still-pictures\n" + "\talsa-driver-broken\tdisable broken alsa driver message\n" + "\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n" + "\talsa-close-open-delay\tenable close open delay to fix no sound bug\n" + "\tignore-repeat-pict\tdisable repeat pict message\n" + "\tuse-possible-defect-frames prefer faster channel switch\n" " -D\t\tstart in detached mode\n"; } /** @@ -2932,127 +2895,124 @@ const char *CommandLineHelp(void) int ProcessArgs(int argc, char *const argv[]) { // - // Parse arguments. + // Parse arguments. 
// #ifdef __FreeBSD__ if (!strcmp(*argv, "softhddevice")) { - ++argv; - --argc; + ++argv; + --argc; } #endif for (;;) { - switch (getopt(argc, argv, "-a:c:d:fg:p:sv:w:xDX:")) { - case 'a': // audio device for pcm - AudioSetDevice(optarg); - continue; - case 'c': // channel of audio mixer - AudioSetChannel(optarg); - continue; - case 'p': // pass-through audio device - AudioSetPassthroughDevice(optarg); - continue; - case 'd': // x11 display name - X11DisplayName = optarg; - continue; - case 'f': // fullscreen mode - ConfigFullscreen = 1; - continue; - case 'g': // geometry - if (VideoSetGeometry(optarg) < 0) { - fprintf(stderr, - _ - ("Bad formated geometry please use: [=][{xX}][{+-}{+-}]\n")); - return 0; - } - continue; - case 'v': // video driver - VideoSetDevice(optarg); - continue; - case 'x': // x11 server - ConfigStartX11Server++; - continue; - case 'X': // x11 server arguments - X11ServerArguments = optarg; - continue; - case 's': // start in suspend mode - ConfigStartSuspended = 1; - continue; - case 'D': // start in detached mode - ConfigStartSuspended = -1; - continue; - case 'w': // workarounds - if (!strcasecmp("no-hw-decoder", optarg)) { - VideoHardwareDecoder = 0; - } else if (!strcasecmp("no-mpeg-hw-decoder", optarg)) { - VideoHardwareDecoder = 1; - if (ConfigStillDecoder) { - ConfigStillDecoder = 1; - } - } else if (!strcasecmp("still-hw-decoder", optarg)) { - ConfigStillDecoder = -1; - } else if (!strcasecmp("still-h264-hw-decoder", optarg)) { - ConfigStillDecoder = 1; - } else if (!strcasecmp("alsa-driver-broken", optarg)) { - AudioAlsaDriverBroken = 1; - } else if (!strcasecmp("alsa-no-close-open", optarg)) { - AudioAlsaNoCloseOpen = 1; - } else if (!strcasecmp("alsa-close-open-delay", optarg)) { - AudioAlsaCloseOpenDelay = 1; - } else if (!strcasecmp("ignore-repeat-pict", optarg)) { - VideoIgnoreRepeatPict = 1; - } else if (!strcasecmp("use-possible-defect-frames", optarg)) { - CodecUsePossibleDefectFrames = 1; - } else { - fprintf(stderr, _("Workaround '%s' unsupported\n"), - optarg); - return 0; - } - continue; - case EOF: - break; - case '-': - fprintf(stderr, _("We need no long options\n")); - return 0; - case ':': - fprintf(stderr, _("Missing argument for option '%c'\n"), - optopt); - return 0; - default: - fprintf(stderr, _("Unknown option '%c'\n"), optopt); - return 0; - } - break; + switch (getopt(argc, argv, "-a:c:d:fg:p:sv:w:xDX:")) { + case 'a': // audio device for pcm + AudioSetDevice(optarg); + continue; + case 'c': // channel of audio mixer + AudioSetChannel(optarg); + continue; + case 'p': // pass-through audio device + AudioSetPassthroughDevice(optarg); + continue; + case 'd': // x11 display name + X11DisplayName = optarg; + continue; + case 'f': // fullscreen mode + ConfigFullscreen = 1; + continue; + case 'g': // geometry + if (VideoSetGeometry(optarg) < 0) { + fprintf(stderr, + _("Bad formated geometry please use: [=][{xX}][{+-}{+-}]\n")); + return 0; + } + continue; + case 'v': // video driver + VideoSetDevice(optarg); + continue; + case 'x': // x11 server + ConfigStartX11Server++; + continue; + case 'X': // x11 server arguments + X11ServerArguments = optarg; + continue; + case 's': // start in suspend mode + ConfigStartSuspended = 1; + continue; + case 'D': // start in detached mode + ConfigStartSuspended = -1; + continue; + case 'w': // workarounds + if (!strcasecmp("no-hw-decoder", optarg)) { + VideoHardwareDecoder = 0; + } else if (!strcasecmp("no-mpeg-hw-decoder", optarg)) { + VideoHardwareDecoder = 1; + if (ConfigStillDecoder) { + 
ConfigStillDecoder = 1; + } + } else if (!strcasecmp("still-hw-decoder", optarg)) { + ConfigStillDecoder = -1; + } else if (!strcasecmp("still-h264-hw-decoder", optarg)) { + ConfigStillDecoder = 1; + } else if (!strcasecmp("alsa-driver-broken", optarg)) { + AudioAlsaDriverBroken = 1; + } else if (!strcasecmp("alsa-no-close-open", optarg)) { + AudioAlsaNoCloseOpen = 1; + } else if (!strcasecmp("alsa-close-open-delay", optarg)) { + AudioAlsaCloseOpenDelay = 1; + } else if (!strcasecmp("ignore-repeat-pict", optarg)) { + VideoIgnoreRepeatPict = 1; + } else if (!strcasecmp("use-possible-defect-frames", optarg)) { + CodecUsePossibleDefectFrames = 1; + } else { + fprintf(stderr, _("Workaround '%s' unsupported\n"), optarg); + return 0; + } + continue; + case EOF: + break; + case '-': + fprintf(stderr, _("We need no long options\n")); + return 0; + case ':': + fprintf(stderr, _("Missing argument for option '%c'\n"), optopt); + return 0; + default: + fprintf(stderr, _("Unknown option '%c'\n"), optopt); + return 0; + } + break; } while (optind < argc) { - fprintf(stderr, _("Unhandled argument '%s'\n"), argv[optind++]); + fprintf(stderr, _("Unhandled argument '%s'\n"), argv[optind++]); } return 1; } ////////////////////////////////////////////////////////////////////////////// -// Init/Exit +// Init/Exit ////////////////////////////////////////////////////////////////////////////// #include #include -#define XSERVER_MAX_ARGS 512 ///< how many arguments support +#define XSERVER_MAX_ARGS 512 ///< how many arguments support #ifndef __FreeBSD__ -static const char *X11Server = "/usr/bin/X"; ///< default x11 server +static const char *X11Server = "/usr/bin/X"; ///< default x11 server #else -static const char *X11Server = LOCALBASE "/bin/X"; ///< default x11 server +static const char *X11Server = LOCALBASE "/bin/X"; ///< default x11 server #endif -static pid_t X11ServerPid; ///< x11 server pid +static pid_t X11ServerPid; ///< x11 server pid /** ** USR1 signal handler. 
** ** @param sig signal number */ -static void Usr1Handler(int __attribute__ ((unused)) sig) +static void Usr1Handler(int __attribute__((unused)) sig) { ++Usr1Signal; @@ -3073,71 +3033,70 @@ static void StartXServer(void) int maxfd; int fd; - // X server + // X server if (X11Server) { - args[0] = X11Server; + args[0] = X11Server; } else { - Error(_("x-setup: No X server configured!\n")); - return; + Error(_("x-setup: No X server configured!\n")); + return; } argn = 1; - if (X11DisplayName) { // append display name - args[argn++] = X11DisplayName; - // export display for childs - setenv("DISPLAY", X11DisplayName, 1); + if (X11DisplayName) { // append display name + args[argn++] = X11DisplayName; + // export display for childs + setenv("DISPLAY", X11DisplayName, 1); } - // split X server arguments string into words + // split X server arguments string into words if ((sval = X11ServerArguments)) { - char *s; + char *s; #ifndef __FreeBSD__ - s = buf = strdupa(sval); + s = buf = strdupa(sval); #else - s = buf = alloca(strlen(sval) + 1); - strcpy(buf, sval); + s = buf = alloca(strlen(sval) + 1); + strcpy(buf, sval); #endif - while ((sval = strsep(&s, " \t"))) { - args[argn++] = sval; + while ((sval = strsep(&s, " \t"))) { + args[argn++] = sval; - if (argn == XSERVER_MAX_ARGS - 1) { // argument overflow - Error(_("x-setup: too many arguments for xserver\n")); - // argn = 1; - break; - } - } + if (argn == XSERVER_MAX_ARGS - 1) { // argument overflow + Error(_("x-setup: too many arguments for xserver\n")); + // argn = 1; + break; + } + } } // FIXME: auth // FIXME: append VTxx args[argn] = NULL; - // arm the signal + // arm the signal memset(&usr1, 0, sizeof(struct sigaction)); usr1.sa_handler = Usr1Handler; sigaction(SIGUSR1, &usr1, NULL); - Debug(3, "x-setup: Starting X server '%s' '%s'\n", args[0], - X11ServerArguments); - // fork - if ((pid = fork())) { // parent + Debug(3, "x-setup: Starting X server '%s' '%s'\n", args[0], X11ServerArguments); + // fork + if ((pid = fork())) { // parent - X11ServerPid = pid; - Debug(3, "x-setup: Started x-server pid=%d\n", X11ServerPid); + X11ServerPid = pid; + Debug(3, "x-setup: Started x-server pid=%d\n", X11ServerPid); - return; + return; } // child - signal(SIGUSR1, SIG_IGN); // ignore to force answer + signal(SIGUSR1, SIG_IGN); // ignore to force answer //setpgid(0,getpid()); setpgid(pid, 0); // close all open file-handles maxfd = sysconf(_SC_OPEN_MAX); - for (fd = 3; fd < maxfd; fd++) { // keep stdin, stdout, stderr - close(fd); // vdr should open with O_CLOEXEC + for (fd = 3; fd < maxfd; fd++) { // keep stdin, stdout, stderr + close(fd); // vdr should open with O_CLOEXEC } - // start the X server + // start the X server execvp(args[0], (char *const *)args); Error(_("x-setup: Failed to start X server '%s'\n"), args[0]); @@ -3153,9 +3112,9 @@ void SoftHdDeviceExit(void) AudioExit(); if (MyAudioDecoder) { - CodecAudioClose(MyAudioDecoder); - CodecAudioDelDecoder(MyAudioDecoder); - MyAudioDecoder = NULL; + CodecAudioClose(MyAudioDecoder); + CodecAudioDelDecoder(MyAudioDecoder); + MyAudioDecoder = NULL; } NewAudioStream = 0; av_packet_unref(AudioAvPkt); @@ -3165,38 +3124,36 @@ void SoftHdDeviceExit(void) CodecExit(); if (ConfigStartX11Server) { - Debug(3, "x-setup: Stop x11 server\n"); + Debug(3, "x-setup: Stop x11 server\n"); - if (X11ServerPid) { - int waittime; - int timeout; - pid_t wpid; - int status; + if (X11ServerPid) { + int waittime; + int timeout; + pid_t wpid; + int status; - kill(X11ServerPid, SIGTERM); - waittime = 0; - timeout = 500; // 0.5s - 
// wait for x11 finishing, with timeout - do { - wpid = waitpid(X11ServerPid, &status, WNOHANG); - if (wpid) { - break; - } - if (waittime++ < timeout) { - usleep(1 * 1000); - continue; - } - kill(X11ServerPid, SIGKILL); - } while (waittime < timeout); - if (wpid && WIFEXITED(status)) { - Debug(3, "x-setup: x11 server exited (%d)\n", - WEXITSTATUS(status)); - } - if (wpid && WIFSIGNALED(status)) { - Debug(3, "x-setup: x11 server killed (%d)\n", - WTERMSIG(status)); - } - } + kill(X11ServerPid, SIGTERM); + waittime = 0; + timeout = 500; // 0.5s + // wait for x11 finishing, with timeout + do { + wpid = waitpid(X11ServerPid, &status, WNOHANG); + if (wpid) { + break; + } + if (waittime++ < timeout) { + usleep(1 * 1000); + continue; + } + kill(X11ServerPid, SIGKILL); + } while (waittime < timeout); + if (wpid && WIFEXITED(status)) { + Debug(3, "x-setup: x11 server exited (%d)\n", WEXITSTATUS(status)); + } + if (wpid && WIFSIGNALED(status)) { + Debug(3, "x-setup: x11 server killed (%d)\n", WTERMSIG(status)); + } + } } pthread_mutex_destroy(&SuspendLockMutex); @@ -3216,7 +3173,7 @@ void SoftHdDeviceExit(void) int Start(void) { if (ConfigStartX11Server) { - StartXServer(); + StartXServer(); } CodecInit(); @@ -3227,28 +3184,27 @@ int Start(void) pthread_mutex_init(&SuspendLockMutex, NULL); if (!ConfigStartSuspended) { - // FIXME: AudioInit for HDMI after X11 startup - // StartAudio(); - AudioInit(); - av_new_packet(AudioAvPkt, AUDIO_BUFFER_SIZE); - MyAudioDecoder = CodecAudioNewDecoder(); - AudioCodecID = AV_CODEC_ID_NONE; - AudioChannelID = -1; + // FIXME: AudioInit for HDMI after X11 startup + // StartAudio(); + AudioInit(); + av_new_packet(AudioAvPkt, AUDIO_BUFFER_SIZE); + MyAudioDecoder = CodecAudioNewDecoder(); + AudioCodecID = AV_CODEC_ID_NONE; + AudioChannelID = -1; - if (!ConfigStartX11Server) { - StartVideo(); - } + if (!ConfigStartX11Server) { + StartVideo(); + } } else { - MyVideoStream->SkipStream = 1; - SkipAudio = 1; + MyVideoStream->SkipStream = 1; + SkipAudio = 1; } #ifndef NO_TS_AUDIO PesInit(PesDemuxAudio); #endif Info(_("[softhddev] ready%s\n"), - ConfigStartSuspended ? ConfigStartSuspended == - -1 ? " detached" : " suspended" : ""); + ConfigStartSuspended ? ConfigStartSuspended == -1 ? " detached" : " suspended" : ""); return ConfigStartSuspended; } @@ -3271,29 +3227,27 @@ void Stop(void) void Housekeeping(void) { // - // when starting an own X11 server fails, try to connect to a already - // running X11 server. This can take some time. + // when starting an own X11 server fails, try to connect to a already + // running X11 server. This can take some time. 
// - if (X11ServerPid) { // check if X11 server still running - pid_t wpid; - int status; + if (X11ServerPid) { // check if X11 server still running + pid_t wpid; + int status; - wpid = waitpid(X11ServerPid, &status, WNOHANG); - if (wpid) { - if (WIFEXITED(status)) { - Debug(3, "x-setup: x11 server exited (%d)\n", - WEXITSTATUS(status)); - } - if (WIFSIGNALED(status)) { - Debug(3, "x-setup: x11 server killed (%d)\n", - WTERMSIG(status)); - } - X11ServerPid = 0; - // video not running - if (ConfigStartX11Server > 1 && !MyVideoStream->HwDecoder) { - StartVideo(); - } - } + wpid = waitpid(X11ServerPid, &status, WNOHANG); + if (wpid) { + if (WIFEXITED(status)) { + Debug(3, "x-setup: x11 server exited (%d)\n", WEXITSTATUS(status)); + } + if (WIFSIGNALED(status)) { + Debug(3, "x-setup: x11 server killed (%d)\n", WTERMSIG(status)); + } + X11ServerPid = 0; + // video not running + if (ConfigStartX11Server > 1 && !MyVideoStream->HwDecoder) { + StartVideo(); + } + } } } @@ -3302,17 +3256,17 @@ void Housekeeping(void) */ void MainThreadHook(void) { - if (Usr1Signal) { // x11 server ready - // FIYME: x11 server keeps sending sigusr1 signals - signal(SIGUSR1, SIG_IGN); // ignore further signals - Usr1Signal = 0; - StartVideo(); - VideoDisplayWakeup(); + if (Usr1Signal) { // x11 server ready + // FIYME: x11 server keeps sending sigusr1 signals + signal(SIGUSR1, SIG_IGN); // ignore further signals + Usr1Signal = 0; + StartVideo(); + VideoDisplayWakeup(); } } ////////////////////////////////////////////////////////////////////////////// -// Suspend/Resume +// Suspend/Resume ////////////////////////////////////////////////////////////////////////////// /// call VDR support function @@ -3328,15 +3282,15 @@ extern void DelPip(void); void Suspend(int video, int audio, int dox11) { pthread_mutex_lock(&SuspendLockMutex); - if (MyVideoStream->SkipStream && SkipAudio) { // already suspended - pthread_mutex_unlock(&SuspendLockMutex); - return; + if (MyVideoStream->SkipStream && SkipAudio) { // already suspended + pthread_mutex_unlock(&SuspendLockMutex); + return; } Debug(3, "[softhddev]%s:\n", __FUNCTION__); #ifdef USE_PIP - DelPip(); // must stop PIP + DelPip(); // must stop PIP #endif // FIXME: should not be correct, if not both are suspended! 
@@ -3345,21 +3299,21 @@ void Suspend(int video, int audio, int dox11) SkipAudio = 1; if (audio) { - AudioExit(); - if (MyAudioDecoder) { - CodecAudioClose(MyAudioDecoder); - CodecAudioDelDecoder(MyAudioDecoder); - MyAudioDecoder = NULL; - } - NewAudioStream = 0; - av_packet_unref(AudioAvPkt); + AudioExit(); + if (MyAudioDecoder) { + CodecAudioClose(MyAudioDecoder); + CodecAudioDelDecoder(MyAudioDecoder); + MyAudioDecoder = NULL; + } + NewAudioStream = 0; + av_packet_unref(AudioAvPkt); } if (video) { - StopVideo(); + StopVideo(); } if (dox11) { - // FIXME: stop x11, if started + // FIXME: stop x11, if started } pthread_mutex_unlock(&SuspendLockMutex); @@ -3370,8 +3324,8 @@ void Suspend(int video, int audio, int dox11) */ void Resume(void) { - if (!MyVideoStream->SkipStream && !SkipAudio) { // we are not suspended - return; + if (!MyVideoStream->SkipStream && !SkipAudio) { // we are not suspended + return; } Debug(3, "[softhddev]%s:\n", __FUNCTION__); @@ -3379,20 +3333,20 @@ void Resume(void) pthread_mutex_lock(&SuspendLockMutex); // FIXME: start x11 - if (!MyVideoStream->HwDecoder) { // video not running - StartVideo(); + if (!MyVideoStream->HwDecoder) { // video not running + StartVideo(); } - if (!MyAudioDecoder) { // audio not running - // StartAudio(); - AudioInit(); - av_new_packet(AudioAvPkt, AUDIO_BUFFER_SIZE); - MyAudioDecoder = CodecAudioNewDecoder(); - AudioCodecID = AV_CODEC_ID_NONE; - AudioChannelID = -1; + if (!MyAudioDecoder) { // audio not running + // StartAudio(); + AudioInit(); + av_new_packet(AudioAvPkt, AUDIO_BUFFER_SIZE); + MyAudioDecoder = CodecAudioNewDecoder(); + AudioCodecID = AV_CODEC_ID_NONE; + AudioChannelID = -1; } if (MyVideoStream->Decoder) { - MyVideoStream->SkipStream = 0; + MyVideoStream->SkipStream = 0; } SkipAudio = 0; @@ -3413,9 +3367,9 @@ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *framet *duped = 0; *dropped = 0; *counter = 0; - *frametime = 0.0f; + *frametime = 0.0f; if (MyVideoStream->HwDecoder) { - VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime); + VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime); } } @@ -3430,12 +3384,12 @@ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *framet void ScaleVideo(int x, int y, int width, int height) { if (MyVideoStream->HwDecoder) { - VideoSetOutputPosition(MyVideoStream->HwDecoder, x, y, width, height); + VideoSetOutputPosition(MyVideoStream->HwDecoder, x, y, width, height); } } ////////////////////////////////////////////////////////////////////////////// -// PIP +// PIP ////////////////////////////////////////////////////////////////////////////// #ifdef USE_PIP @@ -3452,19 +3406,17 @@ void ScaleVideo(int x, int y, int width, int height) ** @param pip_width pip window width OSD relative ** @param pip_height pip window height OSD relative */ -void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, - int pip_width, int pip_height) +void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height) { - if (!MyVideoStream->HwDecoder) { // video not running - return; + if (!MyVideoStream->HwDecoder) { // video not running + return; } ScaleVideo(x, y, width, height); - if (!PipVideoStream->HwDecoder) { // pip not running - return; + if (!PipVideoStream->HwDecoder) { // pip not running + return; } - VideoSetOutputPosition(PipVideoStream->HwDecoder, pip_x, pip_y, pip_width, - pip_height); + 
VideoSetOutputPosition(PipVideoStream->HwDecoder, pip_x, pip_y, pip_width, pip_height); } /** @@ -3479,15 +3431,14 @@ void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, ** @param pip_width pip window width OSD relative ** @param pip_height pip window height OSD relative */ -void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, - int pip_width, int pip_height) +void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height) { - if (!MyVideoStream->HwDecoder) { // video not running - return; + if (!MyVideoStream->HwDecoder) { // video not running + return; } if (!PipVideoStream->Decoder) { - VideoStreamOpen(PipVideoStream); + VideoStreamOpen(PipVideoStream); } PipSetPosition(x, y, width, height, pip_x, pip_y, pip_width, pip_height); } @@ -3499,15 +3450,15 @@ void PipStop(void) { int i; - if (!MyVideoStream->HwDecoder) { // video not running - return; + if (!MyVideoStream->HwDecoder) { // video not running + return; } ScaleVideo(0, 0, 0, 0); PipVideoStream->Close = 1; for (i = 0; PipVideoStream->Close && i < 50; ++i) { - usleep(1 * 1000); + usleep(1 * 1000); } Info("[softhddev]%s: pip close %dms\n", __FUNCTION__, i); } diff --git a/softhddev.h b/softhddev.h index bef4337..9b2bb63 100644 --- a/softhddev.h +++ b/softhddev.h @@ -1,23 +1,23 @@ /// -/// @file softhddev.h @brief software HD device plugin header file. +/// @file softhddev.h @brief software HD device plugin header file. /// -/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. 
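Editor's note: for orientation, this is how a caller would typically drive the PIP entry points whose prototypes were just re-wrapped above (PipStart, PipSetPosition, PipStop). The coordinates are arbitrary example values for a 1920x1080 OSD and ExamplePipUsage is a hypothetical name; this is an editor's illustration, not code from the plugin:

// Open a PIP window in the top-right quarter of a 1920x1080 OSD,
// move it to the bottom-right quarter, then close it again.
void ExamplePipUsage(void)
{
    PipStart(0, 0, 1920, 1080, 1280, 0, 640, 360);          // main video + PIP top-right

    // ... feed video packets to the PIP stream via PipPlayVideo() ...

    PipSetPosition(0, 0, 1920, 1080, 1280, 720, 640, 360);  // move PIP bottom-right

    PipStop();                                               // waits up to ~50 ms for the close
}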
/// -/// $Id: efb2e251dd7082138ec21609478d2402be8208cf $ +/// $Id: efb2e251dd7082138ec21609478d2402be8208cf $ ////////////////////////////////////////////////////////////////////////////// #ifdef __cplusplus @@ -25,8 +25,7 @@ extern "C" { #endif /// C callback feed key press - extern void FeedKeyPress(const char *, const char *, int, int, - const char *); + extern void FeedKeyPress(const char *, const char *, int, int, const char *); /// C plugin get osd size and ascpect extern void GetOsdSize(int *, int *, double *); @@ -34,8 +33,7 @@ extern "C" /// C plugin close osd extern void OsdClose(void); /// C plugin draw osd pixmap - extern void OsdDrawARGB(int, int, int, int, int, const uint8_t *, int, - int); + extern void OsdDrawARGB(int, int, int, int, int, const uint8_t *, int, int); /// C plugin play audio packet extern int PlayAudio(const uint8_t *, int, uint8_t); @@ -111,7 +109,7 @@ extern "C" /// Pip play video packet extern int PipPlayVideo(const uint8_t *, int); - extern const char *X11DisplayName; ///< x11 display name + extern const char *X11DisplayName; ///< x11 display name #ifdef __cplusplus } #endif diff --git a/softhddevice.h b/softhddevice.h index 2ed0692..45906a1 100644 --- a/softhddevice.h +++ b/softhddevice.h @@ -1,21 +1,21 @@ /// -/// @file softhddevice.h @brief software HD device plugin header file. +/// @file softhddevice.h @brief software HD device plugin header file. /// -/// Copyright (c) 2011, 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2011, 2014 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: 3bc44809950ec019529638cad9223a85d4ab5576 $ +/// $Id: 3bc44809950ec019529638cad9223a85d4ab5576 $ ////////////////////////////////////////////////////////////////////////////// diff --git a/softhddevice_service.h b/softhddevice_service.h index a5a0e2c..7794f83 100644 --- a/softhddevice_service.h +++ b/softhddevice_service.h @@ -1,23 +1,23 @@ /// -/// @file softhddev_service.h @brief software HD device service header file. +/// @file softhddev_service.h @brief software HD device service header file. /// -/// Copyright (c) 2012 by durchflieger. All Rights Reserved. +/// Copyright (c) 2012 by durchflieger. All Rights Reserved. 
/// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: c7c2d5e8b724515d7c767668aab717b27f4e4068 $ +/// $Id: c7c2d5e8b724515d7c767668aab717b27f4e4068 $ ////////////////////////////////////////////////////////////////////////////// #pragma once diff --git a/video.c b/video.c index 96888b5..00249d6 100644 --- a/video.c +++ b/video.c @@ -1,60 +1,58 @@ /// -/// @file video.c @brief Video module +/// @file video.c @brief Video module /// -/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. /// -/// Contributor(s): +/// Contributor(s): /// -/// License: AGPLv3 +/// License: AGPLv3 /// -/// This program is free software: you can redistribute it and/or modify -/// it under the terms of the GNU Affero General Public License as -/// published by the Free Software Foundation, either version 3 of the -/// License. +/// This program is free software: you can redistribute it and/or modify +/// it under the terms of the GNU Affero General Public License as +/// published by the Free Software Foundation, either version 3 of the +/// License. /// -/// This program is distributed in the hope that it will be useful, -/// but WITHOUT ANY WARRANTY; without even the implied warranty of -/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -/// GNU Affero General Public License for more details. +/// This program is distributed in the hope that it will be useful, +/// but WITHOUT ANY WARRANTY; without even the implied warranty of +/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/// GNU Affero General Public License for more details. /// -/// $Id: bacf89f24503be74d113a83139a277ff2290014a $ +/// $Id: bacf89f24503be74d113a83139a277ff2290014a $ ////////////////////////////////////////////////////////////////////////////// /// -/// @defgroup Video The video module. +/// @defgroup Video The video module. /// -/// This module contains all video rendering functions. +/// This module contains all video rendering functions. /// -/// @todo disable screen saver support +/// @todo disable screen saver support /// -/// Uses Xlib where it is needed for VA-API or cuvid. XCB is used for -/// everything else. +/// Uses Xlib where it is needed for VA-API or cuvid. XCB is used for +/// everything else. 
/// -/// - X11 -/// - OpenGL rendering -/// - OpenGL rendering with GLX texture-from-pixmap -/// - Xrender rendering +/// - X11 +/// - OpenGL rendering +/// - OpenGL rendering with GLX texture-from-pixmap +/// - Xrender rendering /// -/// @todo FIXME: use vaErrorStr for all VA-API errors. +/// @todo FIXME: use vaErrorStr for all VA-API errors. /// //#define PLACEBO +#define USE_XLIB_XCB ///< use xlib/xcb backend +#define noUSE_SCREENSAVER ///< support disable screensaver -#define USE_XLIB_XCB ///< use xlib/xcb backend -#define noUSE_SCREENSAVER ///< support disable screensaver - -#define USE_GRAB ///< experimental grab code -//#define USE_GLX ///< outdated GLX code -#define USE_DOUBLEBUFFER ///< use GLX double buffers -#define USE_CUVID ///< enable cuvid support -//#define AV_INFO ///< log a/v sync informations +#define USE_GRAB ///< experimental grab code +//#define USE_GLX ///< outdated GLX code +#define USE_DOUBLEBUFFER ///< use GLX double buffers +#define USE_CUVID ///< enable cuvid support +//#define AV_INFO ///< log a/v sync informations #ifndef AV_INFO_TIME -#define AV_INFO_TIME (50 * 60) ///< a/v info every minute +#define AV_INFO_TIME (50 * 60) ///< a/v info every minute #endif -#define USE_VIDEO_THREAD ///< run decoder in an own thread - +#define USE_VIDEO_THREAD ///< run decoder in an own thread #include #include @@ -62,11 +60,11 @@ #include #include -#include /* File Control Definitions */ -#include /* POSIX Terminal Control Definitions */ -#include /* UNIX Standard Definitions */ -#include /* ERROR Number Definitions */ -#include /* ioctl() */ +#include /* File Control Definitions */ +#include /* POSIX Terminal Control Definitions */ +#include /* UNIX Standard Definitions */ +#include /* ERROR Number Definitions */ +#include /* ioctl() */ #include #include @@ -77,8 +75,8 @@ #include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #ifdef USE_VIDEO_THREAD #ifndef __USE_GNU @@ -126,7 +124,7 @@ typedef enum XCB_EWMH_WM_STATE_REMOVE = 0, /* Add/set property */ XCB_EWMH_WM_STATE_ADD = 1, - /* Toggle property */ + /* Toggle property */ XCB_EWMH_WM_STATE_TOGGLE = 2 } xcb_ewmh_wm_state_action_t; #endif @@ -134,8 +132,8 @@ typedef enum #ifdef USE_GLX #include -//#include // For GL_COLOR_BUFFER_BIT -//#include // For GL_COLOR_BUFFER_BIT +//#include // For GL_COLOR_BUFFER_BIT +//#include // For GL_COLOR_BUFFER_BIT //#include //#include // only for gluErrorString @@ -148,8 +146,8 @@ typedef enum #include #ifdef CUVID -//#include // For GL_COLOR_BUFFER_BIT -//#include // For GL_COLOR_BUFFER_BIT +//#include // For GL_COLOR_BUFFER_BIT +//#include // For GL_COLOR_BUFFER_BIT #include #include #include @@ -174,7 +172,7 @@ typedef enum #include #include #ifndef GL_OES_EGL_image -typedef void* GLeglImageOES; +typedef void *GLeglImageOES; #endif #ifndef EGL_KHR_image typedef void *EGLImageKHR; @@ -197,7 +195,7 @@ typedef void *EGLImageKHR; #include #endif -#include "iatomic.h" // portable atomic_t +#include "iatomic.h" // portable atomic_t #include "misc.h" #include "video.h" #include "audio.h" @@ -215,126 +213,127 @@ typedef void *EGLImageKHR; #endif //---------------------------------------------------------------------------- -// Declarations +// Declarations //---------------------------------------------------------------------------- /// -/// Video resolutions selector. +/// Video resolutions selector. 
/// typedef enum _video_resolutions_ { - VideoResolution576i, ///< ...x576 interlaced - VideoResolution720p, ///< ...x720 progressive - VideoResolutionFake1080i, ///< 1280x1080 1440x1080 interlaced - VideoResolution1080i, ///< 1920x1080 interlaced - VideoResolutionUHD, /// UHD progressive - VideoResolutionMax ///< number of resolution indexs + VideoResolution576i, ///< ...x576 interlaced + VideoResolution720p, ///< ...x720 progressive + VideoResolutionFake1080i, ///< 1280x1080 1440x1080 interlaced + VideoResolution1080i, ///< 1920x1080 interlaced + VideoResolutionUHD, /// UHD progressive + VideoResolutionMax ///< number of resolution indexs } VideoResolutions; /// -/// Video deinterlace modes. +/// Video deinterlace modes. /// typedef enum _video_deinterlace_modes_ { - VideoDeinterlaceCuda, ///< Cuda build in deinterlace - VideoDeinterlaceYadif, ///< Yadif deinterlace + VideoDeinterlaceCuda, ///< Cuda build in deinterlace + VideoDeinterlaceYadif, ///< Yadif deinterlace } VideoDeinterlaceModes; /// -/// Video scaleing modes. +/// Video scaleing modes. /// typedef enum _video_scaling_modes_ { - VideoScalingNormal, ///< normal scaling - VideoScalingFast, ///< fastest scaling - VideoScalingHQ, ///< high quality scaling - VideoScalingAnamorphic, ///< anamorphic scaling + VideoScalingNormal, ///< normal scaling + VideoScalingFast, ///< fastest scaling + VideoScalingHQ, ///< high quality scaling + VideoScalingAnamorphic, ///< anamorphic scaling } VideoScalingModes; /// -/// Video zoom modes. +/// Video zoom modes. /// typedef enum _video_zoom_modes_ { - VideoNormal, ///< normal - VideoStretch, ///< stretch to all edges - VideoCenterCutOut, ///< center and cut out - VideoAnamorphic, ///< anamorphic scaled (unsupported) + VideoNormal, ///< normal + VideoStretch, ///< stretch to all edges + VideoCenterCutOut, ///< center and cut out + VideoAnamorphic, ///< anamorphic scaled (unsupported) } VideoZoomModes; /// -/// Video color space conversions. +/// Video color space conversions. /// typedef enum _video_color_space_ { - VideoColorSpaceNone, ///< no conversion - VideoColorSpaceBt601, ///< ITU.BT-601 Y'CbCr - VideoColorSpaceBt709, ///< ITU.BT-709 HDTV Y'CbCr - VideoColorSpaceSmpte240 ///< SMPTE-240M Y'PbPr + VideoColorSpaceNone, ///< no conversion + VideoColorSpaceBt601, ///< ITU.BT-601 Y'CbCr + VideoColorSpaceBt709, ///< ITU.BT-709 HDTV Y'CbCr + VideoColorSpaceSmpte240 ///< SMPTE-240M Y'PbPr } VideoColorSpace; /// -/// Video output module structure and typedef. +/// Video output module structure and typedef. 
/// typedef struct _video_module_ { - const char *Name; ///< video output module name - char Enabled; ///< flag output module enabled + const char *Name; ///< video output module name + char Enabled; ///< flag output module enabled /// allocate new video hw decoder VideoHwDecoder *(*const NewHwDecoder)(VideoStream *); - void (*const DelHwDecoder) (VideoHwDecoder *); - unsigned (*const GetSurface) (VideoHwDecoder *, const AVCodecContext *); - void (*const ReleaseSurface) (VideoHwDecoder *, unsigned); - enum AVPixelFormat (*const get_format) (VideoHwDecoder *, AVCodecContext *, - const enum AVPixelFormat *); - void (*const RenderFrame) (VideoHwDecoder *, const AVCodecContext *, - const AVFrame *); + void (*const DelHwDecoder)(VideoHwDecoder *); + unsigned (*const GetSurface)(VideoHwDecoder *, const AVCodecContext *); + void (*const ReleaseSurface)(VideoHwDecoder *, unsigned); + enum AVPixelFormat (*const get_format) (VideoHwDecoder *, AVCodecContext *, const enum AVPixelFormat *); + void (*const RenderFrame)(VideoHwDecoder *, const AVCodecContext *, const AVFrame *); void *(*const GetHwAccelContext)(VideoHwDecoder *); - void (*const SetClock) (VideoHwDecoder *, int64_t); + void (*const SetClock)(VideoHwDecoder *, int64_t); int64_t(*const GetClock) (const VideoHwDecoder *); - void (*const SetClosing) (const VideoHwDecoder *); - void (*const ResetStart) (const VideoHwDecoder *); - void (*const SetTrickSpeed) (const VideoHwDecoder *, int); + void (*const SetClosing)(const VideoHwDecoder *); + void (*const ResetStart)(const VideoHwDecoder *); + void (*const SetTrickSpeed)(const VideoHwDecoder *, int); uint8_t *(*const GrabOutput)(int *, int *, int *, int); - void (*const GetStats) (VideoHwDecoder *, int *, int *, int *, int *, float *); - void (*const SetBackground) (uint32_t); - void (*const SetVideoMode) (void); + void (*const GetStats)(VideoHwDecoder *, int *, int *, int *, int *, float *); + void (*const SetBackground)(uint32_t); + void (*const SetVideoMode)(void); /// module display handler thread - void (*const DisplayHandlerThread) (void); + void (*const DisplayHandlerThread)(void); - void (*const OsdClear) (void); ///< clear OSD + void (*const OsdClear)(void); ///< clear OSD /// draw OSD ARGB area - void (*const OsdDrawARGB) (int, int, int, int, int, const uint8_t *, int, - int); - void (*const OsdInit) (int, int); ///< initialize OSD - void (*const OsdExit) (void); ///< cleanup OSD + void (*const OsdDrawARGB)(int, int, int, int, int, const uint8_t *, int, int); + void (*const OsdInit)(int, int); ///< initialize OSD + void (*const OsdExit)(void); ///< cleanup OSD - int (*const Init) (const char *); ///< initialize video output module - void (*const Exit) (void); ///< cleanup video output module + int (*const Init)(const char *); ///< initialize video output module + void (*const Exit)(void); ///< cleanup video output module } VideoModule; -typedef struct { +typedef struct +{ + /** Left X co-ordinate. Inclusive. */ uint32_t x0; + /** Top Y co-ordinate. Inclusive. */ uint32_t y0; + /** Right X co-ordinate. Exclusive. */ uint32_t x1; + /** Bottom Y co-ordinate. Exclusive. 
*/ uint32_t y1; } VdpRect; //---------------------------------------------------------------------------- -// Defines +// Defines //---------------------------------------------------------------------------- -#define CODEC_SURFACES_MAX 12 // +#define CODEC_SURFACES_MAX 12 // - -#define VIDEO_SURFACES_MAX 6 ///< video output surfaces for queue -//#define OUTPUT_SURFACES_MAX 4 ///< output surfaces for flip page +#define VIDEO_SURFACES_MAX 6 ///< video output surfaces for queue +//#define OUTPUT_SURFACES_MAX 4 ///< output surfaces for flip page #ifdef VAAPI #define PIXEL_FORMAT AV_PIX_FMT_VAAPI #define SWAP_BUFFER_SIZE 3 @@ -344,44 +343,42 @@ typedef struct { #define SWAP_BUFFER_SIZE 1 #endif //---------------------------------------------------------------------------- -// Variables +// Variables //---------------------------------------------------------------------------- -AVBufferRef *HwDeviceContext; ///< ffmpeg HW device context -char VideoIgnoreRepeatPict; ///< disable repeat pict warning - - +AVBufferRef *HwDeviceContext; ///< ffmpeg HW device context +char VideoIgnoreRepeatPict; ///< disable repeat pict warning unsigned char *posd; -static const char *VideoDriverName="cuvid"; ///< video output device -static Display *XlibDisplay; ///< Xlib X11 display -static xcb_connection_t *Connection; ///< xcb connection -static xcb_colormap_t VideoColormap; ///< video colormap -static xcb_window_t VideoWindow; ///< video window -static xcb_screen_t const *VideoScreen; ///< video screen -static uint32_t VideoBlankTick; ///< blank cursor timer -static xcb_pixmap_t VideoCursorPixmap; ///< blank curosr pixmap -static xcb_cursor_t VideoBlankCursor; ///< empty invisible cursor +static const char *VideoDriverName = "cuvid"; ///< video output device +static Display *XlibDisplay; ///< Xlib X11 display +static xcb_connection_t *Connection; ///< xcb connection +static xcb_colormap_t VideoColormap; ///< video colormap +static xcb_window_t VideoWindow; ///< video window +static xcb_screen_t const *VideoScreen; ///< video screen +static uint32_t VideoBlankTick; ///< blank cursor timer +static xcb_pixmap_t VideoCursorPixmap; ///< blank curosr pixmap +static xcb_cursor_t VideoBlankCursor; ///< empty invisible cursor -static int VideoWindowX; ///< video output window x coordinate -static int VideoWindowY; ///< video outout window y coordinate -static unsigned VideoWindowWidth; ///< video output window width -static unsigned VideoWindowHeight; ///< video output window height +static int VideoWindowX; ///< video output window x coordinate +static int VideoWindowY; ///< video outout window y coordinate +static unsigned VideoWindowWidth; ///< video output window width +static unsigned VideoWindowHeight; ///< video output window height -static const VideoModule NoopModule; ///< forward definition of noop module +static const VideoModule NoopModule; ///< forward definition of noop module /// selected video module static const VideoModule *VideoUsedModule = &NoopModule; -signed char VideoHardwareDecoder = -1; ///< flag use hardware decoder +signed char VideoHardwareDecoder = -1; ///< flag use hardware decoder -static char VideoSurfaceModesChanged; ///< flag surface modes changed +static char VideoSurfaceModesChanged; ///< flag surface modes changed /// flag use transparent OSD. 
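Editor's note: the VideoModule structure reformatted above is a plain function-pointer table; each backend fills in the slots it supports and the rest of video.c dispatches through VideoUsedModule without knowing which backend is active. A minimal sketch of that dispatch style, with hypothetical names, purely to illustrate the pattern:

// Hypothetical two-entry module table in the style of VideoModule.
typedef struct _example_module_
{
    const char *Name;                   ///< module name
    int (*const Init)(const char *);    ///< initialize output module
    void (*const Exit)(void);           ///< cleanup output module
} ExampleModule;

static int ExampleNoopInit(const char *display) { (void)display; return 1; }
static void ExampleNoopExit(void) { }

static const ExampleModule ExampleNoop = {
    .Name = "noop",
    .Init = ExampleNoopInit,
    .Exit = ExampleNoopExit,
};

static const ExampleModule *ExampleUsedModule = &ExampleNoop;

// Callers never test which backend is selected; they just dispatch:
//     ExampleUsedModule->Init(":0.0");
//     ExampleUsedModule->Exit();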
static const char VideoTransparentOsd = 1; -static uint32_t VideoBackground; ///< video background color -static char VideoStudioLevels; ///< flag use studio levels +static uint32_t VideoBackground; ///< video background color +static char VideoStudioLevels; ///< flag use studio levels /// Default deinterlace mode. static VideoDeinterlaceModes VideoDeinterlace[VideoResolutionMax]; @@ -419,10 +416,10 @@ static VideoZoomModes Video4to3ZoomMode; /// Default zoom mode for 16:9 and others static VideoZoomModes VideoOtherZoomMode; -static char Video60HzMode; ///< handle 60hz displays -static char VideoSoftStartSync; ///< soft start sync audio/video -static const int VideoSoftStartFrames = 100; ///< soft start frames -static char VideoShowBlackPicture; ///< flag show black picture +static char Video60HzMode; ///< handle 60hz displays +static char VideoSoftStartSync; ///< soft start sync audio/video +static const int VideoSoftStartFrames = 100; ///< soft start frames +static char VideoShowBlackPicture; ///< flag show black picture static float VideoBrightness = 0.0f; static float VideoContrast = 1.0f; @@ -434,103 +431,94 @@ static int VideoScalerTest = 0; static int VideoColorBlindness = 0; static float VideoColorBlindnessFaktor = 1.0f; -static xcb_atom_t WmDeleteWindowAtom; ///< WM delete message atom -static xcb_atom_t NetWmState; ///< wm-state message atom -static xcb_atom_t NetWmStateFullscreen; ///< fullscreen wm-state message atom +static xcb_atom_t WmDeleteWindowAtom; ///< WM delete message atom +static xcb_atom_t NetWmState; ///< wm-state message atom +static xcb_atom_t NetWmStateFullscreen; ///< fullscreen wm-state message atom static xcb_atom_t NetWmStateAbove; #ifdef DEBUG -extern uint32_t VideoSwitch; ///< ticks for channel switch +extern uint32_t VideoSwitch; ///< ticks for channel switch #endif -extern void AudioVideoReady(int64_t); ///< tell audio video is ready - +extern void AudioVideoReady(int64_t); ///< tell audio video is ready #ifdef USE_VIDEO_THREAD -static pthread_t VideoThread; ///< video decode thread -static pthread_cond_t VideoWakeupCond; ///< wakeup condition variable -static pthread_mutex_t VideoMutex; ///< video condition mutex -static pthread_mutex_t VideoLockMutex; ///< video lock mutex -pthread_mutex_t OSDMutex; ///< OSD update mutex +static pthread_t VideoThread; ///< video decode thread +static pthread_cond_t VideoWakeupCond; ///< wakeup condition variable +static pthread_mutex_t VideoMutex; ///< video condition mutex +static pthread_mutex_t VideoLockMutex; ///< video lock mutex +pthread_mutex_t OSDMutex; ///< OSD update mutex #endif +static pthread_t VideoDisplayThread; ///< video display thread +//static pthread_cond_t VideoDisplayWakeupCond; ///< wakeup condition variable +//static pthread_mutex_t VideoDisplayMutex; ///< video condition mutex +//static pthread_mutex_t VideoDisplayLockMutex; ///< video lock mutex -static pthread_t VideoDisplayThread; ///< video display thread -//static pthread_cond_t VideoDisplayWakeupCond; ///< wakeup condition variable -//static pthread_mutex_t VideoDisplayMutex; ///< video condition mutex -//static pthread_mutex_t VideoDisplayLockMutex; ///< video lock mutex +static int OsdConfigWidth; ///< osd configured width +static int OsdConfigHeight; ///< osd configured height +static char OsdShown; ///< flag show osd +static char Osd3DMode; ///< 3D OSD mode +static int OsdWidth; ///< osd width +static int OsdHeight; ///< osd height +static int OsdDirtyX; ///< osd dirty area x +static int OsdDirtyY; ///< osd dirty area y +static int 
OsdDirtyWidth; ///< osd dirty area width +static int OsdDirtyHeight; ///< osd dirty area height +static void (*VideoEventCallback)(void) = NULL; /// callback function to notify VDR about Video Events - -static int OsdConfigWidth; ///< osd configured width -static int OsdConfigHeight; ///< osd configured height -static char OsdShown; ///< flag show osd -static char Osd3DMode; ///< 3D OSD mode -static int OsdWidth; ///< osd width -static int OsdHeight; ///< osd height -static int OsdDirtyX; ///< osd dirty area x -static int OsdDirtyY; ///< osd dirty area y -static int OsdDirtyWidth; ///< osd dirty area width -static int OsdDirtyHeight; ///< osd dirty area height - - -static void (*VideoEventCallback)(void) = NULL; /// callback function to notify VDR about Video Events - -static int64_t VideoDeltaPTS; ///< FIXME: fix pts +static int64_t VideoDeltaPTS; ///< FIXME: fix pts #ifdef USE_SCREENSAVER -static char DPMSDisabled; ///< flag we have disabled dpms -static char EnableDPMSatBlackScreen; ///< flag we should enable dpms at black screen +static char DPMSDisabled; ///< flag we have disabled dpms +static char EnableDPMSatBlackScreen; ///< flag we should enable dpms at black screen #endif -static int EglEnabled; ///< use EGL -static int GlxVSyncEnabled = 1; ///< enable/disable v-sync - +static int EglEnabled; ///< use EGL +static int GlxVSyncEnabled = 1; ///< enable/disable v-sync #ifdef CUVID - static GLXContext glxSharedContext; ///< shared gl context - static GLXContext glxContext; ///< our gl context +static GLXContext glxSharedContext; ///< shared gl context +static GLXContext glxContext; ///< our gl context #ifdef USE_VIDEO_THREAD - static GLXContext glxThreadContext; ///< our gl context for the thread +static GLXContext glxThreadContext; ///< our gl context for the thread #endif - static XVisualInfo *GlxVisualInfo; ///< our gl visual - static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context); - GLXContext OSDcontext; +static XVisualInfo *GlxVisualInfo; ///< our gl visual +static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context); +GLXContext OSDcontext; #else - static EGLContext eglSharedContext; ///< shared gl context +static EGLContext eglSharedContext; ///< shared gl context - static EGLContext eglContext; ///< our gl context - static EGLConfig eglConfig; - static EGLDisplay eglDisplay; - static EGLSurface eglSurface,eglOSDSurface; - static EGLint eglAttrs[10]; - static int eglVersion = 2; - static EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext, - EGLenum, EGLClientBuffer, - const EGLint *); - static EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR); - static void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES); +static EGLContext eglContext; ///< our gl context +static EGLConfig eglConfig; +static EGLDisplay eglDisplay; +static EGLSurface eglSurface, eglOSDSurface; +static EGLint eglAttrs[10]; +static int eglVersion = 2; +static EGLImageKHR(EGLAPIENTRY * CreateImageKHR) (EGLDisplay, EGLContext, EGLenum, EGLClientBuffer, const EGLint *); +static EGLBoolean(EGLAPIENTRY * DestroyImageKHR) (EGLDisplay, EGLImageKHR); +static void (EGLAPIENTRY * EGLImageTargetTexture2DOES) (GLenum, GLeglImageOES); #ifdef USE_VIDEO_THREAD - static EGLContext eglThreadContext; ///< our gl context for the thread +static EGLContext eglThreadContext; ///< our gl context for the thread #endif - static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContext context); - EGLContext 
OSDcontext; +static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContext context); +EGLContext OSDcontext; #endif -static GLuint OsdGlTextures[2]; ///< gl texture for OSD -static int OsdIndex=0; ///< index into OsdGlTextures - +static GLuint OsdGlTextures[2]; ///< gl texture for OSD +static int OsdIndex = 0; ///< index into OsdGlTextures //---------------------------------------------------------------------------- -// Common Functions +// Common Functions //---------------------------------------------------------------------------- -void VideoThreadLock(void); ///< lock video thread -void VideoThreadUnlock(void); ///< unlock video thread -static void VideoThreadExit(void); ///< exit/kill video thread +void VideoThreadLock(void); ///< lock video thread +void VideoThreadUnlock(void); ///< unlock video thread +static void VideoThreadExit(void); ///< exit/kill video thread #ifdef USE_SCREENSAVER static void X11SuspendScreenSaver(xcb_connection_t *, int); @@ -540,110 +528,105 @@ static void X11DPMSDisable(xcb_connection_t *); #endif /// -/// Update video pts. +/// Update video pts. /// -/// @param pts_p pointer to pts -/// @param interlaced interlaced flag (frame isn't right) -/// @param frame frame to display +/// @param pts_p pointer to pts +/// @param interlaced interlaced flag (frame isn't right) +/// @param frame frame to display /// -/// @note frame->interlaced_frame can't be used for interlace detection +/// @note frame->interlaced_frame can't be used for interlace detection /// -static void VideoSetPts(int64_t * pts_p, int interlaced, - const AVCodecContext * video_ctx, const AVFrame * frame) +static void VideoSetPts(int64_t * pts_p, int interlaced, const AVCodecContext * video_ctx, const AVFrame * frame) { int64_t pts; int duration; // - // Get duration for this frame. - // FIXME: using framerate as workaround for av_frame_get_pkt_duration + // Get duration for this frame. + // FIXME: using framerate as workaround for av_frame_get_pkt_duration // - + // if (video_ctx->framerate.num && video_ctx->framerate.den) { -// duration = 1000 * video_ctx->framerate.den / video_ctx->framerate.num; +// duration = 1000 * video_ctx->framerate.den / video_ctx->framerate.num; // } else { - duration = interlaced ? 40 : 20; // 50Hz -> 20ms default + duration = interlaced ? 
40 : 20; // 50Hz -> 20ms default // } -// Debug(4, "video: %d/%d %" PRIx64 " -> %d\n", video_ctx->framerate.den, video_ctx->framerate.num, av_frame_get_pkt_duration(frame), duration); +// Debug(4, "video: %d/%d %" PRIx64 " -> %d\n", video_ctx->framerate.den, video_ctx->framerate.num, av_frame_get_pkt_duration(frame), duration); // update video clock if (*pts_p != (int64_t) AV_NOPTS_VALUE) { - *pts_p += duration * 90; - //Info("video: %s +pts\n", Timestamp2String(*pts_p)); + *pts_p += duration * 90; + //Info("video: %s +pts\n", Timestamp2String(*pts_p)); } //av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp"); //pts = frame->best_effort_timestamp; // pts = frame->pkt_pts; - pts = frame->pts; + pts = frame->pts; if (pts == (int64_t) AV_NOPTS_VALUE || !pts) { - // libav: 0.8pre didn't set pts - pts = frame->pkt_dts; + // libav: 0.8pre didn't set pts + pts = frame->pkt_dts; } // libav: sets only pkt_dts which can be 0 if (pts && pts != (int64_t) AV_NOPTS_VALUE) { - // build a monotonic pts - if (*pts_p != (int64_t) AV_NOPTS_VALUE) { - int64_t delta; + // build a monotonic pts + if (*pts_p != (int64_t) AV_NOPTS_VALUE) { + int64_t delta; - delta = pts - *pts_p; - // ignore negative jumps - if (delta > -600 * 90 && delta <= -40 * 90) { - if (-delta > VideoDeltaPTS) { - VideoDeltaPTS = -delta; - Debug(4,"video: %#012" PRIx64 "->%#012" PRIx64 " delta%+4" PRId64 " pts\n", *pts_p, pts, pts - *pts_p); - } - return; - } - } else { // first new clock value - Debug(3,"++++++++++++++++++++++++++++++++++++starte audio\n"); - AudioVideoReady(pts); - } - if (*pts_p != pts) { - Debug(4,"video: %#012" PRIx64 "->%#012" PRIx64 " delta=%4" PRId64" pts\n", *pts_p, pts, pts - *pts_p); - *pts_p = pts; - } + delta = pts - *pts_p; + // ignore negative jumps + if (delta > -600 * 90 && delta <= -40 * 90) { + if (-delta > VideoDeltaPTS) { + VideoDeltaPTS = -delta; + Debug(4, "video: %#012" PRIx64 "->%#012" PRIx64 " delta%+4" PRId64 " pts\n", *pts_p, pts, + pts - *pts_p); + } + return; + } + } else { // first new clock value + Debug(3, "++++++++++++++++++++++++++++++++++++starte audio\n"); + AudioVideoReady(pts); + } + if (*pts_p != pts) { + Debug(4, "video: %#012" PRIx64 "->%#012" PRIx64 " delta=%4" PRId64 " pts\n", *pts_p, pts, pts - *pts_p); + *pts_p = pts; + } } } int CuvidMessage(int level, const char *format, ...); + /// -/// Update output for new size or aspect ratio. +/// Update output for new size or aspect ratio. 
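Editor's note on the arithmetic in the VideoSetPts() hunk above: DVB/VDR PTS values run on a 90 kHz clock, so a 20 ms progressive frame advances the clock by 20 * 90 = 1800 ticks, a 40 ms interlaced frame by 3600 ticks, and the "ignore negative jumps" window of -40*90 .. -600*90 ticks corresponds to backward jumps between one frame (40 ms) and 600 ms. A reduced sketch of the same monotonic-clock update, with a hypothetical helper name and without the AV_NOPTS_VALUE and audio-start handling of the real function:

#include <stdint.h>

// Advance a 90 kHz PTS clock by one frame and resync it to a decoder pts,
// treating small backward jumps (between 40 ms and 600 ms) as jitter.
static void ExampleUpdatePtsClock(int64_t *clock, int interlaced, int64_t frame_pts)
{
    int duration_ms = interlaced ? 40 : 20;     // 50 Hz fields / 25 fps frames

    *clock += (int64_t)duration_ms * 90;        // 90 ticks per millisecond

    if (frame_pts > 0) {
        int64_t delta = frame_pts - *clock;

        if (delta > -600 * 90 && delta <= -40 * 90) {
            return;                             // small backward jump: keep the running clock
        }
        *clock = frame_pts;                     // otherwise resync to the stream pts
    }
}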
/// -/// @param input_aspect_ratio video stream aspect +/// @param input_aspect_ratio video stream aspect /// -static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, - int input_height, VideoResolutions resolution, int video_x, int video_y, - int video_width, int video_height, int *output_x, int *output_y, - int *output_width, int *output_height, int *crop_x, int *crop_y, - int *crop_width, int *crop_height) +static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, int input_height, + VideoResolutions resolution, int video_x, int video_y, int video_width, int video_height, int *output_x, + int *output_y, int *output_width, int *output_height, int *crop_x, int *crop_y, int *crop_width, int *crop_height) { AVRational display_aspect_ratio; AVRational tmp_ratio; if (!input_aspect_ratio.num || !input_aspect_ratio.den) { - input_aspect_ratio.num = 1; - input_aspect_ratio.den = 1; - Debug(3, "video: aspect defaults to %d:%d\n", input_aspect_ratio.num, - input_aspect_ratio.den); + input_aspect_ratio.num = 1; + input_aspect_ratio.den = 1; + Debug(3, "video: aspect defaults to %d:%d\n", input_aspect_ratio.num, input_aspect_ratio.den); } - av_reduce(&input_aspect_ratio.num, &input_aspect_ratio.den, - input_width * input_aspect_ratio.num, - input_height * input_aspect_ratio.den, 1024 * 1024); + av_reduce(&input_aspect_ratio.num, &input_aspect_ratio.den, input_width * input_aspect_ratio.num, + input_height * input_aspect_ratio.den, 1024 * 1024); // InputWidth/Height can be zero = uninitialized if (!input_aspect_ratio.num || !input_aspect_ratio.den) { - input_aspect_ratio.num = 1; - input_aspect_ratio.den = 1; + input_aspect_ratio.num = 1; + input_aspect_ratio.den = 1; } - display_aspect_ratio.num = - VideoScreen->width_in_pixels * VideoScreen->height_in_millimeters; - display_aspect_ratio.den = - VideoScreen->height_in_pixels * VideoScreen->width_in_millimeters; + display_aspect_ratio.num = VideoScreen->width_in_pixels * VideoScreen->height_in_millimeters; + display_aspect_ratio.den = VideoScreen->height_in_pixels * VideoScreen->width_in_millimeters; display_aspect_ratio = av_mul_q(input_aspect_ratio, display_aspect_ratio); - Debug(3, "video: aspect %d:%d Resolution %d\n", display_aspect_ratio.num, display_aspect_ratio.den, resolution); + Debug(3, "video: aspect %d:%d Resolution %d\n", display_aspect_ratio.num, display_aspect_ratio.den, resolution); *crop_x = VideoCutLeftRight[resolution]; *crop_y = VideoCutTopBottom[resolution]; @@ -654,48 +637,52 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, tmp_ratio.num = 4; tmp_ratio.den = 3; #ifdef DEBUG - Debug(4, "ratio: %d:%d %d:%d\n", input_aspect_ratio.num, input_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); + Debug(4, "ratio: %d:%d %d:%d\n", input_aspect_ratio.num, input_aspect_ratio.den, display_aspect_ratio.num, + display_aspect_ratio.den); #endif if (!av_cmp_q(input_aspect_ratio, tmp_ratio)) { - switch (Video4to3ZoomMode) { - case VideoNormal: - goto normal; - case VideoStretch: - goto stretch; - case VideoCenterCutOut: - goto center_cut_out; - case VideoAnamorphic: - // FIXME: rest should be done by hardware - goto stretch; - } + switch (Video4to3ZoomMode) { + case VideoNormal: + goto normal; + case VideoStretch: + goto stretch; + case VideoCenterCutOut: + goto center_cut_out; + case VideoAnamorphic: + // FIXME: rest should be done by hardware + goto stretch; + } } switch (VideoOtherZoomMode) { - case VideoNormal: - goto normal; - case VideoStretch: - 
goto stretch; - case VideoCenterCutOut: - goto center_cut_out; - case VideoAnamorphic: - // FIXME: rest should be done by hardware - goto stretch; + case VideoNormal: + goto normal; + case VideoStretch: + goto stretch; + case VideoCenterCutOut: + goto center_cut_out; + case VideoAnamorphic: + // FIXME: rest should be done by hardware + goto stretch; } normal: *output_x = video_x; *output_y = video_y; - *output_width = (video_height * display_aspect_ratio.num + display_aspect_ratio.den -1 ) / display_aspect_ratio.den; - *output_height = (video_width * display_aspect_ratio.den + display_aspect_ratio.num -1 ) / display_aspect_ratio.num; + *output_width = + (video_height * display_aspect_ratio.num + display_aspect_ratio.den - 1) / display_aspect_ratio.den; + *output_height = + (video_width * display_aspect_ratio.den + display_aspect_ratio.num - 1) / display_aspect_ratio.num; // JOJO if (*output_width > video_width) { - *output_width = video_width; - *output_y += (video_height - *output_height) / 2; + *output_width = video_width; + *output_y += (video_height - *output_height) / 2; } else if (*output_height > video_height) { - *output_height = video_height; - *output_x += (video_width - *output_width) / 2; + *output_height = video_height; + *output_x += (video_width - *output_width) / 2; } - - CuvidMessage(2, "video: normal aspect output %dx%d%+d%+d Video %dx%d\n", *output_width, *output_height, *output_x, *output_y,video_width,video_height); + + CuvidMessage(2, "video: normal aspect output %dx%d%+d%+d Video %dx%d\n", *output_width, *output_height, *output_x, + *output_y, video_width, video_height); return; stretch: @@ -703,8 +690,7 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, *output_y = video_y; *output_width = video_width; *output_height = video_height; - Debug(3, "video: stretch output %dx%d%+d%+d\n", *output_width, - *output_height, *output_x, *output_y); + Debug(3, "video: stretch output %dx%d%+d%+d\n", *output_width, *output_height, *output_x, *output_y); return; center_cut_out: @@ -713,51 +699,47 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, *output_height = video_height; *output_width = video_width; - *crop_width = - (video_height * display_aspect_ratio.num + display_aspect_ratio.den - - 1) / display_aspect_ratio.den; - *crop_height = - (video_width * display_aspect_ratio.den + display_aspect_ratio.num - - 1) / display_aspect_ratio.num; + *crop_width = (video_height * display_aspect_ratio.num + display_aspect_ratio.den - 1) / display_aspect_ratio.den; + *crop_height = (video_width * display_aspect_ratio.den + display_aspect_ratio.num - 1) / display_aspect_ratio.num; // look which side must be cut if (*crop_width > video_width) { - int tmp; + int tmp; - *crop_height = input_height - VideoCutTopBottom[resolution] * 2; + *crop_height = input_height - VideoCutTopBottom[resolution] * 2; - // adjust scaling - tmp = ((*crop_width - video_width) * input_width) / (2 * video_width); - // FIXME: round failure? - if (tmp > *crop_x) { - *crop_x = tmp; - } - *crop_width = input_width - *crop_x * 2; + // adjust scaling + tmp = ((*crop_width - video_width) * input_width) / (2 * video_width); + // FIXME: round failure? 
+ if (tmp > *crop_x) { + *crop_x = tmp; + } + *crop_width = input_width - *crop_x * 2; } else if (*crop_height > video_height) { - int tmp; + int tmp; - *crop_width = input_width - VideoCutLeftRight[resolution] * 2; + *crop_width = input_width - VideoCutLeftRight[resolution] * 2; - // adjust scaling - tmp = ((*crop_height - video_height) * input_height) - / (2 * video_height); - // FIXME: round failure? - if (tmp > *crop_y) { - *crop_y = tmp; - } - *crop_height = input_height - *crop_y * 2; + // adjust scaling + tmp = ((*crop_height - video_height) * input_height) + / (2 * video_height); + // FIXME: round failure? + if (tmp > *crop_y) { + *crop_y = tmp; + } + *crop_height = input_height - *crop_y * 2; } else { - *crop_width = input_width - VideoCutLeftRight[resolution] * 2; - *crop_height = input_height - VideoCutTopBottom[resolution] * 2; + *crop_width = input_width - VideoCutLeftRight[resolution] * 2; + *crop_height = input_height - VideoCutTopBottom[resolution] * 2; } - Debug(3, "video: aspect crop %dx%d%+d%+d\n", *crop_width, *crop_height, - *crop_x, *crop_y); + Debug(3, "video: aspect crop %dx%d%+d%+d\n", *crop_width, *crop_height, *crop_x, *crop_y); return; } - -static uint64_t test_time=0; + +static uint64_t test_time = 0; + /// -/// Lock video thread. +/// Lock video thread. /// #define VideoThreadLock(void)\ {\ @@ -767,10 +749,10 @@ static uint64_t test_time=0; }\ }\ } -// test_time = GetusTicks(); -// printf("Lock start...."); +// test_time = GetusTicks(); +// printf("Lock start...."); /// -/// Unlock video thread. +/// Unlock video thread. /// #define VideoThreadUnlock(void)\ {\ @@ -780,16 +762,16 @@ static uint64_t test_time=0; }\ }\ } -// printf("Video Locked for %d\n",(GetusTicks()-test_time)/1000); +// printf("Video Locked for %d\n",(GetusTicks()-test_time)/1000); //---------------------------------------------------------------------------- -// GLX +// GLX //---------------------------------------------------------------------------- #ifdef USE_GLX /// -/// GLX extension functions +/// GLX extension functions ///@{ #ifdef GLX_MESA_swap_control static PFNGLXSWAPINTERVALMESAPROC GlxSwapIntervalMESA; @@ -802,7 +784,7 @@ static PFNGLXSWAPINTERVALSGIPROC GlxSwapIntervalSGI; #endif /// -/// GLX check error. +/// GLX check error. 
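Editor's note, to make the "normal" branch of VideoUpdateOutput() above concrete: with the combined display aspect ratio reduced to 4:3 and a 1920x1080 video window, output_width = (1080 * 4 + 3 - 1) / 3 = 1440, which fits, while output_height = (1920 * 3 + 4 - 1) / 4 = 1440, which exceeds the 1080-pixel window; the height is therefore clamped to 1080 and the centering step adds (1920 - 1440) / 2 = 240 to output_x, leaving 240-pixel pillar bars on each side. A 16:9 picture on the same window yields 1920x1080 exactly, so nothing is boxed.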
/// #define GlxCheck(void)\ {\ @@ -815,28 +797,43 @@ static PFNGLXSWAPINTERVALSGIPROC GlxSwapIntervalSGI; char *eglErrorString(EGLint error) { - switch(error) - { - case EGL_SUCCESS: return "No error"; - case EGL_NOT_INITIALIZED: return "EGL not initialized or failed to initialize"; - case EGL_BAD_ACCESS: return "Resource inaccessible"; - case EGL_BAD_ALLOC: return "Cannot allocate resources"; - case EGL_BAD_ATTRIBUTE: return "Unrecognized attribute or attribute value"; - case EGL_BAD_CONTEXT: return "Invalid EGL context"; - case EGL_BAD_CONFIG: return "Invalid EGL frame buffer configuration"; - case EGL_BAD_CURRENT_SURFACE: return "Current surface is no longer valid"; - case EGL_BAD_DISPLAY: return "Invalid EGL display"; - case EGL_BAD_SURFACE: return "Invalid surface"; - case EGL_BAD_MATCH: return "Inconsistent arguments"; - case EGL_BAD_PARAMETER: return "Invalid argument"; - case EGL_BAD_NATIVE_PIXMAP: return "Invalid native pixmap"; - case EGL_BAD_NATIVE_WINDOW: return "Invalid native window"; - case EGL_CONTEXT_LOST: return "Context lost"; + switch (error) { + case EGL_SUCCESS: + return "No error"; + case EGL_NOT_INITIALIZED: + return "EGL not initialized or failed to initialize"; + case EGL_BAD_ACCESS: + return "Resource inaccessible"; + case EGL_BAD_ALLOC: + return "Cannot allocate resources"; + case EGL_BAD_ATTRIBUTE: + return "Unrecognized attribute or attribute value"; + case EGL_BAD_CONTEXT: + return "Invalid EGL context"; + case EGL_BAD_CONFIG: + return "Invalid EGL frame buffer configuration"; + case EGL_BAD_CURRENT_SURFACE: + return "Current surface is no longer valid"; + case EGL_BAD_DISPLAY: + return "Invalid EGL display"; + case EGL_BAD_SURFACE: + return "Invalid surface"; + case EGL_BAD_MATCH: + return "Inconsistent arguments"; + case EGL_BAD_PARAMETER: + return "Invalid argument"; + case EGL_BAD_NATIVE_PIXMAP: + return "Invalid native pixmap"; + case EGL_BAD_NATIVE_WINDOW: + return "Invalid native window"; + case EGL_CONTEXT_LOST: + return "Context lost"; } return "Unknown error "; } + /// -/// egl check error. +/// egl check error. /// #define EglCheck(void) \ {\ @@ -851,54 +848,52 @@ char *eglErrorString(EGLint error) void OSD_get_shared_context() { - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); -// EglCheck(); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); +// EglCheck(); } void OSD_get_context() { - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, OSDcontext); -// EglCheck(); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, OSDcontext); +// EglCheck(); } - + void OSD_release_context() { - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); -// EglCheck(); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); +// EglCheck(); } - + #endif /// -/// GLX check if a GLX extension is supported. +/// GLX check if a GLX extension is supported. 
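Editor's note: the reflowed eglErrorString() above is only useful immediately after an EGL call; a typical checkpoint looks like the sketch below. ExampleMakeCurrentChecked is a hypothetical name, and in the plugin itself this job is done by the EglCheck macro rather than an explicit helper.

// Illustrative checkpoint: make the EGL context current and log a readable
// error via eglErrorString() if the call fails.
static void ExampleMakeCurrentChecked(void)
{
    if (!eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
        EGLint err = eglGetError();             // fetch the error code once
        Debug(3, "video/egl: eglMakeCurrent failed: %s\n", eglErrorString(err));
    }
}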
/// -/// @param ext extension to query -/// @returns true if supported, false otherwise +/// @param ext extension to query +/// @returns true if supported, false otherwise /// static int GlxIsExtensionSupported(const char *ext) { const char *extensions; - if ((extensions = - glXQueryExtensionsString(XlibDisplay, - DefaultScreen(XlibDisplay)))) { - const char *s; - int l; + if ((extensions = glXQueryExtensionsString(XlibDisplay, DefaultScreen(XlibDisplay)))) { + const char *s; + int l; - s = strstr(extensions, ext); - l = strlen(ext); - return s && (s[l] == ' ' || s[l] == '\0'); + s = strstr(extensions, ext); + l = strlen(ext); + return s && (s[l] == ' ' || s[l] == '\0'); } return 0; } /// -/// Setup GLX window. +/// Setup GLX window. /// -/// @param window xcb window id -/// @param width window width -/// @param height window height -/// @param context GLX context +/// @param window xcb window id +/// @param width window width +/// @param height window height +/// @param context GLX context /// #ifdef CUVID static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context) @@ -913,18 +908,18 @@ static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContex unsigned count; #ifdef PLACEBO_ - return; + return; #endif - + Debug(3, "video/egl: %s %x %dx%d context: %p", __FUNCTION__, window, width, height, context); // set gl context #ifdef CUVID - if (!glXMakeCurrent(XlibDisplay, window, context)) { - Fatal(_("video/egl: GlxSetupWindow can't make egl/glx context current\n")); - EglEnabled = 0; - return; - } + if (!glXMakeCurrent(XlibDisplay, window, context)) { + Fatal(_("video/egl: GlxSetupWindow can't make egl/glx context current\n")); + EglEnabled = 0; + return; + } #endif Debug(3, "video/egl: ok\n"); @@ -932,66 +927,68 @@ static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContex // check if v-sync is working correct end = GetMsTicks(); for (i = 0; i < 10; ++i) { - start = end; - - glClear(GL_COLOR_BUFFER_BIT); - glXSwapBuffers(XlibDisplay, window); - end = GetMsTicks(); - - GlxGetVideoSyncSGI(&count); - Debug(4, "video/glx: %5d frame rate %dms\n", count, end - start); - // nvidia can queue 5 swaps - if (i > 5 && (end - start) < 15) { - Warning(_("video/glx: no v-sync\n")); - } + start = end; + + glClear(GL_COLOR_BUFFER_BIT); + glXSwapBuffers(XlibDisplay, window); + end = GetMsTicks(); + + GlxGetVideoSyncSGI(&count); + Debug(4, "video/glx: %5d frame rate %dms\n", count, end - start); + // nvidia can queue 5 swaps + if (i > 5 && (end - start) < 15) { + Warning(_("video/glx: no v-sync\n")); + } } - GLenum err = glewInit(); - if (err != GLEW_OK) { - Debug(3,"Error: %s\n", glewGetErrorString(err)); - } - GlxCheck(); + GLenum err = glewInit(); + + if (err != GLEW_OK) { + Debug(3, "Error: %s\n", glewGetErrorString(err)); + } + GlxCheck(); #endif // viewpoint glViewport(0, 0, width, height); - GlxCheck(); + GlxCheck(); #ifdef VAAPI - OSD_release_context(); + OSD_release_context(); #endif } /// -/// Initialize GLX. +/// Initialize GLX. 
/// #ifdef CUVID static void EglInit(void) { - XVisualInfo *vi=NULL; + XVisualInfo *vi = NULL; + #ifdef PLACEBO - return; + return; #endif - - //The desired 30-bit color visual - int attributeList10[] = { - GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, - GLX_RENDER_TYPE, GLX_RGBA_BIT, - GLX_DOUBLEBUFFER, True, - GLX_RED_SIZE, 10, /*10bits for R */ - GLX_GREEN_SIZE, 10, /*10bits for G */ - GLX_BLUE_SIZE, 10, /*10bits for B */ - None - }; - int attributeList[] = { - GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, - GLX_RENDER_TYPE, GLX_RGBA_BIT, - GLX_DOUBLEBUFFER, True, - GLX_RED_SIZE, 8, /*8 bits for R */ - GLX_GREEN_SIZE, 8, /*8 bits for G */ - GLX_BLUE_SIZE, 8, /*8 bits for B */ - None - }; - int fbcount; - + + //The desired 30-bit color visual + int attributeList10[] = { + GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, + GLX_RENDER_TYPE, GLX_RGBA_BIT, + GLX_DOUBLEBUFFER, True, + GLX_RED_SIZE, 10, /*10bits for R */ + GLX_GREEN_SIZE, 10, /*10bits for G */ + GLX_BLUE_SIZE, 10, /*10bits for B */ + None + }; + int attributeList[] = { + GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, + GLX_RENDER_TYPE, GLX_RGBA_BIT, + GLX_DOUBLEBUFFER, True, + GLX_RED_SIZE, 8, /*8 bits for R */ + GLX_GREEN_SIZE, 8, /*8 bits for G */ + GLX_BLUE_SIZE, 8, /*8 bits for B */ + None + }; + int fbcount; + GLXContext context; int major; int minor; @@ -999,422 +996,423 @@ static void EglInit(void) int glx_GLX_MESA_swap_control; int glx_GLX_SGI_swap_control; int glx_GLX_SGI_video_sync; - GLXFBConfig *fbc; - int redSize, greenSize, blueSize; - + GLXFBConfig *fbc; + int redSize, greenSize, blueSize; + if (!glXQueryVersion(XlibDisplay, &major, &minor)) { - Fatal(_("video/glx: no GLX support\n")); + Fatal(_("video/glx: no GLX support\n")); } Info(_("video/glx: glx version %d.%d\n"), major, minor); - + // - // check which extension are supported + // check which extension are supported // glx_GLX_EXT_swap_control = GlxIsExtensionSupported("GLX_EXT_swap_control"); - glx_GLX_MESA_swap_control = - GlxIsExtensionSupported("GLX_MESA_swap_control"); + glx_GLX_MESA_swap_control = GlxIsExtensionSupported("GLX_MESA_swap_control"); glx_GLX_SGI_swap_control = GlxIsExtensionSupported("GLX_SGI_swap_control"); glx_GLX_SGI_video_sync = GlxIsExtensionSupported("GLX_SGI_video_sync"); #ifdef GLX_MESA_swap_control if (glx_GLX_MESA_swap_control) { - GlxSwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC) - glXGetProcAddress((const GLubyte *)"glXSwapIntervalMESA"); + GlxSwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC) + glXGetProcAddress((const GLubyte *)"glXSwapIntervalMESA"); } Debug(3, "video/glx: GlxSwapIntervalMESA=%p\n", GlxSwapIntervalMESA); #endif #ifdef GLX_SGI_swap_control if (glx_GLX_SGI_swap_control) { - GlxSwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC) - glXGetProcAddress((const GLubyte *)"wglSwapIntervalEXT"); + GlxSwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC) + glXGetProcAddress((const GLubyte *)"wglSwapIntervalEXT"); } Debug(3, "video/glx: GlxSwapIntervalSGI=%p\n", GlxSwapIntervalSGI); #endif #ifdef GLX_SGI_video_sync if (glx_GLX_SGI_video_sync) { - GlxGetVideoSyncSGI = (PFNGLXGETVIDEOSYNCSGIPROC) - glXGetProcAddress((const GLubyte *)"glXGetVideoSyncSGI"); + GlxGetVideoSyncSGI = (PFNGLXGETVIDEOSYNCSGIPROC) + glXGetProcAddress((const GLubyte *)"glXGetVideoSyncSGI"); } Debug(3, "video/glx: GlxGetVideoSyncSGI=%p\n", GlxGetVideoSyncSGI); #endif // create glx context glXMakeCurrent(XlibDisplay, None, NULL); - - fbc = glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay),attributeList10,&fbcount); // try 10 Bit - if (fbc==NULL) { - fbc = glXChooseFBConfig(XlibDisplay, 
DefaultScreen(XlibDisplay),attributeList,&fbcount); // fall back to 8 Bit - if (fbc==NULL) - Fatal(_("did not get FBconfig")); - } - vi = glXGetVisualFromFBConfig(XlibDisplay, fbc[0]); - - glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_RED_SIZE, &redSize); - glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_GREEN_SIZE, &greenSize); - glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_BLUE_SIZE, &blueSize); - - Debug(3,"RGB size %d:%d:%d\n",redSize, greenSize, blueSize); - Debug(3, "Chosen visual ID = 0x%x\n", vi->visualid ); + fbc = glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay), attributeList10, &fbcount); // try 10 Bit + if (fbc == NULL) { + fbc = glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay), attributeList, &fbcount); // fall back to 8 Bit + if (fbc == NULL) + Fatal(_("did not get FBconfig")); + } + + vi = glXGetVisualFromFBConfig(XlibDisplay, fbc[0]); + + glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_RED_SIZE, &redSize); + glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_GREEN_SIZE, &greenSize); + glXGetFBConfigAttrib(XlibDisplay, fbc[0], GLX_BLUE_SIZE, &blueSize); + + Debug(3, "RGB size %d:%d:%d\n", redSize, greenSize, blueSize); + Debug(3, "Chosen visual ID = 0x%x\n", vi->visualid); context = glXCreateContext(XlibDisplay, vi, NULL, GL_TRUE); if (!context) { - Fatal(_("video/glx: can't create glx context\n")); + Fatal(_("video/glx: can't create glx context\n")); } glxSharedContext = context; context = glXCreateContext(XlibDisplay, vi, glxSharedContext, GL_TRUE); if (!context) { - Fatal(_("video/glx: can't create glx context\n")); + Fatal(_("video/glx: can't create glx context\n")); } glxContext = context; - - EglEnabled = 1; + + EglEnabled = 1; GlxVisualInfo = vi; Debug(3, "video/glx: visual %#02x depth %u\n", (unsigned)vi->visualid, vi->depth); // - // query default v-sync state + // query default v-sync state // if (glx_GLX_EXT_swap_control) { - unsigned tmp; + unsigned tmp; - tmp = -1; - glXQueryDrawable(XlibDisplay, DefaultRootWindow(XlibDisplay), GLX_SWAP_INTERVAL_EXT, &tmp); - GlxCheck(); + tmp = -1; + glXQueryDrawable(XlibDisplay, DefaultRootWindow(XlibDisplay), GLX_SWAP_INTERVAL_EXT, &tmp); + GlxCheck(); - Debug(3, "video/glx: default v-sync is %d\n", tmp); + Debug(3, "video/glx: default v-sync is %d\n", tmp); } else { - Debug(3, "video/glx: default v-sync is unknown\n"); + Debug(3, "video/glx: default v-sync is unknown\n"); } // - // disable wait on v-sync + // disable wait on v-sync // // FIXME: sleep before swap / busy waiting hardware // FIXME: 60hz lcd panel // FIXME: config: default, on, off #ifdef GLX_SGI_swap_control if (GlxVSyncEnabled < 0 && GlxSwapIntervalSGI) { - if (GlxSwapIntervalSGI(0)) { - GlxCheck(); - Warning(_("video/glx: can't disable v-sync\n")); - } else { - Info(_("video/glx: v-sync disabled\n")); - } + if (GlxSwapIntervalSGI(0)) { + GlxCheck(); + Warning(_("video/glx: can't disable v-sync\n")); + } else { + Info(_("video/glx: v-sync disabled\n")); + } } else #endif #ifdef GLX_MESA_swap_control if (GlxVSyncEnabled < 0 && GlxSwapIntervalMESA) { - if (GlxSwapIntervalMESA(0)) { - GlxCheck(); - Warning(_("video/glx: can't disable v-sync\n")); - } else { - Info(_("video/glx: v-sync disabled\n")); - } + if (GlxSwapIntervalMESA(0)) { + GlxCheck(); + Warning(_("video/glx: can't disable v-sync\n")); + } else { + Info(_("video/glx: v-sync disabled\n")); + } } #endif // - // enable wait on v-sync + // enable wait on v-sync // #ifdef GLX_SGI_swap_control if (GlxVSyncEnabled > 0 && GlxSwapIntervalMESA) { - if (GlxSwapIntervalMESA(1)) { - GlxCheck(); - 
Warning(_("video/glx: can't enable v-sync\n")); - } else { - Info(_("video/glx: v-sync enabled\n")); - } + if (GlxSwapIntervalMESA(1)) { + GlxCheck(); + Warning(_("video/glx: can't enable v-sync\n")); + } else { + Info(_("video/glx: v-sync enabled\n")); + } } else #endif #ifdef GLX_MESA_swap_control if (GlxVSyncEnabled > 0 && GlxSwapIntervalSGI) { - if (GlxSwapIntervalSGI(1)) { - GlxCheck(); - Warning(_("video/glx: SGI can't enable v-sync\n")); - } else { - Info(_("video/glx: SGI v-sync enabled\n")); - } + if (GlxSwapIntervalSGI(1)) { + GlxCheck(); + Warning(_("video/glx: SGI can't enable v-sync\n")); + } else { + Info(_("video/glx: SGI v-sync enabled\n")); + } } #endif } -#else // VAAPI +#else // VAAPI static void EglInit(void) { - int redSize, greenSize, blueSize, alphaSize; - + int redSize, greenSize, blueSize, alphaSize; + #ifdef PLACEBO - return; + return; #endif - EGLContext context; + EGLContext context; + // create egl context make_egl(); - GLenum err = glewInit(); - if (err != GLEW_OK) { - Debug(3,"Error: %s\n", glewGetErrorString(err)); - } - - eglGetConfigAttrib(eglDisplay, eglConfig, EGL_BLUE_SIZE, &blueSize); - eglGetConfigAttrib(eglDisplay, eglConfig, EGL_RED_SIZE, &redSize); - eglGetConfigAttrib(eglDisplay, eglConfig, EGL_GREEN_SIZE, &greenSize); - eglGetConfigAttrib(eglDisplay, eglConfig, EGL_ALPHA_SIZE, &alphaSize); - Debug(3,"RGB size %d:%d:%d Alpha %d\n",redSize, greenSize, blueSize,alphaSize); - + GLenum err = glewInit(); + + if (err != GLEW_OK) { + Debug(3, "Error: %s\n", glewGetErrorString(err)); + } + + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_BLUE_SIZE, &blueSize); + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_RED_SIZE, &redSize); + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_GREEN_SIZE, &greenSize); + eglGetConfigAttrib(eglDisplay, eglConfig, EGL_ALPHA_SIZE, &alphaSize); + Debug(3, "RGB size %d:%d:%d Alpha %d\n", redSize, greenSize, blueSize, alphaSize); + eglSharedContext = eglContext; - + context = eglCreateContext(eglDisplay, eglConfig, eglSharedContext, eglAttrs); - EglCheck(); + EglCheck(); if (!context) { - Fatal(_("video/egl: can't create egl context\n")); + Fatal(_("video/egl: can't create egl context\n")); } eglContext = context; -} +} #endif - /// -/// Cleanup GLX. +/// Cleanup GLX. 
/// static void EglExit(void) { Debug(3, "video/egl: %s\n", __FUNCTION__); #ifdef PLACEBO - return; + return; #endif - + glFinish(); // must destroy contet #ifdef CUVID - // must destroy glx + // must destroy glx // if (glXGetCurrentContext() == glxContext) { - // if currently used, set to none - glXMakeCurrent(XlibDisplay, None, NULL); + // if currently used, set to none + glXMakeCurrent(XlibDisplay, None, NULL); // } if (OSDcontext) { - glXDestroyContext(XlibDisplay, OSDcontext); - GlxCheck(); - OSDcontext = NULL; + glXDestroyContext(XlibDisplay, OSDcontext); + GlxCheck(); + OSDcontext = NULL; } if (glxContext) { - glXDestroyContext(XlibDisplay, glxContext); - GlxCheck(); - glxContext = NULL; + glXDestroyContext(XlibDisplay, glxContext); + GlxCheck(); + glxContext = NULL; } if (glxThreadContext) { - glXDestroyContext(XlibDisplay, glxThreadContext); - GlxCheck(); - glxThreadContext = NULL; + glXDestroyContext(XlibDisplay, glxThreadContext); + GlxCheck(); + glxThreadContext = NULL; } if (glxSharedContext) { - glXDestroyContext(XlibDisplay, glxSharedContext); - GlxCheck(); - glxSharedContext = NULL; - } + glXDestroyContext(XlibDisplay, glxSharedContext); + GlxCheck(); + glxSharedContext = NULL; + } #else if (eglGetCurrentContext() == eglContext) { - // if currently used, set to none - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + // if currently used, set to none + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); } if (eglSharedContext) { - eglDestroyContext(eglDisplay, eglSharedContext); - EglCheck(); + eglDestroyContext(eglDisplay, eglSharedContext); + EglCheck(); } if (eglContext) { - eglDestroyContext(eglDisplay, eglContext); - EglCheck(); + eglDestroyContext(eglDisplay, eglContext); + EglCheck(); } if (eglThreadContext) { - eglDestroyContext(eglDisplay, eglThreadContext); - EglCheck(); + eglDestroyContext(eglDisplay, eglThreadContext); + EglCheck(); } - eglTerminate(eglDisplay); + eglTerminate(eglDisplay); #endif } #endif //---------------------------------------------------------------------------- -// common functions +// common functions //---------------------------------------------------------------------------- /// -/// Calculate resolution group. +/// Calculate resolution group. /// -/// @param width video picture raw width -/// @param height video picture raw height -/// @param interlace flag interlaced video picture +/// @param width video picture raw width +/// @param height video picture raw height +/// @param interlace flag interlaced video picture /// -/// @note interlace isn't used yet and probably wrong set by caller. +/// @note interlace isn't used yet and probably wrong set by caller. 
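For orientation, the decision ladder in the VideoResolutionGroup() that follows sorts common broadcast formats as shown here; the helper below only restates the same checks with string labels in place of the VideoResolutions enum values:

    /* Illustration of the grouping rules implemented below (labels only). */
    static const char *ResolutionGroupName(int width, int height)
    {
        if (height == 2160) return "UHD";        /* 3840x2160 */
        if (height <= 576)  return "576i";       /* 720x576 SD */
        if (height <= 720)  return "720p";       /* 1280x720 */
        if (height < 1080)  return "Fake1080i";  /* heights between 720 and 1080 */
        if (width < 1920)   return "Fake1080i";  /* e.g. 1440x1080 HDTV */
        return "1080i";                          /* 1920x1080 */
    }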
/// -static VideoResolutions VideoResolutionGroup(int width, int height, - __attribute__ ((unused)) +static VideoResolutions VideoResolutionGroup(int width, int height, __attribute__((unused)) int interlace) { if (height == 2160) { - return VideoResolutionUHD; + return VideoResolutionUHD; } if (height <= 576) { - return VideoResolution576i; + return VideoResolution576i; } if (height <= 720) { - return VideoResolution720p; + return VideoResolution720p; } if (height < 1080) { - return VideoResolutionFake1080i; + return VideoResolutionFake1080i; } if (width < 1920) { - return VideoResolutionFake1080i; + return VideoResolutionFake1080i; } return VideoResolution1080i; } //---------------------------------------------------------------------------- -// CUVID +// CUVID //---------------------------------------------------------------------------- #ifdef USE_CUVID #ifdef PLACEBO -struct ext_buf { +struct ext_buf +{ int fd; #ifdef CUVID CUexternalMemory mem; - CUmipmappedArray mma; - CUexternalSemaphore ss; - CUexternalSemaphore ws; - const struct pl_sysnc *sysnc; + CUmipmappedArray mma; + CUexternalSemaphore ss; + CUexternalSemaphore ws; + const struct pl_sysnc *sysnc; #endif }; #endif #ifdef VAAPI -static VADisplay *VaDisplay; ///< VA-API display +static VADisplay *VaDisplay; ///< VA-API display #endif /// -/// CUVID decoder +/// CUVID decoder /// typedef struct _cuvid_decoder_ { -#ifdef VAAPI - VADisplay *VaDisplay; ///< VA-API display +#ifdef VAAPI + VADisplay *VaDisplay; ///< VA-API display #endif - xcb_window_t Window; ///< output window + xcb_window_t Window; ///< output window - int VideoX; ///< video base x coordinate - int VideoY; ///< video base y coordinate - int VideoWidth; ///< video base width - int VideoHeight; ///< video base height + int VideoX; ///< video base x coordinate + int VideoY; ///< video base y coordinate + int VideoWidth; ///< video base width + int VideoHeight; ///< video base height - int OutputX; ///< real video output x coordinate - int OutputY; ///< real video output y coordinate - int OutputWidth; ///< real video output width - int OutputHeight; ///< real video output height + int OutputX; ///< real video output x coordinate + int OutputY; ///< real video output y coordinate + int OutputWidth; ///< real video output width + int OutputHeight; ///< real video output height - enum AVPixelFormat PixFmt; ///< ffmpeg frame pixfmt - enum AVColorSpace ColorSpace; /// ffmpeg ColorSpace - enum AVColorTransferCharacteristic trc; // - enum AVColorPrimaries color_primaries; - int WrongInterlacedWarned; ///< warning about interlace flag issued - int Interlaced; ///< ffmpeg interlaced flag - int TopFieldFirst; ///< ffmpeg top field displayed first + enum AVPixelFormat PixFmt; ///< ffmpeg frame pixfmt + enum AVColorSpace ColorSpace; /// ffmpeg ColorSpace + enum AVColorTransferCharacteristic trc; // + enum AVColorPrimaries color_primaries; + int WrongInterlacedWarned; ///< warning about interlace flag issued + int Interlaced; ///< ffmpeg interlaced flag + int TopFieldFirst; ///< ffmpeg top field displayed first - int InputWidth; ///< video input width - int InputHeight; ///< video input height - AVRational InputAspect; ///< video input aspect ratio - VideoResolutions Resolution; ///< resolution group + int InputWidth; ///< video input width + int InputHeight; ///< video input height + AVRational InputAspect; ///< video input aspect ratio + VideoResolutions Resolution; ///< resolution group - int CropX; ///< video crop x - int CropY; ///< video crop y - int CropWidth; ///< video 
crop width - int CropHeight; ///< video crop height + int CropX; ///< video crop x + int CropY; ///< video crop y + int CropWidth; ///< video crop width + int CropHeight; ///< video crop height - int grabwidth,grabheight,grab; // Grab Data - void *grabbase; + int grabwidth, grabheight, grab; // Grab Data + void *grabbase; - int SurfacesNeeded; ///< number of surface to request - int SurfaceUsedN; ///< number of used video surfaces + int SurfacesNeeded; ///< number of surface to request + int SurfaceUsedN; ///< number of used video surfaces /// used video surface ids int SurfacesUsed[CODEC_SURFACES_MAX]; - int SurfaceFreeN; ///< number of free video surfaces + int SurfaceFreeN; ///< number of free video surfaces /// free video surface ids int SurfacesFree[CODEC_SURFACES_MAX]; /// video surface ring buffer int SurfacesRb[VIDEO_SURFACES_MAX]; -// CUcontext cuda_ctx; +// CUcontext cuda_ctx; -// cudaStream_t stream; // make my own cuda stream -// CUgraphicsResource cuResource; - int SurfaceWrite; ///< write pointer - int SurfaceRead; ///< read pointer - atomic_t SurfacesFilled; ///< how many of the buffer is used - AVFrame *frames[CODEC_SURFACES_MAX+1]; -#ifdef CUVID - CUarray cu_array[CODEC_SURFACES_MAX+1][2]; - CUgraphicsResource cu_res[CODEC_SURFACES_MAX+1][2]; - CUcontext cuda_ctx; -#endif - GLuint gl_textures[(CODEC_SURFACES_MAX+1)*2]; // where we will copy the CUDA result +// cudaStream_t stream; // make my own cuda stream +// CUgraphicsResource cuResource; + int SurfaceWrite; ///< write pointer + int SurfaceRead; ///< read pointer + atomic_t SurfacesFilled; ///< how many of the buffer is used + AVFrame *frames[CODEC_SURFACES_MAX + 1]; +#ifdef CUVID + CUarray cu_array[CODEC_SURFACES_MAX + 1][2]; + CUgraphicsResource cu_res[CODEC_SURFACES_MAX + 1][2]; + CUcontext cuda_ctx; +#endif + GLuint gl_textures[(CODEC_SURFACES_MAX + 1) * 2]; // where we will copy the CUDA result #ifdef VAAPI - EGLImageKHR images[(CODEC_SURFACES_MAX+1)*2]; - int fds[(CODEC_SURFACES_MAX+1)*2]; + EGLImageKHR images[(CODEC_SURFACES_MAX + 1) * 2]; + int fds[(CODEC_SURFACES_MAX + 1) * 2]; #endif #ifdef PLACEBO - struct pl_image pl_images[CODEC_SURFACES_MAX+1]; // images for Placebo chain - struct ext_buf ebuf[CODEC_SURFACES_MAX+1]; // for managing vk buffer -#endif - - int SurfaceField; ///< current displayed field - int TrickSpeed; ///< current trick speed - int TrickCounter; ///< current trick speed counter - struct timespec FrameTime; ///< time of last display - VideoStream *Stream; ///< video stream - int Closing; ///< flag about closing current stream - int SyncOnAudio; ///< flag sync to audio - int64_t PTS; ///< video PTS clock - -#if defined(YADIF) || defined (VAAPI) - AVFilterContext *buffersink_ctx; - AVFilterContext *buffersrc_ctx; - AVFilterGraph *filter_graph; + struct pl_image pl_images[CODEC_SURFACES_MAX + 1]; // images for Placebo chain + struct ext_buf ebuf[CODEC_SURFACES_MAX + 1]; // for managing vk buffer #endif - int LastAVDiff; ///< last audio - video difference - int SyncCounter; ///< counter to sync frames - int StartCounter; ///< counter for video start - int FramesDuped; ///< number of frames duplicated - int FramesMissed; ///< number of frames missed - int FramesDropped; ///< number of frames dropped - int FrameCounter; ///< number of frames decoded - int FramesDisplayed; ///< number of frames displayed - float Frameproc; /// Time to process frame - int newchannel; + int SurfaceField; ///< current displayed field + int TrickSpeed; ///< current trick speed + int TrickCounter; ///< current trick 
speed counter + struct timespec FrameTime; ///< time of last display + VideoStream *Stream; ///< video stream + int Closing; ///< flag about closing current stream + int SyncOnAudio; ///< flag sync to audio + int64_t PTS; ///< video PTS clock + +#if defined(YADIF) || defined (VAAPI) + AVFilterContext *buffersink_ctx; + AVFilterContext *buffersrc_ctx; + AVFilterGraph *filter_graph; +#endif + + int LastAVDiff; ///< last audio - video difference + int SyncCounter; ///< counter to sync frames + int StartCounter; ///< counter for video start + int FramesDuped; ///< number of frames duplicated + int FramesMissed; ///< number of frames missed + int FramesDropped; ///< number of frames dropped + int FrameCounter; ///< number of frames decoded + int FramesDisplayed; ///< number of frames displayed + float Frameproc; /// Time to process frame + int newchannel; } CuvidDecoder; -static CuvidDecoder *CuvidDecoders[2]; ///< open decoder streams -static int CuvidDecoderN; ///< number of decoder streams +static CuvidDecoder *CuvidDecoders[2]; ///< open decoder streams +static int CuvidDecoderN; ///< number of decoder streams #ifdef PLACEBO -typedef struct priv { - const struct pl_gpu *gpu; - const struct pl_vulkan *vk; - const struct pl_vk_inst *vk_inst; - struct pl_context *ctx; - struct pl_renderer *renderer; - struct pl_renderer *renderertest; - const struct pl_swapchain *swapchain; - struct pl_context_params context; -// struct pl_render_target r_target; -// struct pl_render_params r_params; -// struct pl_tex final_fbo; - VkSurfaceKHR pSurface; -// VkSemaphore sig_in; - int has_dma_buf; -}priv; +typedef struct priv +{ + const struct pl_gpu *gpu; + const struct pl_vulkan *vk; + const struct pl_vk_inst *vk_inst; + struct pl_context *ctx; + struct pl_renderer *renderer; + struct pl_renderer *renderertest; + const struct pl_swapchain *swapchain; + struct pl_context_params context; +// struct pl_render_target r_target; +// struct pl_render_params r_params; +// struct pl_tex final_fbo; + VkSurfaceKHR pSurface; +// VkSemaphore sig_in; + int has_dma_buf; +} priv; static priv *p; static struct pl_overlay osdoverlay; @@ -1422,58 +1420,58 @@ static int semid; struct itimerval itimer; #endif +GLuint vao_buffer; // -GLuint vao_buffer; // -//GLuint vao_vao[4]; // -GLuint gl_shader=0,gl_prog = 0,gl_fbo=0; // shader programm -GLint gl_colormatrix,gl_colormatrix_c; -GLuint OSDfb=0; -GLuint OSDtexture,gl_prog_osd=0; +//GLuint vao_vao[4]; // +GLuint gl_shader = 0, gl_prog = 0, gl_fbo = 0; // shader programm +GLint gl_colormatrix, gl_colormatrix_c; +GLuint OSDfb = 0; +GLuint OSDtexture, gl_prog_osd = 0; -int OSDx,OSDy,OSDxsize,OSDysize; +int OSDx, OSDy, OSDxsize, OSDysize; -static struct timespec CuvidFrameTime; ///< time of last display +static struct timespec CuvidFrameTime; ///< time of last display -int window_width,window_height; +int window_width, window_height; #include "shaders.h" //---------------------------------------------------------------------------- /// -/// Output video messages. +/// Output video messages. /// -/// Reduce output. +/// Reduce output. /// -/// @param level message level (Error, Warning, Info, Debug, ...) -/// @param format printf format string (NULL to flush messages) -/// @param ... printf arguments +/// @param level message level (Error, Warning, Info, Debug, ...) +/// @param format printf format string (NULL to flush messages) +/// @param ... 
printf arguments /// -/// @returns true, if message shown +/// @returns true, if message shown /// int CuvidMessage(int level, const char *format, ...) { if (SysLogLevel > level || DebugLevel > level) { - static const char *last_format; - static char buf[256]; - va_list ap; + static const char *last_format; + static char buf[256]; + va_list ap; - va_start(ap, format); - if (format != last_format) { // don't repeat same message - if (buf[0]) { // print last repeated message - syslog(LOG_ERR, "%s", buf); - buf[0] = '\0'; - } + va_start(ap, format); + if (format != last_format) { // don't repeat same message + if (buf[0]) { // print last repeated message + syslog(LOG_ERR, "%s", buf); + buf[0] = '\0'; + } - if (format) { - last_format = format; - vsyslog(LOG_ERR, format, ap); - } - va_end(ap); - return 1; - } - vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); + if (format) { + last_format = format; + vsyslog(LOG_ERR, format, ap); + } + va_end(ap); + return 1; + } + vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); } return 0; } @@ -1487,32 +1485,32 @@ int CuvidMessage(int level, const char *format, ...) // These are the inline versions for all of the SDK helper functions static inline void __checkCudaErrors(CUresult err, const char *file, const int line) { - if (CUDA_SUCCESS != err) - { - CuvidMessage( 2,"checkCudaErrors() Driver API error = %04d \"%s\" from file <%s>, line %i.\n", err, getCudaDrvErrorString(err), file, line); + if (CUDA_SUCCESS != err) { + CuvidMessage(2, "checkCudaErrors() Driver API error = %04d \"%s\" from file <%s>, line %i.\n", err, + getCudaDrvErrorString(err), file, line); exit(EXIT_FAILURE); } } #endif -// Surfaces ------------------------------------------------------------- -void -createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned int size_y, enum AVPixelFormat PixFmt); +// Surfaces ------------------------------------------------------------- +void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsigned int size_y, + enum AVPixelFormat PixFmt); /// -/// Create surfaces for CUVID decoder. +/// Create surfaces for CUVID decoder. 
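The __checkCudaErrors() helper above only becomes useful through a call-site macro that supplies __FILE__ and __LINE__. That macro is not part of this hunk, but the checkCudaErrors() calls later in the file presumably rely on the usual CUDA SDK pattern:

    /* Assumed companion macro (standard CUDA SDK helper, not shown in this hunk): */
    #define checkCudaErrors(err)  __checkCudaErrors((err), __FILE__, __LINE__)

    /* Typical call site, as used by the texture setup code below:
       checkCudaErrors(cuMemcpy2D(&cpy)); */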
/// -/// @param decoder CUVID hw decoder -/// @param width surface source/video width -/// @param height surface source/video height +/// @param decoder CUVID hw decoder +/// @param width surface source/video width +/// @param height surface source/video height /// -static void CuvidCreateSurfaces(CuvidDecoder * decoder, int width, int height,enum AVPixelFormat PixFmt ) +static void CuvidCreateSurfaces(CuvidDecoder * decoder, int width, int height, enum AVPixelFormat PixFmt) { int i; - + #ifdef DEBUG if (!decoder->SurfacesNeeded) { - Error(_("video/cuvid: surface needed not set\n")); - decoder->SurfacesNeeded = VIDEO_SURFACES_MAX; + Error(_("video/cuvid: surface needed not set\n")); + decoder->SurfacesNeeded = VIDEO_SURFACES_MAX; } #endif Debug(3, "video/cuvid: %s: %dx%d * %d \n", __FUNCTION__, width, height, decoder->SurfacesNeeded); @@ -1520,100 +1518,100 @@ static void CuvidCreateSurfaces(CuvidDecoder * decoder, int width, int height,en // allocate only the number of needed surfaces decoder->SurfaceFreeN = decoder->SurfacesNeeded; - createTextureDst(decoder,decoder->SurfacesNeeded,width,height,PixFmt); - - for (i = 0; i < decoder->SurfaceFreeN; ++i) { - decoder->SurfacesFree[i] = i; - } + createTextureDst(decoder, decoder->SurfacesNeeded, width, height, PixFmt); - Debug(4, "video/cuvid: created video surface %dx%d with id %d\n",width, height, decoder->SurfacesFree[i]); + for (i = 0; i < decoder->SurfaceFreeN; ++i) { + decoder->SurfacesFree[i] = i; + } + + Debug(4, "video/cuvid: created video surface %dx%d with id %d\n", width, height, decoder->SurfacesFree[i]); } /// -/// Destroy surfaces of CUVID decoder. +/// Destroy surfaces of CUVID decoder. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidDestroySurfaces(CuvidDecoder * decoder) { - int i,j; + int i, j; Debug(3, "video/cuvid: %s\n", __FUNCTION__); - + #ifndef PLACEBO #ifdef CUVID - glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); - GlxCheck(); + glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); + GlxCheck(); #else - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); - EglCheck(); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); + EglCheck(); #endif -#endif - for (i=0;iSurfacesNeeded;i++) { - if (decoder->frames[i]) { - av_frame_free(&decoder->frames[i]); - } - for (j=0;j<2;j++) { -#ifdef PLACEBO - if (decoder->pl_images[i].planes[j].texture) { +#endif + for (i = 0; i < decoder->SurfacesNeeded; i++) { + if (decoder->frames[i]) { + av_frame_free(&decoder->frames[i]); + } + for (j = 0; j < 2; j++) { +#ifdef PLACEBO + if (decoder->pl_images[i].planes[j].texture) { -#ifdef VAAPI - if (p->has_dma_buf && decoder->pl_images[i].planes[j].texture->params.shared_mem.handle.fd) { - close(decoder->pl_images[i].planes[j].texture->params.shared_mem.handle.fd); - } +#ifdef VAAPI + if (p->has_dma_buf && decoder->pl_images[i].planes[j].texture->params.shared_mem.handle.fd) { + close(decoder->pl_images[i].planes[j].texture->params.shared_mem.handle.fd); + } #endif - pl_tex_destroy(p->gpu,&decoder->pl_images[i].planes[j].texture); - } + pl_tex_destroy(p->gpu, &decoder->pl_images[i].planes[j].texture); + } #else #ifdef CUVID - checkCudaErrors(cuGraphicsUnregisterResource(decoder->cu_res[i][j])); + checkCudaErrors(cuGraphicsUnregisterResource(decoder->cu_res[i][j])); #endif #ifdef VAAPI - if (decoder->images[i*2+j]) { - DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[i*2+j]); - if (decoder->fds[i*2+j]) - close(decoder->fds[i*2+j]); 
- } - decoder->fds[i*2+j] = 0; - decoder->images[i*2+j] = 0; + if (decoder->images[i * 2 + j]) { + DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[i * 2 + j]); + if (decoder->fds[i * 2 + j]) + close(decoder->fds[i * 2 + j]); + } + decoder->fds[i * 2 + j] = 0; + decoder->images[i * 2 + j] = 0; #endif #endif - } - } + } + } #ifdef PLACEBO - pl_renderer_destroy(&p->renderer); - p->renderer = pl_renderer_create(p->ctx, p->gpu); + pl_renderer_destroy(&p->renderer); + p->renderer = pl_renderer_create(p->ctx, p->gpu); #else - glDeleteTextures(CODEC_SURFACES_MAX * 2,(GLuint*)&decoder->gl_textures); - GlxCheck(); + glDeleteTextures(CODEC_SURFACES_MAX * 2, (GLuint *) & decoder->gl_textures); + GlxCheck(); + + if (CuvidDecoderN == 1) { // only wenn last decoder closes + Debug(3, "Last decoder closes\n"); + glDeleteBuffers(1, (GLuint *) & vao_buffer); + if (gl_prog) + glDeleteProgram(gl_prog); + gl_prog = 0; + } +#endif - if (CuvidDecoderN == 1) { // only wenn last decoder closes - Debug(3,"Last decoder closes\n"); - glDeleteBuffers(1,(GLuint *)&vao_buffer); - if (gl_prog) - glDeleteProgram(gl_prog); - gl_prog = 0; - } -#endif - for (i = 0; i < decoder->SurfaceFreeN; ++i) { - decoder->SurfacesFree[i] = -1; + decoder->SurfacesFree[i] = -1; } - + for (i = 0; i < decoder->SurfaceUsedN; ++i) { - decoder->SurfacesUsed[i] = -1; + decoder->SurfacesUsed[i] = -1; } - + decoder->SurfaceFreeN = 0; decoder->SurfaceUsedN = 0; } /// -/// Get a free surface. +/// Get a free surface. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// -/// @returns the oldest free surface +/// @returns the oldest free surface /// static int CuvidGetVideoSurface0(CuvidDecoder * decoder) { @@ -1621,15 +1619,15 @@ static int CuvidGetVideoSurface0(CuvidDecoder * decoder) int i; if (!decoder->SurfaceFreeN) { -// Error(_("video/cuvid: out of surfaces\n")); - return -1; +// Error(_("video/cuvid: out of surfaces\n")); + return -1; } // use oldest surface surface = decoder->SurfacesFree[0]; decoder->SurfaceFreeN--; for (i = 0; i < decoder->SurfaceFreeN; ++i) { - decoder->SurfacesFree[i] = decoder->SurfacesFree[i + 1]; + decoder->SurfacesFree[i] = decoder->SurfacesFree[i + 1]; } decoder->SurfacesFree[i] = -1; // save as used @@ -1639,88 +1637,91 @@ static int CuvidGetVideoSurface0(CuvidDecoder * decoder) } /// -/// Release a surface. +/// Release a surface. 
/// -/// @param decoder CUVID hw decoder -/// @param surface surface no longer used +/// @param decoder CUVID hw decoder +/// @param surface surface no longer used /// static void CuvidReleaseSurface(CuvidDecoder * decoder, int surface) { int i; - if (decoder->frames[surface]) { - av_frame_free(&decoder->frames[surface]); - } -#ifdef PLACEBO - if (p->has_dma_buf) { - if (decoder->pl_images[surface].planes[0].texture) { - if (decoder->pl_images[surface].planes[0].texture->params.shared_mem.handle.fd) { - close(decoder->pl_images[surface].planes[0].texture->params.shared_mem.handle.fd); - } - pl_tex_destroy(p->gpu,&decoder->pl_images[surface].planes[0].texture); - } - if (decoder->pl_images[surface].planes[1].texture) { - if (decoder->pl_images[surface].planes[1].texture->params.shared_mem.handle.fd) { - close(decoder->pl_images[surface].planes[1].texture->params.shared_mem.handle.fd); - } - pl_tex_destroy(p->gpu,&decoder->pl_images[surface].planes[1].texture); - } - } + + if (decoder->frames[surface]) { + av_frame_free(&decoder->frames[surface]); + } +#ifdef PLACEBO + if (p->has_dma_buf) { + if (decoder->pl_images[surface].planes[0].texture) { + if (decoder->pl_images[surface].planes[0].texture->params.shared_mem.handle.fd) { + close(decoder->pl_images[surface].planes[0].texture->params.shared_mem.handle.fd); + } + pl_tex_destroy(p->gpu, &decoder->pl_images[surface].planes[0].texture); + } + if (decoder->pl_images[surface].planes[1].texture) { + if (decoder->pl_images[surface].planes[1].texture->params.shared_mem.handle.fd) { + close(decoder->pl_images[surface].planes[1].texture->params.shared_mem.handle.fd); + } + pl_tex_destroy(p->gpu, &decoder->pl_images[surface].planes[1].texture); + } + } #else -#ifdef VAAPI - if (decoder->images[surface*2]) { - DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[surface*2]); - DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[surface*2+1]); - if (decoder->fds[surface*2]) { - close(decoder->fds[surface*2]); - close(decoder->fds[surface*2+1]); - } - } - decoder->fds[surface*2] = 0; - decoder->fds[surface*2+1] = 0; - decoder->images[surface*2] = 0; - decoder->images[surface*2+1] = 0; +#ifdef VAAPI + if (decoder->images[surface * 2]) { + DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[surface * 2]); + DestroyImageKHR(eglGetCurrentDisplay(), decoder->images[surface * 2 + 1]); + if (decoder->fds[surface * 2]) { + close(decoder->fds[surface * 2]); + close(decoder->fds[surface * 2 + 1]); + } + } + decoder->fds[surface * 2] = 0; + decoder->fds[surface * 2 + 1] = 0; + decoder->images[surface * 2] = 0; + decoder->images[surface * 2 + 1] = 0; #endif #endif for (i = 0; i < decoder->SurfaceUsedN; ++i) { - if (decoder->SurfacesUsed[i] == surface) { - // no problem, with last used - decoder->SurfacesUsed[i] = decoder->SurfacesUsed[--decoder->SurfaceUsedN]; - decoder->SurfacesFree[decoder->SurfaceFreeN++] = surface; - return; - } + if (decoder->SurfacesUsed[i] == surface) { + // no problem, with last used + decoder->SurfacesUsed[i] = decoder->SurfacesUsed[--decoder->SurfaceUsedN]; + decoder->SurfacesFree[decoder->SurfaceFreeN++] = surface; + return; + } } Fatal(_("video/cuvid: release surface %#08x, which is not in use\n"), surface); } /// -/// Debug CUVID decoder frames drop... +/// Debug CUVID decoder frames drop... 
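CuvidReleaseSurface() above takes a surface id out of the unordered SurfacesUsed[] array with the O(1) swap-with-last idiom instead of shifting every following entry; the same trick reappears later when a decoder is removed from CuvidDecoders[]. A generic sketch of the pattern, with hypothetical names:

    /* Remove list[idx] from an unordered array of n entries in O(1):
       overwrite it with the last entry and shrink the count. */
    static int RemoveUnordered(int *list, int n, int idx)
    {
        list[idx] = list[--n];                  /* element order is not preserved */
        return n;                               /* new element count */
    }

    /* Example: used = {7, 3, 9}; RemoveUnordered(used, 3, 0) leaves {9, 3} with count 2. */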
/// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidPrintFrames(const CuvidDecoder * decoder) { - Debug(3, "video/cuvid: %d missed, %d duped, %d dropped frames of %d,%d\n", - decoder->FramesMissed, decoder->FramesDuped, decoder->FramesDropped, - decoder->FrameCounter, decoder->FramesDisplayed); + Debug(3, "video/cuvid: %d missed, %d duped, %d dropped frames of %d,%d\n", decoder->FramesMissed, + decoder->FramesDuped, decoder->FramesDropped, decoder->FrameCounter, decoder->FramesDisplayed); #ifndef DEBUG (void)decoder; #endif } -int CuvidTestSurfaces() { - if (CuvidDecoders[0] != NULL) { - if (atomic_read(&CuvidDecoders[0]->SurfacesFilled) < VIDEO_SURFACES_MAX) - return 1; - return 0; - } else - return 0; +int CuvidTestSurfaces() +{ + if (CuvidDecoders[0] != NULL) { + if (atomic_read(&CuvidDecoders[0]->SurfacesFilled) < VIDEO_SURFACES_MAX) + return 1; + return 0; + } else + return 0; } #ifdef VAAPI -struct mp_egl_config_attr { +struct mp_egl_config_attr +{ int attrib; const char *name; }; + #define MPGL_VER(major, minor) (((major) * 100) + (minor) * 10) #define MPGL_VER_GET_MAJOR(ver) ((unsigned)(ver) / 100) #define MPGL_VER_GET_MINOR(ver) ((unsigned)(ver) % 100 / 10) @@ -1748,35 +1749,35 @@ const int mpgl_preferred_gl_versions[] = { 210, 0 }; -static bool create_context_cb(EGLDisplay display, - int es_version, - EGLContext *out_context, EGLConfig *out_config) + +static bool create_context_cb(EGLDisplay display, int es_version, EGLContext * out_context, EGLConfig * out_config) { - + EGLenum api; EGLint rend, *attribs; const char *name; switch (es_version) { - case 0: - api = EGL_OPENGL_API; - rend = EGL_OPENGL_BIT; - name = "Desktop OpenGL"; - break; - case 2: - api = EGL_OPENGL_ES_API; - rend = EGL_OPENGL_ES2_BIT; - name = "GLES 2.x"; - break; - case 3: - api = EGL_OPENGL_ES_API; - rend = EGL_OPENGL_ES3_BIT; - name = "GLES 3.x"; - break; - default: Fatal(_("Wrong ES version \n"));; + case 0: + api = EGL_OPENGL_API; + rend = EGL_OPENGL_BIT; + name = "Desktop OpenGL"; + break; + case 2: + api = EGL_OPENGL_ES_API; + rend = EGL_OPENGL_ES2_BIT; + name = "GLES 2.x"; + break; + case 3: + api = EGL_OPENGL_ES_API; + rend = EGL_OPENGL_ES3_BIT; + name = "GLES 3.x"; + break; + default: + Fatal(_("Wrong ES version \n"));; } - Debug(3,"Trying to create %s context.\n", name); + Debug(3, "Trying to create %s context.\n", name); if (!eglBindAPI(api)) { Fatal(_(" Could not bind API!\n")); @@ -1791,175 +1792,174 @@ static bool create_context_cb(EGLDisplay display, EGL_RENDERABLE_TYPE, rend, EGL_NONE }; - EGLint attributes10[] = { - EGL_SURFACE_TYPE, EGL_WINDOW_BIT, - EGL_RED_SIZE, 10, - EGL_GREEN_SIZE, 10, - EGL_BLUE_SIZE, 10, - EGL_ALPHA_SIZE, 2, - EGL_RENDERABLE_TYPE, rend, - EGL_NONE - }; + EGLint attributes10[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 10, + EGL_GREEN_SIZE, 10, + EGL_BLUE_SIZE, 10, + EGL_ALPHA_SIZE, 2, + EGL_RENDERABLE_TYPE, rend, + EGL_NONE + }; EGLint num_configs; - - attribs = attributes10; - - if (!eglChooseConfig(display, attributes10, NULL, 0, &num_configs)) { // try 10 Bit - Debug(3," 10 Bit egl Failed\n"); - attribs = attributes8; - if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit - num_configs = 0; - } - } else if (num_configs == 0) { - EglCheck(); - Debug(3," 10 Bit egl Failed\n"); - attribs = attributes8; - if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit - num_configs = 0; - } - } + + attribs = attributes10; + + if (!eglChooseConfig(display, 
attributes10, NULL, 0, &num_configs)) { // try 10 Bit + Debug(3, " 10 Bit egl Failed\n"); + attribs = attributes8; + if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit + num_configs = 0; + } + } else if (num_configs == 0) { + EglCheck(); + Debug(3, " 10 Bit egl Failed\n"); + attribs = attributes8; + if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit + num_configs = 0; + } + } EGLConfig *configs = malloc(sizeof(EGLConfig) * num_configs); + if (!eglChooseConfig(display, attribs, configs, num_configs, &num_configs)) num_configs = 0; if (!num_configs) { - free (configs); - Debug(3,"Could not choose EGLConfig for %s!\n", name); + free(configs); + Debug(3, "Could not choose EGLConfig for %s!\n", name); return false; } EGLConfig config = configs[0]; + free(configs); EGLContext *egl_ctx = NULL; if (es_version) { eglAttrs[0] = EGL_CONTEXT_CLIENT_VERSION; - eglAttrs[1] = es_version; - eglAttrs[2] = EGL_NONE; + eglAttrs[1] = es_version; + eglAttrs[2] = EGL_NONE; egl_ctx = eglCreateContext(display, config, EGL_NO_CONTEXT, eglAttrs); } else { for (int n = 0; mpgl_preferred_gl_versions[n]; n++) { int ver = mpgl_preferred_gl_versions[n]; - + eglAttrs[0] = EGL_CONTEXT_MAJOR_VERSION; - eglAttrs[1] = MPGL_VER_GET_MAJOR(ver); - eglAttrs[2] = EGL_CONTEXT_MINOR_VERSION; - eglAttrs[3] = MPGL_VER_GET_MINOR(ver); - eglAttrs[4] = EGL_CONTEXT_OPENGL_PROFILE_MASK; - eglAttrs[5] = ver >= 320 ? EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT : 0; - eglAttrs[6] = EGL_NONE; - + eglAttrs[1] = MPGL_VER_GET_MAJOR(ver); + eglAttrs[2] = EGL_CONTEXT_MINOR_VERSION; + eglAttrs[3] = MPGL_VER_GET_MINOR(ver); + eglAttrs[4] = EGL_CONTEXT_OPENGL_PROFILE_MASK; + eglAttrs[5] = ver >= 320 ? EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT : 0; + eglAttrs[6] = EGL_NONE; egl_ctx = eglCreateContext(display, config, EGL_NO_CONTEXT, eglAttrs); - EglCheck(); + EglCheck(); if (egl_ctx) { - Debug(3,"Use %d GLVersion\n",ver); + Debug(3, "Use %d GLVersion\n", ver); break; - } + } } } if (!egl_ctx) { - Debug(3,"Could not create EGL context for %s!\n", name); + Debug(3, "Could not create EGL context for %s!\n", name); return false; } *out_context = egl_ctx; *out_config = config; - eglVersion = es_version; + eglVersion = es_version; return true; } -make_egl() { +make_egl() +{ - CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"); - DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR"); - EGLImageTargetTexture2DOES = (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES"); + CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"); + DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR"); + EGLImageTargetTexture2DOES = (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES"); + + if (!CreateImageKHR || !DestroyImageKHR || !EGLImageTargetTexture2DOES) + Fatal(_("Can't get EGL Extentions\n")); + + eglDisplay = eglGetDisplay(XlibDisplay); - if (!CreateImageKHR || !DestroyImageKHR || !EGLImageTargetTexture2DOES) - Fatal(_("Can't get EGL Extentions\n")); - - eglDisplay = eglGetDisplay(XlibDisplay); - if (!eglInitialize(eglDisplay, NULL, NULL)) { Fatal(_("Could not initialize EGL.\n")); - } + } + + if (!create_context_cb(eglDisplay, 0, &eglContext, &eglConfig)) { + Fatal(_("Could not create EGL Context\n")); + } + int vID, n; - if (!create_context_cb(eglDisplay, 0, &eglContext, &eglConfig)) { - Fatal(_("Could not create EGL Context\n")); - } - int vID, n; eglGetConfigAttrib(eglDisplay, eglConfig, EGL_NATIVE_VISUAL_ID, &vID); - Debug(3,"chose visual 0x%x\n", vID); - - 
eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, - (EGLNativeWindowType)VideoWindow, NULL); + Debug(3, "chose visual 0x%x\n", vID); + + eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, (EGLNativeWindowType) VideoWindow, NULL); if (eglSurface == EGL_NO_SURFACE) { Fatal(_("Could not create EGL surface!\n")); } - if (!eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) - { + if (!eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) { Fatal(_("Could not make context current!\n")); } - EglEnabled = 1; -} + EglEnabled = 1; +} #endif - - /// -/// Allocate new CUVID decoder. +/// Allocate new CUVID decoder. /// -/// @param stream video stream +/// @param stream video stream /// -/// @returns a new prepared cuvid hardware decoder. +/// @returns a new prepared cuvid hardware decoder. /// static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) { - + CuvidDecoder *decoder; - int i=0; + int i = 0; -// setenv ("DISPLAY", ":0", 0); +// setenv ("DISPLAY", ":0", 0); - Debug(3,"Cuvid New HW Decoder\n"); - if ((unsigned)CuvidDecoderN >= sizeof(CuvidDecoders) / sizeof(*CuvidDecoders)) { - Error(_("video/cuvid: out of decoders\n")); - return NULL; + Debug(3, "Cuvid New HW Decoder\n"); + if ((unsigned)CuvidDecoderN >= sizeof(CuvidDecoders) / sizeof(*CuvidDecoders)) { + Error(_("video/cuvid: out of decoders\n")); + return NULL; } -#ifdef CUVID +#ifdef CUVID if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA, X11DisplayName, NULL, 0)) != 0) { - Fatal("codec: can't allocate HW video codec context err %04x",i); + Fatal("codec: can't allocate HW video codec context err %04x", i); } #endif #ifdef VAAPI - if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128" , NULL, 0)) != 0) { -// if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, ":0.0" , NULL, 0)) != 0 ) { - Fatal("codec: can't allocate HW video codec context err %04x",i); + if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128", NULL, 0)) != 0) { +// if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, ":0.0" , NULL, 0)) != 0 ) { + Fatal("codec: can't allocate HW video codec context err %04x", i); } #endif HwDeviceContext = av_buffer_ref(hw_device_ctx); - + if (!(decoder = calloc(1, sizeof(*decoder)))) { - Error(_("video/cuvid: out of memory\n")); - return NULL; + Error(_("video/cuvid: out of memory\n")); + return NULL; } -#ifdef VAAPI +#ifdef VAAPI VaDisplay = TO_VAAPI_DEVICE_CTX(HwDeviceContext)->display; decoder->VaDisplay = VaDisplay; #endif decoder->Window = VideoWindow; - //decoder->VideoX = 0; // done by calloc + //decoder->VideoX = 0; // done by calloc //decoder->VideoY = 0; decoder->VideoWidth = VideoWindowWidth; decoder->VideoHeight = VideoWindowHeight; for (i = 0; i < CODEC_SURFACES_MAX; ++i) { - decoder->SurfacesUsed[i] = -1; - decoder->SurfacesFree[i] = -1; + decoder->SurfacesUsed[i] = -1; + decoder->SurfacesFree[i] = -1; } // @@ -1968,7 +1968,7 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) atomic_set(&decoder->SurfacesFilled, 0); for (i = 0; i < VIDEO_SURFACES_MAX; ++i) { - decoder->SurfacesRb[i] = -1; + decoder->SurfacesRb[i] = -1; } decoder->OutputWidth = VideoWindowWidth; @@ -1976,8 +1976,8 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) decoder->PixFmt = AV_PIX_FMT_NONE; decoder->Stream = stream; - if (!CuvidDecoderN) { // FIXME: hack sync on audio - decoder->SyncOnAudio = 1; + if (!CuvidDecoderN) { // FIXME: hack sync on audio + 
decoder->SyncOnAudio = 1; } decoder->Closing = -300 - 1; decoder->PTS = AV_NOPTS_VALUE; @@ -1988,18 +1988,18 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) } /// -/// Cleanup CUVID. +/// Cleanup CUVID. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidCleanup(CuvidDecoder * decoder) { int i; -Debug(3,"Cuvid Clean up\n"); - + Debug(3, "Cuvid Clean up\n"); + if (decoder->SurfaceFreeN || decoder->SurfaceUsedN) { - CuvidDestroySurfaces(decoder); + CuvidDestroySurfaces(decoder); } // // reset video surface ring buffer @@ -2007,7 +2007,7 @@ Debug(3,"Cuvid Clean up\n"); atomic_set(&decoder->SurfacesFilled, 0); for (i = 0; i < VIDEO_SURFACES_MAX; ++i) { - decoder->SurfacesRb[i] = -1; + decoder->SurfacesRb[i] = -1; } decoder->SurfaceRead = 0; decoder->SurfaceWrite = 0; @@ -2023,63 +2023,65 @@ Debug(3,"Cuvid Clean up\n"); } /// -/// Destroy a CUVID decoder. +/// Destroy a CUVID decoder. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidDelHwDecoder(CuvidDecoder * decoder) { int i; -Debug(3,"cuvid del hw decoder \n"); - if (decoder == CuvidDecoders[0]) - VideoThreadLock(); + + Debug(3, "cuvid del hw decoder \n"); + if (decoder == CuvidDecoders[0]) + VideoThreadLock(); #ifndef PLACEBO #ifdef CUVID - glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); - GlxCheck(); + glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); + GlxCheck(); #else - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); - EglCheck(); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext); + EglCheck(); #endif #endif if (decoder->SurfaceFreeN || decoder->SurfaceUsedN) { - CuvidDestroySurfaces(decoder); + CuvidDestroySurfaces(decoder); } - if (decoder == CuvidDecoders[0]) - VideoThreadUnlock(); + if (decoder == CuvidDecoders[0]) + VideoThreadUnlock(); -// glXMakeCurrent(XlibDisplay, None, NULL); +// glXMakeCurrent(XlibDisplay, None, NULL); for (i = 0; i < CuvidDecoderN; ++i) { - if (CuvidDecoders[i] == decoder) { - CuvidDecoders[i] = NULL; - // copy last slot into empty slot - if (i < --CuvidDecoderN) { - CuvidDecoders[i] = CuvidDecoders[CuvidDecoderN]; - } -// CuvidCleanup(decoder); - CuvidPrintFrames(decoder); + if (CuvidDecoders[i] == decoder) { + CuvidDecoders[i] = NULL; + // copy last slot into empty slot + if (i < --CuvidDecoderN) { + CuvidDecoders[i] = CuvidDecoders[CuvidDecoderN]; + } +// CuvidCleanup(decoder); + CuvidPrintFrames(decoder); #ifdef CUVID - if (decoder->cuda_ctx && CuvidDecoderN == 1) { - cuCtxDestroy (decoder->cuda_ctx); - } + if (decoder->cuda_ctx && CuvidDecoderN == 1) { + cuCtxDestroy(decoder->cuda_ctx); + } #endif - free(decoder); - return; - } + free(decoder); + return; + } } Error(_("video/cuvid: decoder not in decoder list.\n")); } -static int CuvidGlxInit(__attribute__((unused))const char *display_name) +static int CuvidGlxInit( __attribute__((unused)) + const char *display_name) { #ifndef PLACEBO - + EglInit(); if (EglEnabled) { #ifdef CUVID GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, glxContext); #else - GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, eglContext); + GlxSetupWindow(VideoWindow, VideoWindowWidth, VideoWindowHeight, eglContext); #endif } @@ -2087,420 +2089,425 @@ static int CuvidGlxInit(__attribute__((unused))const char *display_name) Fatal(_("video/egl: egl init error\n")); } #else - EglEnabled = 0; + EglEnabled = 0; #endif return 1; } + /// -/// CUVID cleanup. +/// CUVID cleanup. 
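CuvidNewHwDecoder() above obtains its FFmpeg hardware device context from av_hwdevice_ctx_create(), passing the X11 display name for CUDA and a DRM render node for VAAPI. A minimal standalone sketch of that call, assuming FFmpeg's libavutil headers and with error handling reduced to a message:

    #include <stdio.h>
    #include <libavutil/error.h>
    #include <libavutil/hwcontext.h>

    /* Create a hardware device context; 'device' is e.g. ":0.0" for CUDA or
       "/dev/dri/renderD128" for VAAPI. Returns NULL on failure. */
    static AVBufferRef *CreateHwDevice(enum AVHWDeviceType type, const char *device)
    {
        AVBufferRef *hw_device_ctx = NULL;
        int err = av_hwdevice_ctx_create(&hw_device_ctx, type, device, NULL, 0);

        if (err < 0) {
            char buf[AV_ERROR_MAX_STRING_SIZE];

            av_strerror(err, buf, sizeof(buf));
            fprintf(stderr, "av_hwdevice_ctx_create: %s\n", buf);
            return NULL;
        }
        return hw_device_ctx;                   /* the decoder later hands a ref to the codec context */
    }

    /* e.g. CreateHwDevice(AV_HWDEVICE_TYPE_CUDA, ":0.0");
            CreateHwDevice(AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128"); */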
/// static void CuvidExit(void) { int i; for (i = 0; i < CuvidDecoderN; ++i) { - if (CuvidDecoders[i]) { - CuvidDelHwDecoder(CuvidDecoders[i]); - CuvidDecoders[i] = NULL; - } + if (CuvidDecoders[i]) { + CuvidDelHwDecoder(CuvidDecoders[i]); + CuvidDecoders[i] = NULL; + } } CuvidDecoderN = 0; - Debug(3,"CuvidExit\n"); - + Debug(3, "CuvidExit\n"); + } /// -/// Update output for new size or aspect ratio. +/// Update output for new size or aspect ratio. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidUpdateOutput(CuvidDecoder * decoder) { - VideoUpdateOutput(decoder->InputAspect, decoder->InputWidth, - decoder->InputHeight, decoder->Resolution, decoder->VideoX, - decoder->VideoY, decoder->VideoWidth, decoder->VideoHeight, - &decoder->OutputX, &decoder->OutputY, &decoder->OutputWidth, - &decoder->OutputHeight, &decoder->CropX, &decoder->CropY, - &decoder->CropWidth, &decoder->CropHeight); + VideoUpdateOutput(decoder->InputAspect, decoder->InputWidth, decoder->InputHeight, decoder->Resolution, + decoder->VideoX, decoder->VideoY, decoder->VideoWidth, decoder->VideoHeight, &decoder->OutputX, + &decoder->OutputY, &decoder->OutputWidth, &decoder->OutputHeight, &decoder->CropX, &decoder->CropY, + &decoder->CropWidth, &decoder->CropHeight); } -void SDK_CHECK_ERROR_GL() { +void SDK_CHECK_ERROR_GL() +{ GLenum gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { - Fatal(_("video/cuvid: SDL error %d\n"),gl_error); + Fatal(_("video/cuvid: SDL error %d\n"), gl_error); } } - + #ifdef CUVID // copy image and process using CUDA -void generateCUDAImage(CuvidDecoder * decoder,int index, const AVFrame *frame,int image_width , int image_height, int bytes) +void generateCUDAImage(CuvidDecoder * decoder, int index, const AVFrame * frame, int image_width, int image_height, + int bytes) { int n; - for (n = 0; n < 2; n++) { // + + for (n = 0; n < 2; n++) { // // widthInBytes must account for the chroma plane // elements being two samples wide. CUDA_MEMCPY2D cpy = { .srcMemoryType = CU_MEMORYTYPE_DEVICE, - .dstMemoryType = CU_MEMORYTYPE_ARRAY, - .srcDevice = (CUdeviceptr)frame->data[n], - .srcPitch = frame->linesize[n], - .srcY = 0, - .dstArray = decoder->cu_array[index][n], - .WidthInBytes = image_width * bytes, - .Height = n==0?image_height:image_height/2 , + .dstMemoryType = CU_MEMORYTYPE_ARRAY, + .srcDevice = (CUdeviceptr) frame->data[n], + .srcPitch = frame->linesize[n], + .srcY = 0, + .dstArray = decoder->cu_array[index][n], + .WidthInBytes = image_width * bytes, + .Height = n == 0 ? 
image_height : image_height / 2, }; - checkCudaErrors(cuMemcpy2D(&cpy)); + checkCudaErrors(cuMemcpy2D(&cpy)); } } #endif #ifdef PLACEBO -void -createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned int size_y, enum AVPixelFormat PixFmt) -{ - int n,i,size=1,fd; - const struct pl_fmt *fmt; - struct pl_tex *tex; - struct pl_image *img; - struct pl_plane *pl; - -//printf("Create textures and planes %d %d\n",size_x,size_y); - Debug(3,"video/vulkan: create %d Textures Format %s w %d h %d \n",anz,PixFmt==AV_PIX_FMT_NV12?"NV12":"P010",size_x,size_y); - - for (i=0;iframes[i]) { - av_frame_free(&decoder->frames[i]); - decoder->frames[i] = NULL; - } - for (n=0;n<2;n++ ) { // number of planes - bool ok = true; - if (PixFmt == AV_PIX_FMT_NV12) { - fmt = pl_find_named_fmt(p->gpu, n==0?"r8":"rg8"); // 8 Bit YUV - size = 1; - } else { - fmt = pl_find_named_fmt(p->gpu, n==0?"r16":"rg16"); // 10 Bit YUV - size = 2; - } - if (decoder->pl_images[i].planes[n].texture) { +void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsigned int size_y, + enum AVPixelFormat PixFmt) +{ + int n, i, size = 1, fd; + const struct pl_fmt *fmt; + struct pl_tex *tex; + struct pl_image *img; + struct pl_plane *pl; + +//printf("Create textures and planes %d %d\n",size_x,size_y); + Debug(3, "video/vulkan: create %d Textures Format %s w %d h %d \n", anz, + PixFmt == AV_PIX_FMT_NV12 ? "NV12" : "P010", size_x, size_y); + + for (i = 0; i < anz; i++) { // number of texture + if (decoder->frames[i]) { + av_frame_free(&decoder->frames[i]); + decoder->frames[i] = NULL; + } + for (n = 0; n < 2; n++) { // number of planes + bool ok = true; + + if (PixFmt == AV_PIX_FMT_NV12) { + fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r8" : "rg8"); // 8 Bit YUV + size = 1; + } else { + fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r16" : "rg16"); // 10 Bit YUV + size = 2; + } + if (decoder->pl_images[i].planes[n].texture) { //#ifdef VAAPI - if (decoder->pl_images[i].planes[n].texture->params.shared_mem.handle.fd) { - close(decoder->pl_images[i].planes[n].texture->params.shared_mem.handle.fd); - } + if (decoder->pl_images[i].planes[n].texture->params.shared_mem.handle.fd) { + close(decoder->pl_images[i].planes[n].texture->params.shared_mem.handle.fd); + } //#endif - pl_tex_destroy(p->gpu,&decoder->pl_images[i].planes[n].texture); // delete old texture - } - - if (p->has_dma_buf == 0) { - decoder->pl_images[i].planes[n].texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { - .w = n==0?size_x:size_x/2, - .h = n==0?size_y:size_y/2, - .d = 0, - .format = fmt, - .sampleable = true, - .host_writable = true, - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .address_mode = PL_TEX_ADDRESS_CLAMP, - .export_handle = PL_HANDLE_FD, - }); - } - // make planes for image - pl = &decoder->pl_images[i].planes[n]; - pl->components = n==0?1:2; - pl->shift_x = 0.0f; - pl->shift_y = 0.0f; - if (n==0) { - pl->component_mapping[0] = PL_CHANNEL_Y; - pl->component_mapping[1] = -1; - pl->component_mapping[2] = -1; - pl->component_mapping[3] = -1; - } else { - pl->component_mapping[0] = PL_CHANNEL_U; - pl->component_mapping[1] = PL_CHANNEL_V; - pl->component_mapping[2] = -1; - pl->component_mapping[3] = -1; - } - if (!ok) { - Fatal(_("Unable to create placebo textures")); - } + pl_tex_destroy(p->gpu, &decoder->pl_images[i].planes[n].texture); // delete old texture + } + + if (p->has_dma_buf == 0) { + decoder->pl_images[i].planes[n].texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { + .w = n == 0 ? size_x : size_x / 2, + .h = n == 0 ? 
size_y : size_y / 2, + .d = 0, + .format = fmt, + .sampleable = true, + .host_writable = true, + .sample_mode = PL_TEX_SAMPLE_LINEAR, + .address_mode = PL_TEX_ADDRESS_CLAMP, + .export_handle = PL_HANDLE_FD, + }); + } + // make planes for image + pl = &decoder->pl_images[i].planes[n]; + pl->components = n == 0 ? 1 : 2; + pl->shift_x = 0.0f; + pl->shift_y = 0.0f; + if (n == 0) { + pl->component_mapping[0] = PL_CHANNEL_Y; + pl->component_mapping[1] = -1; + pl->component_mapping[2] = -1; + pl->component_mapping[3] = -1; + } else { + pl->component_mapping[0] = PL_CHANNEL_U; + pl->component_mapping[1] = PL_CHANNEL_V; + pl->component_mapping[2] = -1; + pl->component_mapping[3] = -1; + } + if (!ok) { + Fatal(_("Unable to create placebo textures")); + } #ifdef CUVID - fd = dup(decoder->pl_images[i].planes[n].texture->shared_mem.handle.fd); - CUDA_EXTERNAL_MEMORY_HANDLE_DESC ext_desc = { - .type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, - .handle.fd = fd, - .size = decoder->pl_images[i].planes[n].texture->shared_mem.size, // image_width * image_height * bytes, - .flags = 0, - }; - checkCudaErrors(cuImportExternalMemory(&decoder->ebuf[i*2+n].mem, &ext_desc)); // Import Memory segment - CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC tex_desc = { - .offset = decoder->pl_images[i].planes[n].texture->shared_mem.offset, - .arrayDesc = { - .Width = n==0?size_x:size_x/2, - .Height = n==0?size_y:size_y/2, - .Depth = 0, - .Format = PixFmt == AV_PIX_FMT_NV12 ? CU_AD_FORMAT_UNSIGNED_INT8:CU_AD_FORMAT_UNSIGNED_INT16, - .NumChannels = n==0?1:2, - .Flags = 0, - }, - .numLevels = 1, - }; - checkCudaErrors(cuExternalMemoryGetMappedMipmappedArray(&decoder->ebuf[i*2+n].mma,decoder->ebuf[i*2+n].mem,&tex_desc)); - checkCudaErrors(cuMipmappedArrayGetLevel(&decoder->cu_array[i][n],decoder->ebuf[i*2+n].mma,0)); + fd = dup(decoder->pl_images[i].planes[n].texture->shared_mem.handle.fd); + CUDA_EXTERNAL_MEMORY_HANDLE_DESC ext_desc = { + .type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, + .handle.fd = fd, + .size = decoder->pl_images[i].planes[n].texture->shared_mem.size, // image_width * image_height * bytes, + .flags = 0, + }; + checkCudaErrors(cuImportExternalMemory(&decoder->ebuf[i * 2 + n].mem, &ext_desc)); // Import Memory segment + CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC tex_desc = { + .offset = decoder->pl_images[i].planes[n].texture->shared_mem.offset, + .arrayDesc = { + .Width = n == 0 ? size_x : size_x / 2, + .Height = n == 0 ? size_y : size_y / 2, + .Depth = 0, + .Format = PixFmt == AV_PIX_FMT_NV12 ? CU_AD_FORMAT_UNSIGNED_INT8 : CU_AD_FORMAT_UNSIGNED_INT16, + .NumChannels = n == 0 ? 1 : 2, + .Flags = 0, + }, + .numLevels = 1, + }; + checkCudaErrors(cuExternalMemoryGetMappedMipmappedArray(&decoder->ebuf[i * 2 + n].mma, + decoder->ebuf[i * 2 + n].mem, &tex_desc)); + checkCudaErrors(cuMipmappedArrayGetLevel(&decoder->cu_array[i][n], decoder->ebuf[i * 2 + n].mma, 0)); #endif - } - // make image - img = &decoder->pl_images[i]; - img->signature = i; - img->num_planes = 2; - img->repr.sys = PL_COLOR_SYSTEM_BT_709; // overwritten later - img->repr.levels = PL_COLOR_LEVELS_TV; - img->repr.alpha = PL_ALPHA_UNKNOWN; - img->color.primaries = pl_color_primaries_guess(size_x,size_y); // Gammut overwritten later - img->color.transfer = PL_COLOR_TRC_BT_1886; // overwritten later - img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; // needs config ??? - img->color.sig_peak = 0.0f; // needs config ???? 
- img->color.sig_avg = 0.0f; - img->width = size_x; - img->height = size_y; - img->num_overlays = 0; - } + } + // make image + img = &decoder->pl_images[i]; + img->signature = i; + img->num_planes = 2; + img->repr.sys = PL_COLOR_SYSTEM_BT_709; // overwritten later + img->repr.levels = PL_COLOR_LEVELS_TV; + img->repr.alpha = PL_ALPHA_UNKNOWN; + img->color.primaries = pl_color_primaries_guess(size_x, size_y); // Gammut overwritten later + img->color.transfer = PL_COLOR_TRC_BT_1886; // overwritten later + img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; // needs config ??? + img->color.sig_peak = 0.0f; // needs config ???? + img->color.sig_avg = 0.0f; + img->width = size_x; + img->height = size_y; + img->num_overlays = 0; + } } + #ifdef VAAPI // copy image and process using CUDA -void generateVAAPIImage(CuvidDecoder * decoder,int index, const AVFrame *frame,int image_width , int image_height) +void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame, int image_width, int image_height) { int n; - VAStatus status; - int toggle = 0; - uint64_t first_time; - VADRMPRIMESurfaceDescriptor desc; - - status = vaExportSurfaceHandle(decoder->VaDisplay, (unsigned int)frame->data[3], - VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, - VA_EXPORT_SURFACE_READ_ONLY | - VA_EXPORT_SURFACE_SEPARATE_LAYERS, - &desc); - - if (status != VA_STATUS_SUCCESS) { - printf("Fehler beim export VAAPI Handle\n"); - return; - } - vaSyncSurface(decoder->VaDisplay,(unsigned int)frame->data[3]); - VideoThreadLock(); - for (n = 0; n < 2; n++) { // Set DMA_BUF from VAAPI decoder to Textures - int id = desc.layers[n].object_index[0]; - int fd = desc.objects[id].fd; - uint32_t size = desc.objects[id].size; - uint32_t offset = desc.layers[n].offset[0]; - const struct pl_fmt *fmt; - - if (fd == -1) { - printf("Fehler beim Import von Surface %d\n",index); - return; - } - - if (decoder->PixFmt == AV_PIX_FMT_NV12) { - fmt = pl_find_named_fmt(p->gpu, n==0?"r8":"rg8"); // 8 Bit YUV - } else { - fmt = pl_find_named_fmt(p->gpu, n==0?"r16":"rg16"); // 10 Bit YUV - } + VAStatus status; + int toggle = 0; + uint64_t first_time; + VADRMPRIMESurfaceDescriptor desc; - struct pl_tex_params tex_params = { - .w = n==0?image_width:image_width/2, - .h = n==0?image_height:image_height/2, - .d = 0, - .format = fmt, - .sampleable = true, - .host_writable = false, - .address_mode = PL_TEX_ADDRESS_CLAMP, - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .import_handle = PL_HANDLE_DMA_BUF, - .shared_mem = (struct pl_shared_mem) { + status = + vaExportSurfaceHandle(decoder->VaDisplay, (unsigned int)frame->data[3], VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, + VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &desc); + + if (status != VA_STATUS_SUCCESS) { + printf("Fehler beim export VAAPI Handle\n"); + return; + } + vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]); + VideoThreadLock(); + for (n = 0; n < 2; n++) { // Set DMA_BUF from VAAPI decoder to Textures + int id = desc.layers[n].object_index[0]; + int fd = desc.objects[id].fd; + uint32_t size = desc.objects[id].size; + uint32_t offset = desc.layers[n].offset[0]; + const struct pl_fmt *fmt; + + if (fd == -1) { + printf("Fehler beim Import von Surface %d\n", index); + return; + } + + if (decoder->PixFmt == AV_PIX_FMT_NV12) { + fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r8" : "rg8"); // 8 Bit YUV + } else { + fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r16" : "rg16"); // 10 Bit YUV + } + + struct pl_tex_params tex_params = { + .w = n == 0 ? 
image_width : image_width / 2, + .h = n == 0 ? image_height : image_height / 2, + .d = 0, + .format = fmt, + .sampleable = true, + .host_writable = false, + .address_mode = PL_TEX_ADDRESS_CLAMP, + .sample_mode = PL_TEX_SAMPLE_LINEAR, + .import_handle = PL_HANDLE_DMA_BUF, + .shared_mem = (struct pl_shared_mem) { .handle = { - .fd = fd, - }, + .fd = fd, + }, .size = size, .offset = offset, }, - }; + }; //printf("vor create Object %d with fd %d import size %u offset %d %dx%d\n",id,fd,size,offset, tex_params.w,tex_params.h); - if (decoder->pl_images[index].planes[n].texture) { - pl_tex_destroy(p->gpu,&decoder->pl_images[index].planes[n].texture); + if (decoder->pl_images[index].planes[n].texture) { + pl_tex_destroy(p->gpu, &decoder->pl_images[index].planes[n].texture); - } - decoder->pl_images[index].planes[n].texture = pl_tex_create(p->gpu, &tex_params); + } + decoder->pl_images[index].planes[n].texture = pl_tex_create(p->gpu, &tex_params); } - VideoThreadUnlock(); + VideoThreadUnlock(); } #endif #else -void -createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned int size_y, enum AVPixelFormat PixFmt) +void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsigned int size_y, + enum AVPixelFormat PixFmt) { - - int n,i; - - Debug(3,"video: create %d Textures Format %s w %d h %d \n",anz,PixFmt==AV_PIX_FMT_NV12?"NV12":"P010",size_x,size_y); - -#ifdef CUVID - glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); - GlxCheck(); -#else - OSD_get_shared_context(); -#endif - - glGenBuffers(1,&vao_buffer); - GlxCheck(); - // create texture planes - glGenTextures(CODEC_SURFACES_MAX*2, decoder->gl_textures); - GlxCheck(); - - for (i=0;igl_textures[i*2+n]); - GlxCheck(); - // set basic parameters - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - if (PixFmt == AV_PIX_FMT_NV12) - glTexImage2D(GL_TEXTURE_2D, 0,n==0?GL_R8 :GL_RG8 ,n==0?size_x:size_x/2, n==0?size_y:size_y/2, 0, n==0?GL_RED:GL_RG , GL_UNSIGNED_BYTE , NULL); - else - glTexImage2D(GL_TEXTURE_2D, 0,n==0?GL_R16:GL_RG16 ,n==0?size_x:size_x/2, n==0?size_y:size_y/2, 0, n==0?GL_RED:GL_RG , GL_UNSIGNED_SHORT, NULL); - SDK_CHECK_ERROR_GL(); - // register this texture with CUDA + int n, i; + + Debug(3, "video: create %d Textures Format %s w %d h %d \n", anz, PixFmt == AV_PIX_FMT_NV12 ? 
"NV12" : "P010", + size_x, size_y); + #ifdef CUVID - checkCudaErrors(cuGraphicsGLRegisterImage(&decoder->cu_res[i][n], decoder->gl_textures[i*2+n],GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD)); - checkCudaErrors(cuGraphicsMapResources(1, &decoder->cu_res[i][n], 0)); - checkCudaErrors(cuGraphicsSubResourceGetMappedArray(&decoder->cu_array[i][n], decoder->cu_res[i][n],0, 0)); - checkCudaErrors(cuGraphicsUnmapResources(1, &decoder->cu_res[i][n], 0)); + glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); + GlxCheck(); +#else + OSD_get_shared_context(); #endif - } - } - glBindTexture(GL_TEXTURE_2D, 0); - GlxCheck(); + + glGenBuffers(1, &vao_buffer); + GlxCheck(); + // create texture planes + glGenTextures(CODEC_SURFACES_MAX * 2, decoder->gl_textures); + GlxCheck(); + + for (i = 0; i < anz; i++) { + for (n = 0; n < 2; n++) { // number of planes + + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[i * 2 + n]); + GlxCheck(); + // set basic parameters + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + if (PixFmt == AV_PIX_FMT_NV12) + glTexImage2D(GL_TEXTURE_2D, 0, n == 0 ? GL_R8 : GL_RG8, n == 0 ? size_x : size_x / 2, + n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_BYTE, NULL); + else + glTexImage2D(GL_TEXTURE_2D, 0, n == 0 ? GL_R16 : GL_RG16, n == 0 ? size_x : size_x / 2, + n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_SHORT, NULL); + SDK_CHECK_ERROR_GL(); + // register this texture with CUDA +#ifdef CUVID + checkCudaErrors(cuGraphicsGLRegisterImage(&decoder->cu_res[i][n], decoder->gl_textures[i * 2 + n], + GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD)); + checkCudaErrors(cuGraphicsMapResources(1, &decoder->cu_res[i][n], 0)); + checkCudaErrors(cuGraphicsSubResourceGetMappedArray(&decoder->cu_array[i][n], decoder->cu_res[i][n], 0, + 0)); + checkCudaErrors(cuGraphicsUnmapResources(1, &decoder->cu_res[i][n], 0)); +#endif + } + } + glBindTexture(GL_TEXTURE_2D, 0); + GlxCheck(); #ifdef VAAPI - OSD_release_context(); + OSD_release_context(); #endif } #ifdef VAAPI #define MP_ARRAY_SIZE(s) (sizeof(s) / sizeof((s)[0])) -#define ADD_ATTRIB(name, value) \ - do { \ - assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \ - attribs[num_attribs++] = (name); \ - attribs[num_attribs++] = (value); \ - attribs[num_attribs] = EGL_NONE; \ +#define ADD_ATTRIB(name, value) \ + do { \ + assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \ + attribs[num_attribs++] = (name); \ + attribs[num_attribs++] = (value); \ + attribs[num_attribs] = EGL_NONE; \ } while(0) #define ADD_PLANE_ATTRIBS(plane) do { \ ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \ - desc.objects[desc.layers[n].object_index[plane]].fd); \ + desc.objects[desc.layers[n].object_index[plane]].fd); \ ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \ - desc.layers[n].offset[plane]); \ + desc.layers[n].offset[plane]); \ ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \ - desc.layers[n].pitch[plane]); \ + desc.layers[n].pitch[plane]); \ } while (0) -void generateVAAPIImage(CuvidDecoder * decoder,int index, const AVFrame *frame,int image_width , int image_height) +void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame, int image_width, int image_height) { - int n,i; - VAStatus status; + int n, i; + VAStatus status; - uint64_t 
first_time; - VADRMPRIMESurfaceDescriptor desc; - - status = vaExportSurfaceHandle(decoder->VaDisplay, (unsigned int)frame->data[3], - VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, - VA_EXPORT_SURFACE_READ_ONLY | - VA_EXPORT_SURFACE_SEPARATE_LAYERS, - &desc); - - if (status != VA_STATUS_SUCCESS) { - printf("Fehler beim export VAAPI Handle\n"); - return; - } - vaSyncSurface(decoder->VaDisplay,(unsigned int)frame->data[3]); - - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); - EglCheck(); - - for (int n = 0; n < 2; n++) { - int attribs[20] = {EGL_NONE}; - int num_attribs = 0; + uint64_t first_time; + VADRMPRIMESurfaceDescriptor desc; - ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, desc.layers[n].drm_format); - ADD_ATTRIB(EGL_WIDTH, n==0?image_width:image_width/2); - ADD_ATTRIB(EGL_HEIGHT, n==0?image_height:image_height/2); - ADD_PLANE_ATTRIBS(0); + status = + vaExportSurfaceHandle(decoder->VaDisplay, (unsigned int)frame->data[3], VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, + VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &desc); - decoder->images[index*2+n] = CreateImageKHR(eglGetCurrentDisplay(),EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs); + if (status != VA_STATUS_SUCCESS) { + printf("Fehler beim export VAAPI Handle\n"); + return; + } + vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]); - if (!decoder->images[index*2+n]) - goto esh_failed; + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); + EglCheck(); - glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[index*2+n]); - EGLImageTargetTexture2DOES(GL_TEXTURE_2D, decoder->images[index*2+n]); - decoder->fds[index*2+n] = desc.objects[desc.layers[n].object_index[0]].fd; - } - glBindTexture(GL_TEXTURE_2D, 0); - eglMakeCurrent(eglDisplay,EGL_NO_SURFACE,EGL_NO_SURFACE,EGL_NO_CONTEXT); - EglCheck(); + for (int n = 0; n < 2; n++) { + int attribs[20] = { EGL_NONE }; + int num_attribs = 0; + + ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, desc.layers[n].drm_format); + ADD_ATTRIB(EGL_WIDTH, n == 0 ? image_width : image_width / 2); + ADD_ATTRIB(EGL_HEIGHT, n == 0 ? image_height : image_height / 2); + ADD_PLANE_ATTRIBS(0); + + decoder->images[index * 2 + n] = + CreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs); + + if (!decoder->images[index * 2 + n]) + goto esh_failed; + + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[index * 2 + n]); + EGLImageTargetTexture2DOES(GL_TEXTURE_2D, decoder->images[index * 2 + n]); + decoder->fds[index * 2 + n] = desc.objects[desc.layers[n].object_index[0]].fd; + } + glBindTexture(GL_TEXTURE_2D, 0); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + EglCheck(); return 0; - -esh_failed: - Debug(3,"Failure in generateVAAPIImage\n"); - for (int n = 0; n < desc.num_objects; n++) - close(desc.objects[n].fd); - eglMakeCurrent(eglDisplay,EGL_NO_SURFACE,EGL_NO_SURFACE,EGL_NO_CONTEXT); - EglCheck(); + + esh_failed: + Debug(3, "Failure in generateVAAPIImage\n"); + for (int n = 0; n < desc.num_objects; n++) + close(desc.objects[n].fd); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + EglCheck(); } #endif #endif - - /// -/// Configure CUVID for new video format. +/// Configure CUVID for new video format. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidSetupOutput(CuvidDecoder * decoder) { // FIXME: need only to create and destroy surfaces for size changes - // or when number of needed surfaces changed! 
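/*
 * For one layer, the ADD_ATTRIB/ADD_PLANE_ATTRIBS sequence used by
 * generateVAAPIImage() above builds roughly the following attribute list.
 * This is only an illustrative sketch with placeholder values (fourcc, fd,
 * offset and pitch come from the VADRMPRIMESurfaceDescriptor); in practice
 * the extension entry points are resolved via eglGetProcAddress(), as the
 * plugin does with its CreateImageKHR/EGLImageTargetTexture2DOES pointers.
 */
EGLint attribs[] = {
    EGL_LINUX_DRM_FOURCC_EXT,      fourcc,  // desc.layers[n].drm_format
    EGL_WIDTH,                     width,   // full size for the Y plane, halved for UV
    EGL_HEIGHT,                    height,
    EGL_DMA_BUF_PLANE0_FD_EXT,     fd,      // desc.objects[desc.layers[n].object_index[0]].fd
    EGL_DMA_BUF_PLANE0_OFFSET_EXT, offset,  // desc.layers[n].offset[0]
    EGL_DMA_BUF_PLANE0_PITCH_EXT,  pitch,   // desc.layers[n].pitch[0]
    EGL_NONE
};
EGLImageKHR image = eglCreateImageKHR(display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);

glBindTexture(GL_TEXTURE_2D, texture);
glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image); // texture now samples the VAAPI plane, no copy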
+ // or when number of needed surfaces changed! decoder->Resolution = VideoResolutionGroup(decoder->InputWidth, decoder->InputHeight, decoder->Interlaced); - CuvidCreateSurfaces(decoder, decoder->InputWidth, decoder->InputHeight,decoder->PixFmt); - - CuvidUpdateOutput(decoder); // update aspect/scaling + CuvidCreateSurfaces(decoder, decoder->InputWidth, decoder->InputHeight, decoder->PixFmt); + + CuvidUpdateOutput(decoder); // update aspect/scaling window_width = decoder->OutputWidth; window_height = decoder->OutputHeight; } /// -/// Get a free surface. Called from ffmpeg. +/// Get a free surface. Called from ffmpeg. /// -/// @param decoder CUVID hw decoder -/// @param video_ctx ffmpeg video codec context +/// @param decoder CUVID hw decoder +/// @param video_ctx ffmpeg video codec context /// -/// @returns the oldest free surface +/// @returns the oldest free surface /// -static unsigned CuvidGetVideoSurface(CuvidDecoder * decoder, - const AVCodecContext * video_ctx) +static unsigned CuvidGetVideoSurface(CuvidDecoder * decoder, const AVCodecContext * video_ctx) { (void)video_ctx; @@ -2509,55 +2516,56 @@ static unsigned CuvidGetVideoSurface(CuvidDecoder * decoder, } #if defined (VAAPI) || defined (YADIF) -static void CuvidSyncRenderFrame(CuvidDecoder * decoder, - const AVCodecContext * video_ctx, const AVFrame * frame); +static void CuvidSyncRenderFrame(CuvidDecoder * decoder, const AVCodecContext * video_ctx, const AVFrame * frame); -int push_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) { - int ret,i=0; - AVFrame *filt_frame = av_frame_alloc(); - - /* push the decoded frame into the filtergraph */ - if (av_buffersrc_add_frame_flags(decoder->buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { - av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); - } +int push_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * frame) +{ + int ret, i = 0; + AVFrame *filt_frame = av_frame_alloc(); -//printf("Interlaced %d tff %d\n",frame->interlaced_frame,frame->top_field_first); - /* pull filtered frames from the filtergraph */ - while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, filt_frame)) >= 0) { - filt_frame->pts /= 2; - decoder->Interlaced = 0; -// printf("vaapideint video:new %#012" PRIx64 " old %#012" PRIx64 "\n",filt_frame->pts,frame->pts); - CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame); - filt_frame = av_frame_alloc(); // get new frame + /* push the decoded frame into the filtergraph */ + if (av_buffersrc_add_frame_flags(decoder->buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { + av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); + } - } - av_frame_free(&filt_frame); - av_frame_free(&frame); - return ret; + //printf("Interlaced %d tff %d\n",frame->interlaced_frame,frame->top_field_first); + /* pull filtered frames from the filtergraph */ + while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, filt_frame)) >= 0) { + filt_frame->pts /= 2; + decoder->Interlaced = 0; +// printf("vaapideint video:new %#012" PRIx64 " old %#012" PRIx64 "\n",filt_frame->pts,frame->pts); + CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame); + filt_frame = av_frame_alloc(); // get new frame + + } + av_frame_free(&filt_frame); + av_frame_free(&frame); + return ret; } -int init_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) -{ - enum AVPixelFormat format = PIXEL_FORMAT; +int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * frame) +{ + enum AVPixelFormat format = 
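/*
 * push_filters() above treats any av_buffersink_get_frame() result < 0 as
 * "loop done".  The usual FFmpeg pattern distinguishes EAGAIN/EOF from real
 * errors; a minimal sketch, assuming buffersrc_ctx/buffersink_ctx/filt_frame
 * are set up as in init_filters() (not the plugin's actual code):
 */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
    return AVERROR(EINVAL);             // could not feed the graph

for (;;) {
    int ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);

    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                          // graph needs more input or is flushed
    if (ret < 0)
        return ret;                     // real error
    // rate=field deinterlacing emits two frames per input frame,
    // which is why the caller above halves filt_frame->pts
    // ... hand filt_frame to the renderer ...
    av_frame_unref(filt_frame);
}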
PIXEL_FORMAT; + #ifdef VAAPI - const char *filters_descr = "deinterlace_vaapi=rate=field:auto=1"; // + const char *filters_descr = "deinterlace_vaapi=rate=field:auto=1"; // #endif #ifdef YADIF - const char *filters_descr = "yadif_cuda=1:0:1"; // mode=send_field,parity=tff,deint=interlaced"; - enum AVPixelFormat pix_fmts[] = { format, AV_PIX_FMT_NONE }; + const char *filters_descr = "yadif_cuda=1:0:1"; // mode=send_field,parity=tff,deint=interlaced"; + enum AVPixelFormat pix_fmts[] = { format, AV_PIX_FMT_NONE }; #endif - + char args[512]; int ret = 0; - const AVFilter *buffersrc = avfilter_get_by_name("buffer"); + const AVFilter *buffersrc = avfilter_get_by_name("buffer"); const AVFilter *buffersink = avfilter_get_by_name("buffersink"); AVFilterInOut *outputs = avfilter_inout_alloc(); - AVFilterInOut *inputs = avfilter_inout_alloc(); - AVBufferSrcParameters *src_params; + AVFilterInOut *inputs = avfilter_inout_alloc(); + AVBufferSrcParameters *src_params; + + if (decoder->filter_graph) + avfilter_graph_free(&decoder->filter_graph); - if (decoder->filter_graph) - avfilter_graph_free(&decoder->filter_graph); - decoder->filter_graph = avfilter_graph_alloc(); if (!outputs || !inputs || !decoder->filter_graph) { ret = AVERROR(ENOMEM); @@ -2565,45 +2573,39 @@ int init_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) } /* buffer video source: the decoded frames from the decoder will be inserted here. */ - snprintf(args, sizeof(args), - "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", - dec_ctx->width, dec_ctx->height, format, - 1, 90000, - dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); + snprintf(args, sizeof(args), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", dec_ctx->width, + dec_ctx->height, format, 1, 90000, dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); - ret = avfilter_graph_create_filter(&decoder->buffersrc_ctx, buffersrc, "in", - args, NULL, decoder->filter_graph); + ret = avfilter_graph_create_filter(&decoder->buffersrc_ctx, buffersrc, "in", args, NULL, decoder->filter_graph); if (ret < 0) { Debug(3, "Cannot create buffer source\n"); goto end; } - src_params = av_buffersrc_parameters_alloc(); - src_params->hw_frames_ctx = frame->hw_frames_ctx; - src_params->format = format; - src_params->time_base.num = 1; - src_params->time_base.den = 90000; - src_params->width = dec_ctx->width; - src_params->height = dec_ctx->height; - src_params->frame_rate.num = 50; - src_params->frame_rate.den = 1; - src_params->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; - + src_params = av_buffersrc_parameters_alloc(); + src_params->hw_frames_ctx = frame->hw_frames_ctx; + src_params->format = format; + src_params->time_base.num = 1; + src_params->time_base.den = 90000; + src_params->width = dec_ctx->width; + src_params->height = dec_ctx->height; + src_params->frame_rate.num = 50; + src_params->frame_rate.den = 1; + src_params->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; + //printf("width %d height %d hw_frames_ctx %p\n",dec_ctx->width,dec_ctx->height ,frame->hw_frames_ctx); - ret = av_buffersrc_parameters_set(decoder->buffersrc_ctx, src_params); - if (ret < 0) { + ret = av_buffersrc_parameters_set(decoder->buffersrc_ctx, src_params); + if (ret < 0) { Debug(3, "Cannot set hw_frames_ctx to src\n"); goto end; } /* buffer video sink: to terminate the filter chain. 
*/ - ret = avfilter_graph_create_filter(&decoder->buffersink_ctx, buffersink, "out", - NULL, NULL, decoder->filter_graph); + ret = avfilter_graph_create_filter(&decoder->buffersink_ctx, buffersink, "out", NULL, NULL, decoder->filter_graph); if (ret < 0) { Debug(3, "Cannot create buffer sink\n"); goto end; } #ifdef YADIF - ret = av_opt_set_int_list(decoder->buffersink_ctx, "pix_fmts", pix_fmts, - AV_PIX_FMT_NONE , AV_OPT_SEARCH_CHILDREN); + ret = av_opt_set_int_list(decoder->buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN); if (ret < 0) { Debug(3, "Cannot set output pixel format\n"); goto end; @@ -2620,10 +2622,10 @@ int init_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) * filter input label is not specified, it is set to "in" by * default. */ - outputs->name = av_strdup("in"); + outputs->name = av_strdup("in"); outputs->filter_ctx = decoder->buffersrc_ctx; - outputs->pad_idx = 0; - outputs->next = NULL; + outputs->pad_idx = 0; + outputs->next = NULL; /* * The buffer sink input must be connected to the output pad of @@ -2631,22 +2633,22 @@ int init_filters(AVCodecContext * dec_ctx,CuvidDecoder * decoder,AVFrame *frame) * filter output label is not specified, it is set to "out" by * default. */ - inputs->name = av_strdup("out"); + inputs->name = av_strdup("out"); inputs->filter_ctx = decoder->buffersink_ctx; - inputs->pad_idx = 0; - inputs->next = NULL; + inputs->pad_idx = 0; + inputs->next = NULL; if ((ret = avfilter_graph_parse_ptr(decoder->filter_graph, filters_descr, &inputs, &outputs, NULL)) < 0) { - Debug(3,"Cannot set graph parse %d\n",ret); + Debug(3, "Cannot set graph parse %d\n", ret); goto end; - } + } if ((ret = avfilter_graph_config(decoder->filter_graph, NULL)) < 0) { - Debug(3,"Cannot set graph config %d\n",ret); + Debug(3, "Cannot set graph config %d\n", ret); goto end; - } + } -end: + end: avfilter_inout_free(&inputs); avfilter_inout_free(&outputs); @@ -2655,25 +2657,24 @@ end: #endif - /// -/// Callback to negotiate the PixelFormat. +/// Callback to negotiate the PixelFormat. /// -/// @param fmt is the list of formats which are supported by the codec, -/// it is terminated by -1 as 0 is a valid format, the -/// formats are ordered by quality. +/// @param fmt is the list of formats which are supported by the codec, +/// it is terminated by -1 as 0 is a valid format, the +/// formats are ordered by quality. 
/// -static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, - AVCodecContext * video_ctx, const enum AVPixelFormat *fmt) +static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContext * video_ctx, + const enum AVPixelFormat *fmt) { const enum AVPixelFormat *fmt_idx; - int bitformat16 = 0,deint=0; - + int bitformat16 = 0, deint = 0; + VideoDecoder *ist = video_ctx->opaque; // - // look through formats + // look through formats // Debug(3, "%s: codec %d fmts:\n", __FUNCTION__, video_ctx->codec_id); for (fmt_idx = fmt; *fmt_idx != AV_PIX_FMT_NONE; fmt_idx++) { @@ -2682,299 +2683,301 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, bitformat16 = 1; } #ifdef VAAPI - if (video_ctx->profile == FF_PROFILE_HEVC_MAIN_10) - bitformat16 = 1; + if (video_ctx->profile == FF_PROFILE_HEVC_MAIN_10) + bitformat16 = 1; #endif Debug(3, "%s: codec %d fmts:\n", __FUNCTION__, video_ctx->codec_id); for (fmt_idx = fmt; *fmt_idx != AV_PIX_FMT_NONE; fmt_idx++) { - Debug(3, "\t%#010x %s\n", *fmt_idx, av_get_pix_fmt_name(*fmt_idx)); - // check supported pixel format with entry point - switch (*fmt_idx) { - case PIXEL_FORMAT: - break; - default: - continue; - } - break; + Debug(3, "\t%#010x %s\n", *fmt_idx, av_get_pix_fmt_name(*fmt_idx)); + // check supported pixel format with entry point + switch (*fmt_idx) { + case PIXEL_FORMAT: + break; + default: + continue; + } + break; } - - Debug(3,"video profile %d codec id %d\n",video_ctx->profile,video_ctx->codec_id); + + Debug(3, "video profile %d codec id %d\n", video_ctx->profile, video_ctx->codec_id); if (*fmt_idx == AV_PIX_FMT_NONE) { - Fatal(_("video: no valid pixfmt found\n")); + Fatal(_("video: no valid pixfmt found\n")); } if (*fmt_idx != PIXEL_FORMAT) { - Fatal(_("video: no valid profile found\n")); + Fatal(_("video: no valid profile found\n")); } - decoder->newchannel = 1; + decoder->newchannel = 1; if (ist->GetFormatDone) - return PIXEL_FORMAT; + return PIXEL_FORMAT; - ist->GetFormatDone = 1; - - Debug(3, "video: create decoder 16bit?=%d %dx%d old %d %d\n",bitformat16, video_ctx->width, video_ctx->height,decoder->InputWidth,decoder->InputHeight); + ist->GetFormatDone = 1; - if (*fmt_idx == PIXEL_FORMAT ) { // HWACCEL used + Debug(3, "video: create decoder 16bit?=%d %dx%d old %d %d\n", bitformat16, video_ctx->width, video_ctx->height, + decoder->InputWidth, decoder->InputHeight); - // Check image, format, size - // - if (bitformat16) { - decoder->PixFmt = AV_PIX_FMT_YUV420P; // 10 Bit Planar - ist->hwaccel_output_format = AV_PIX_FMT_YUV420P; + if (*fmt_idx == PIXEL_FORMAT) { // HWACCEL used + + // Check image, format, size + // + if (bitformat16) { + decoder->PixFmt = AV_PIX_FMT_YUV420P; // 10 Bit Planar + ist->hwaccel_output_format = AV_PIX_FMT_YUV420P; } else { - decoder->PixFmt = AV_PIX_FMT_NV12; // 8 Bit Planar - ist->hwaccel_output_format = AV_PIX_FMT_NV12; + decoder->PixFmt = AV_PIX_FMT_NV12; // 8 Bit Planar + ist->hwaccel_output_format = AV_PIX_FMT_NV12; } -// if ((video_ctx->width != decoder->InputWidth -// || video_ctx->height != decoder->InputHeight) && decoder->TrickSpeed == 0) { - - if (decoder->TrickSpeed == 0) { +// if ((video_ctx->width != decoder->InputWidth +// || video_ctx->height != decoder->InputHeight) && decoder->TrickSpeed == 0) { + + if (decoder->TrickSpeed == 0) { #ifdef PLACEBO - VideoThreadLock(); + VideoThreadLock(); #endif - CuvidCleanup(decoder); - decoder->InputAspect = video_ctx->sample_aspect_ratio; - decoder->InputWidth = video_ctx->width; - decoder->InputHeight = 
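/*
 * The negotiation above follows ffmpeg's get_format contract: walk the list
 * the codec offers (ordered best first, terminated by AV_PIX_FMT_NONE) and
 * return the hardware format if it is present.  Reduced to its core it looks
 * like this sketch (not the plugin's callback, which additionally
 * reconfigures the decoder):
 */
static enum AVPixelFormat hw_get_format(AVCodecContext *ctx, const enum AVPixelFormat *fmt)
{
    const enum AVPixelFormat *p;

    (void)ctx;
    for (p = fmt; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == PIXEL_FORMAT)         // AV_PIX_FMT_CUDA or AV_PIX_FMT_VAAPI here
            return *p;
    }
    return AV_PIX_FMT_NONE;             // no usable hw format offered
}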
video_ctx->height; - decoder->Interlaced = 0; - decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1; - CuvidSetupOutput(decoder); + CuvidCleanup(decoder); + decoder->InputAspect = video_ctx->sample_aspect_ratio; + decoder->InputWidth = video_ctx->width; + decoder->InputHeight = video_ctx->height; + decoder->Interlaced = 0; + decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1; + CuvidSetupOutput(decoder); #ifdef PLACEBO - VideoThreadUnlock(); + VideoThreadUnlock(); #endif -#ifdef PLACEBO // dont show first frame - decoder->newchannel = 1; +#ifdef PLACEBO // dont show first frame + decoder->newchannel = 1; #endif #ifdef YADIF - if (VideoDeinterlace[decoder->Resolution] == VideoDeinterlaceYadif) { - deint = 0; - ist->filter = 1; // init yadif_cuda - } - else { - deint = 2; - ist->filter = 0; - } - CuvidMessage(2,"deint = %s\n",deint==0?"Yadif":"Cuda"); - if (av_opt_set_int(video_ctx->priv_data, "deint", deint ,0) < 0) { // adaptive - Fatal(_("codec: can't set option deint to video codec!\n")); - } + if (VideoDeinterlace[decoder->Resolution] == VideoDeinterlaceYadif) { + deint = 0; + ist->filter = 1; // init yadif_cuda + } else { + deint = 2; + ist->filter = 0; + } + CuvidMessage(2, "deint = %s\n", deint == 0 ? "Yadif" : "Cuda"); + if (av_opt_set_int(video_ctx->priv_data, "deint", deint, 0) < 0) { // adaptive + Fatal(_("codec: can't set option deint to video codec!\n")); + } #endif - } + } - CuvidMessage(2,"GetFormat Init ok %dx%d\n",video_ctx->width,video_ctx->height); - decoder->InputAspect = video_ctx->sample_aspect_ratio; + CuvidMessage(2, "GetFormat Init ok %dx%d\n", video_ctx->width, video_ctx->height); + decoder->InputAspect = video_ctx->sample_aspect_ratio; #ifdef CUVID ist->active_hwaccel_id = HWACCEL_CUVID; #else - ist->filter = 1; // init deint vaapi + ist->filter = 1; // init deint vaapi ist->active_hwaccel_id = HWACCEL_VAAPI; #endif - ist->hwaccel_pix_fmt = PIXEL_FORMAT; - return PIXEL_FORMAT; + ist->hwaccel_pix_fmt = PIXEL_FORMAT; + return PIXEL_FORMAT; } - Fatal(_("NO Format valid")); + Fatal(_("NO Format valid")); return *fmt_idx; } #ifdef USE_GRAB #ifdef PLACEBO -int get_RGB(CuvidDecoder *decoder,struct pl_overlay *ovl) { +int get_RGB(CuvidDecoder * decoder, struct pl_overlay *ovl) +{ #else -int get_RGB(CuvidDecoder *decoder) { +int get_RGB(CuvidDecoder * decoder) +{ #endif #ifdef PLACEBO - struct pl_render_params render_params = pl_render_default_params; - struct pl_render_target target = {0}; - const struct pl_fmt *fmt; - VkImage Image; - int offset,x1,y1,x0,y0; - float faktorx,faktory; + struct pl_render_params render_params = pl_render_default_params; + struct pl_render_target target = { 0 }; + const struct pl_fmt *fmt; + VkImage Image; + int offset, x1, y1, x0, y0; + float faktorx, faktory; #endif - - uint8_t *base; - int width; - int height; - GLuint fb,texture; - int current; - GLint texLoc; - - base = decoder->grabbase; - width = decoder->grabwidth; - height = decoder->grabheight; - - current = decoder->SurfacesRb[decoder->SurfaceRead]; - -#ifndef PLACEBO -// eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); - glGenTextures(1, &texture); - GlxCheck(); - glBindTexture(GL_TEXTURE_2D, texture); - GlxCheck(); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); - GlxCheck(); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); - 
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); - GlxCheck(); + uint8_t *base; + int width; + int height; + GLuint fb, texture; + int current; + GLint texLoc; - glGenFramebuffers(1, &fb); - glBindFramebuffer(GL_FRAMEBUFFER, fb); - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0); - if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { - Debug(3,"video/cuvid: grab Framebuffer is not complete!"); - return 0; - } - - glViewport(0,0,width, height); - GlxCheck(); - - if (gl_prog == 0) - gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm - - glUseProgram(gl_prog); - texLoc = glGetUniformLocation(gl_prog, "texture0"); - glUniform1i(texLoc, 0); - texLoc = glGetUniformLocation(gl_prog, "texture1"); - glUniform1i(texLoc, 1); + base = decoder->grabbase; + width = decoder->grabwidth; + height = decoder->grabheight; - glActiveTexture(GL_TEXTURE0); - glBindTexture(GL_TEXTURE_2D,decoder->gl_textures[current*2+0]); - glActiveTexture(GL_TEXTURE1); - glBindTexture(GL_TEXTURE_2D,decoder->gl_textures[current*2+1]); - - glBindFramebuffer(GL_FRAMEBUFFER, fb); - - render_pass_quad(1,0.0,0.0); - glUseProgram(0); - glActiveTexture(GL_TEXTURE0); - - if (OsdShown && decoder->grab == 2) { - GLint texLoc; -#ifdef CUVID -// glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); - GlxCheck(); -#endif - glEnable(GL_BLEND); - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); - - if (gl_prog_osd == 0) - gl_prog_osd = sc_generate_osd(gl_prog_osd); // generate shader programm + current = decoder->SurfacesRb[decoder->SurfaceRead]; - glUseProgram(gl_prog_osd); - texLoc = glGetUniformLocation(gl_prog_osd, "texture0"); - glUniform1i(texLoc, 0); +#ifndef PLACEBO - glActiveTexture(GL_TEXTURE0); - - pthread_mutex_lock(&OSDMutex); - glBindTexture(GL_TEXTURE_2D,OSDtexture); - glBindFramebuffer(GL_FRAMEBUFFER, fb); - render_pass_quad(1, 0.0, 0.0); - pthread_mutex_unlock(&OSDMutex); - - glUseProgram(0); - glActiveTexture(GL_TEXTURE0); -#ifdef CUVID -// glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); -#else -// eglMakeCurrent(eglDisplay, eglSurface,eglSurface, eglThreadContext); -#endif - } - glFlush(); - Debug(3,"Read pixels %d %d\n",width,height); - - glPixelStorei(GL_UNPACK_ALIGNMENT, 1); - glPixelStorei(GL_PACK_ALIGNMENT, 1); - - glReadPixels(0,0,width,height,GL_BGRA,GL_UNSIGNED_BYTE,base); +// eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); + glGenTextures(1, &texture); + GlxCheck(); + glBindTexture(GL_TEXTURE_2D, texture); + GlxCheck(); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); + GlxCheck(); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); GlxCheck(); - - glBindFramebuffer(GL_FRAMEBUFFER, 0); - glDeleteFramebuffers(1,&fb); - glDeleteTextures(1,&texture); - - -#else - faktorx = (float)width / (float)VideoWindowWidth; - faktory = (float)height / (float) VideoWindowHeight; - fmt = pl_find_named_fmt(p->gpu,"bgra8"); - target.fbo = pl_tex_create(p->gpu, &(struct pl_tex_params) { - .w = width, - .h = height, - .d = 0, - .format = fmt, - .sampleable = true, - .renderable = true, - .host_readable = true, - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .address_mode = PL_TEX_ADDRESS_CLAMP, - }); - 
target.dst_rect.x0 = (float)decoder->OutputX * faktorx; - target.dst_rect.y0 = (float)decoder->OutputY * faktory; - target.dst_rect.x1 = (float)(decoder->OutputX + decoder->OutputWidth) * faktorx; - target.dst_rect.y1 = (float)(decoder->OutputY + decoder->OutputHeight) * faktory; - target.repr.sys = PL_COLOR_SYSTEM_RGB; - target.repr.levels = PL_COLOR_LEVELS_PC; - target.repr.alpha = PL_ALPHA_UNKNOWN; - target.repr.bits.sample_depth = 8; - target.repr.bits.color_depth = 8; - target.repr.bits.bit_shift =0; - target.color.primaries = PL_COLOR_PRIM_BT_709; - target.color.transfer = PL_COLOR_TRC_BT_1886; - target.color.light = PL_COLOR_LIGHT_DISPLAY; - target.color.sig_peak = 0; - target.color.sig_avg = 0; - - if (ovl) { - target.overlays = ovl; - target.num_overlays = 1; - x0 = ovl->rect.x0; - y0 = ovl->rect.y0; - x1 = ovl->rect.x1; - y1 = ovl->rect.y1; - ovl->rect.x0 = (float)x0 * faktorx; - ovl->rect.y0 = (float)y0 * faktory; - ovl->rect.x1 = (float)x1 * faktorx; - ovl->rect.y1 = (float)y1 * faktory; - - } else { - target.overlays = 0; - target.num_overlays = 0; - } - if (!pl_render_image(p->renderer, &decoder->pl_images[current], &target, &render_params)) { + glGenFramebuffers(1, &fb); + glBindFramebuffer(GL_FRAMEBUFFER, fb); + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0); + if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { + Debug(3, "video/cuvid: grab Framebuffer is not complete!"); + return 0; + } + + glViewport(0, 0, width, height); + GlxCheck(); + + if (gl_prog == 0) + gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm + + glUseProgram(gl_prog); + texLoc = glGetUniformLocation(gl_prog, "texture0"); + glUniform1i(texLoc, 0); + texLoc = glGetUniformLocation(gl_prog, "texture1"); + glUniform1i(texLoc, 1); + + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * 2 + 0]); + glActiveTexture(GL_TEXTURE1); + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * 2 + 1]); + + glBindFramebuffer(GL_FRAMEBUFFER, fb); + + render_pass_quad(1, 0.0, 0.0); + glUseProgram(0); + glActiveTexture(GL_TEXTURE0); + + if (OsdShown && decoder->grab == 2) { + GLint texLoc; + +#ifdef CUVID +// glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); + GlxCheck(); +#endif + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (gl_prog_osd == 0) + gl_prog_osd = sc_generate_osd(gl_prog_osd); // generate shader programm + + glUseProgram(gl_prog_osd); + texLoc = glGetUniformLocation(gl_prog_osd, "texture0"); + glUniform1i(texLoc, 0); + + glActiveTexture(GL_TEXTURE0); + + pthread_mutex_lock(&OSDMutex); + glBindTexture(GL_TEXTURE_2D, OSDtexture); + glBindFramebuffer(GL_FRAMEBUFFER, fb); + render_pass_quad(1, 0.0, 0.0); + pthread_mutex_unlock(&OSDMutex); + + glUseProgram(0); + glActiveTexture(GL_TEXTURE0); +#ifdef CUVID +// glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); +#else +// eglMakeCurrent(eglDisplay, eglSurface,eglSurface, eglThreadContext); +#endif + } + glFlush(); + Debug(3, "Read pixels %d %d\n", width, height); + + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + glPixelStorei(GL_PACK_ALIGNMENT, 1); + + glReadPixels(0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, base); + GlxCheck(); + + glBindFramebuffer(GL_FRAMEBUFFER, 0); + glDeleteFramebuffers(1, &fb); + glDeleteTextures(1, &texture); + +#else + faktorx = (float)width / (float)VideoWindowWidth; + faktory = (float)height / (float)VideoWindowHeight; + fmt = pl_find_named_fmt(p->gpu, 
"bgra8"); + target.fbo = pl_tex_create(p->gpu, &(struct pl_tex_params) { + .w = width, + .h = height, + .d = 0, + .format = fmt, + .sampleable = true, + .renderable = true, + .host_readable = true, + .sample_mode = PL_TEX_SAMPLE_LINEAR, + .address_mode = PL_TEX_ADDRESS_CLAMP, + }); + target.dst_rect.x0 = (float)decoder->OutputX * faktorx; + target.dst_rect.y0 = (float)decoder->OutputY * faktory; + target.dst_rect.x1 = (float)(decoder->OutputX + decoder->OutputWidth) * faktorx; + target.dst_rect.y1 = (float)(decoder->OutputY + decoder->OutputHeight) * faktory; + target.repr.sys = PL_COLOR_SYSTEM_RGB; + target.repr.levels = PL_COLOR_LEVELS_PC; + target.repr.alpha = PL_ALPHA_UNKNOWN; + target.repr.bits.sample_depth = 8; + target.repr.bits.color_depth = 8; + target.repr.bits.bit_shift = 0; + target.color.primaries = PL_COLOR_PRIM_BT_709; + target.color.transfer = PL_COLOR_TRC_BT_1886; + target.color.light = PL_COLOR_LIGHT_DISPLAY; + target.color.sig_peak = 0; + target.color.sig_avg = 0; + + if (ovl) { + target.overlays = ovl; + target.num_overlays = 1; + x0 = ovl->rect.x0; + y0 = ovl->rect.y0; + x1 = ovl->rect.x1; + y1 = ovl->rect.y1; + ovl->rect.x0 = (float)x0 *faktorx; + ovl->rect.y0 = (float)y0 *faktory; + ovl->rect.x1 = (float)x1 *faktorx; + ovl->rect.y1 = (float)y1 *faktory; + + } else { + target.overlays = 0; + target.num_overlays = 0; + } + + if (!pl_render_image(p->renderer, &decoder->pl_images[current], &target, &render_params)) { Fatal(_("Failed rendering frame!\n")); } - pl_gpu_finish(p->gpu); - - if (ovl) { - ovl->rect.x0 = x0; - ovl->rect.y0 = y0; - ovl->rect.x1 = x1; - ovl->rect.y1 = y1; - } - - pl_tex_download(p->gpu,&(struct pl_tex_transfer_params) { // download Data - .tex = target.fbo, - .ptr = base, - }); + pl_gpu_finish(p->gpu); - pl_tex_destroy(p->gpu,&target.fbo); -#endif - return 0; + if (ovl) { + ovl->rect.x0 = x0; + ovl->rect.y0 = y0; + ovl->rect.x1 = x1; + ovl->rect.y1 = y1; + } + + pl_tex_download(p->gpu, &(struct pl_tex_transfer_params) { // download Data + .tex = target.fbo, + .ptr = base, + }); + + pl_tex_destroy(p->gpu, &target.fbo); +#endif + return 0; } /// -/// Grab output surface already locked. +/// Grab output surface already locked. 
/// -/// @param ret_size[out] size of allocated surface copy -/// @param ret_width[in,out] width of output -/// @param ret_height[in,out] height of output +/// @param ret_size[out] size of allocated surface copy +/// @param ret_width[in,out] width of output +/// @param ret_height[in,out] height of output /// static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int *ret_height, int mitosd) { @@ -2983,23 +2986,23 @@ static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int uint32_t height; uint8_t *base; VdpRect source_rect; - CuvidDecoder *decoder; + CuvidDecoder *decoder; + + decoder = CuvidDecoders[0]; + if (decoder == NULL) // no video aktiv + return NULL; - decoder = CuvidDecoders[0]; - if (decoder == NULL) // no video aktiv - return NULL; - // surface = CuvidSurfacesRb[CuvidOutputSurfaceIndex]; - - // get real surface size + + // get real surface size #ifdef PLACEBO width = decoder->VideoWidth; - height = decoder->VideoHeight; + height = decoder->VideoHeight; #else - width = decoder->InputWidth; - height = decoder->InputHeight; + width = decoder->InputWidth; + height = decoder->InputHeight; #endif - + // Debug(3, "video/cuvid: grab %dx%d\n", width, height); source_rect.x0 = 0; @@ -3008,77 +3011,77 @@ static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int source_rect.y1 = height; if (ret_width && ret_height) { - if (*ret_width <= -64) { // this is an Atmo grab service request - int overscan; + if (*ret_width <= -64) { // this is an Atmo grab service request + int overscan; - // calculate aspect correct size of analyze image - width = *ret_width * -1; - height = (width * source_rect.y1) / source_rect.x1; + // calculate aspect correct size of analyze image + width = *ret_width * -1; + height = (width * source_rect.y1) / source_rect.x1; - // calculate size of grab (sub) window - overscan = *ret_height; + // calculate size of grab (sub) window + overscan = *ret_height; - if (overscan > 0 && overscan <= 200) { - source_rect.x0 = source_rect.x1 * overscan / 1000; - source_rect.x1 -= source_rect.x0; - source_rect.y0 = source_rect.y1 * overscan / 1000; - source_rect.y1 -= source_rect.y0; - } - } else { - if (*ret_width > 0 && (unsigned)*ret_width < width) { - width = *ret_width; - } - if (*ret_height > 0 && (unsigned)*ret_height < height) { - height = *ret_height; - } - } + if (overscan > 0 && overscan <= 200) { + source_rect.x0 = source_rect.x1 * overscan / 1000; + source_rect.x1 -= source_rect.x0; + source_rect.y0 = source_rect.y1 * overscan / 1000; + source_rect.y1 -= source_rect.y0; + } + } else { + if (*ret_width > 0 && (unsigned)*ret_width < width) { + width = *ret_width; + } + if (*ret_height > 0 && (unsigned)*ret_height < height) { + height = *ret_height; + } + } -// printf("video/cuvid: grab source dim %dx%d\n", width, height); +// printf("video/cuvid: grab source dim %dx%d\n", width, height); - size = width * height * sizeof(uint32_t); - - base = malloc(size); - - if (!base) { - Error(_("video/cuvid: out of memory\n")); - return NULL; - } - decoder->grabbase = base; - decoder->grabwidth = width; - decoder->grabheight = height; - if (mitosd) - decoder->grab = 2; - else - decoder->grab = 1; - - while(decoder->grab) { - usleep(1000); // wait for data - } -// Debug(3,"got grab data\n"); + size = width * height * sizeof(uint32_t); - if (ret_size) { - *ret_size = size; - } - if (ret_width) { - *ret_width = width; - } - if (ret_height) { - *ret_height = height; - } - return base; - } + base = malloc(size); + + if (!base) { 
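/*
 * Worked example for the Atmo branch above, assuming a 1920x1080 source and a
 * caller passing *ret_width = -64 and *ret_height = 50 (overscan in per mille):
 *
 *   width  = 64                              // -(-64)
 *   height = 64 * 1080 / 1920 = 36           // aspect-correct analyze image
 *   source_rect.x0 = 1920 * 50 / 1000 = 96   ->  x1 = 1920 - 96 = 1824
 *   source_rect.y0 = 1080 * 50 / 1000 = 54   ->  y1 = 1080 - 54 = 1026
 *
 * i.e. 5% is cropped from every border and the remaining picture is scaled
 * down to a 64x36 grab for the Atmo service.
 */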
+ Error(_("video/cuvid: out of memory\n")); + return NULL; + } + decoder->grabbase = base; + decoder->grabwidth = width; + decoder->grabheight = height; + if (mitosd) + decoder->grab = 2; + else + decoder->grab = 1; + + while (decoder->grab) { + usleep(1000); // wait for data + } +// Debug(3,"got grab data\n"); + + if (ret_size) { + *ret_size = size; + } + if (ret_width) { + *ret_width = width; + } + if (ret_height) { + *ret_height = height; + } + return base; + } return NULL; } /// -/// Grab output surface. +/// Grab output surface. /// -/// @param ret_size[out] size of allocated surface copy -/// @param ret_width[in,out] width of output -/// @param ret_height[in,out] height of output +/// @param ret_size[out] size of allocated surface copy +/// @param ret_width[in,out] width of output +/// @param ret_height[in,out] height of output /// -static uint8_t *CuvidGrabOutputSurface(int *ret_size, int *ret_width, int *ret_height, int mitosd) +static uint8_t *CuvidGrabOutputSurface(int *ret_size, int *ret_width, int *ret_height, int mitosd) { uint8_t *img; @@ -3089,13 +3092,13 @@ static uint8_t *CuvidGrabOutputSurface(int *ret_size, int *ret_width, int *ret_ #endif /// -/// Queue output surface. +/// Queue output surface. /// -/// @param decoder CUVID hw decoder -/// @param surface output surface -/// @param softdec software decoder +/// @param decoder CUVID hw decoder +/// @param surface output surface +/// @param softdec software decoder /// -/// @note we can't mix software and hardware decoder surfaces +/// @note we can't mix software and hardware decoder surfaces /// static void CuvidQueueVideoSurface(CuvidDecoder * decoder, int surface, int softdec) { @@ -3104,1335 +3107,1331 @@ static void CuvidQueueVideoSurface(CuvidDecoder * decoder, int surface, int soft ++decoder->FrameCounter; // can't wait for output queue empty - if (atomic_read(&decoder->SurfacesFilled) >= VIDEO_SURFACES_MAX) { - Warning(_("video/vdpau: output buffer full, dropping frame (%d/%d)\n"), - ++decoder->FramesDropped, decoder->FrameCounter); - if (!(decoder->FramesDisplayed % 300)) { - CuvidPrintFrames(decoder); - } - // software surfaces only - if (softdec) { - CuvidReleaseSurface(decoder, surface); - } - return; - } + if (atomic_read(&decoder->SurfacesFilled) >= VIDEO_SURFACES_MAX) { + Warning(_("video/vdpau: output buffer full, dropping frame (%d/%d)\n"), ++decoder->FramesDropped, + decoder->FrameCounter); + if (!(decoder->FramesDisplayed % 300)) { + CuvidPrintFrames(decoder); + } + // software surfaces only + if (softdec) { + CuvidReleaseSurface(decoder, surface); + } + return; + } // - // Check and release, old surface + // Check and release, old surface // - if ((old = decoder->SurfacesRb[decoder->SurfaceWrite])!= -1) { - // now we can release the surface, software surfaces only - if (softdec) { - CuvidReleaseSurface(decoder, old); - } + if ((old = decoder->SurfacesRb[decoder->SurfaceWrite]) != -1) { + // now we can release the surface, software surfaces only + if (softdec) { + CuvidReleaseSurface(decoder, old); + } } - Debug(4, "video/vdpau: yy video surface %#08x@%d ready\n", surface, decoder->SurfaceWrite); + Debug(4, "video/vdpau: yy video surface %#08x@%d ready\n", surface, decoder->SurfaceWrite); decoder->SurfacesRb[decoder->SurfaceWrite] = surface; - decoder->SurfaceWrite = (decoder->SurfaceWrite + 1) % VIDEO_SURFACES_MAX; + decoder->SurfaceWrite = (decoder->SurfaceWrite + 1) % VIDEO_SURFACES_MAX; atomic_inc(&decoder->SurfacesFilled); } #if 0 -extern void Nv12ToBgra32(uint8_t *dpNv12, int nNv12Pitch, uint8_t 
*dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix,cudaStream_t stream); -extern void P016ToBgra32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix,cudaStream_t stream); -extern void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstNv12UV); -extern void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstP016UV); -extern void cudaLaunchNV12toARGBDrv(uint32_t *d_srcNV12, size_t nSourcePitch,uint32_t *d_dstARGB, size_t nDestPitch,uint32_t width, uint32_t height,CUstream streamID); -#endif +extern void Nv12ToBgra32(uint8_t * dpNv12, int nNv12Pitch, uint8_t * dpBgra, int nBgraPitch, int nWidth, int nHeight, + int iMatrix, cudaStream_t stream); +extern void P016ToBgra32(uint8_t * dpNv12, int nNv12Pitch, uint8_t * dpBgra, int nBgraPitch, int nWidth, int nHeight, + int iMatrix, cudaStream_t stream); +extern void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, + unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char *dpDstNv12UV); +extern void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, + unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char *dpDstP016UV); +extern void cudaLaunchNV12toARGBDrv(uint32_t * d_srcNV12, size_t nSourcePitch, uint32_t * d_dstARGB, size_t nDestPitch, + uint32_t width, uint32_t height, CUstream streamID); +#endif /// -/// Render a ffmpeg frame. +/// Render a ffmpeg frame. /// -/// @param decoder CUVID hw decoder -/// @param video_ctx ffmpeg video codec context -/// @param frame frame to display +/// @param decoder CUVID hw decoder +/// @param video_ctx ffmpeg video codec context +/// @param frame frame to display /// -static void CuvidRenderFrame(CuvidDecoder * decoder, - const AVCodecContext * video_ctx, AVFrame * frame) +static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * video_ctx, AVFrame * frame) { - uint64_t first_time; + uint64_t first_time; int surface; - enum AVColorSpace color; + enum AVColorSpace color; - if (decoder->Closing == 1) { - av_frame_free(&frame); - return; - } - - // update aspect ratio changes - if (decoder->InputWidth && decoder->InputHeight - && av_cmp_q(decoder->InputAspect, frame->sample_aspect_ratio)) { - Debug(3, "video/vdpau: aspect ratio changed\n"); - - decoder->InputAspect = frame->sample_aspect_ratio; -//printf("new aspect %d:%d\n",frame->sample_aspect_ratio.num,frame->sample_aspect_ratio.den); - CuvidUpdateOutput(decoder); + if (decoder->Closing == 1) { + av_frame_free(&frame); + return; } - color = frame->colorspace; - if (color == AVCOL_SPC_UNSPECIFIED) // if unknown - color = AVCOL_SPC_BT709; -#if 0 - // - // Check image, format, size - // - if ( // decoder->PixFmt != video_ctx->pix_fmt - video_ctx->width != decoder->InputWidth -// || decoder->ColorSpace != color - || video_ctx->height != decoder->InputHeight) { + // update aspect ratio changes + if (decoder->InputWidth && decoder->InputHeight && av_cmp_q(decoder->InputAspect, frame->sample_aspect_ratio)) { + Debug(3, "video/vdpau: aspect ratio changed\n"); + + decoder->InputAspect = frame->sample_aspect_ratio; +//printf("new aspect %d:%d\n",frame->sample_aspect_ratio.num,frame->sample_aspect_ratio.den); + 
CuvidUpdateOutput(decoder); + } + + color = frame->colorspace; + if (color == AVCOL_SPC_UNSPECIFIED) // if unknown + color = AVCOL_SPC_BT709; +#if 0 + // + // Check image, format, size + // + if ( // decoder->PixFmt != video_ctx->pix_fmt + video_ctx->width != decoder->InputWidth +// || decoder->ColorSpace != color + || video_ctx->height != decoder->InputHeight) { //Debug(3,"fmt %02d:%02d width %d:%d hight %d:%d\n",decoder->ColorSpace,frame->colorspace ,video_ctx->width, decoder->InputWidth,video_ctx->height, decoder->InputHeight); - decoder->InputWidth = video_ctx->width; - decoder->InputHeight = video_ctx->height; - CuvidCleanup(decoder); - decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1; - CuvidSetupOutput(decoder); -#ifdef PLACEBO // dont show first frame - decoder->newchannel = 1; + decoder->InputWidth = video_ctx->width; + decoder->InputHeight = video_ctx->height; + CuvidCleanup(decoder); + decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1; + CuvidSetupOutput(decoder); +#ifdef PLACEBO // dont show first frame + decoder->newchannel = 1; #endif - } + } #endif - // - // Copy data from frame to image - // - - if (video_ctx->pix_fmt == PIXEL_FORMAT) { + // + // Copy data from frame to image + // - int w = decoder->InputWidth; - int h = decoder->InputHeight; + if (video_ctx->pix_fmt == PIXEL_FORMAT) { - decoder->ColorSpace = color; // save colorspace - decoder->trc = frame->color_trc; - decoder->color_primaries = frame->color_primaries; - - surface = CuvidGetVideoSurface0(decoder); - - if (surface == -1) { // no free surfaces - Debug(3,"no more surfaces\n"); - av_frame_free(&frame); - return; - } -#if 0 - if (!decoder->Closing) { - VideoSetPts(&decoder->PTS, decoder->Interlaced, video_ctx, frame); - } + int w = decoder->InputWidth; + int h = decoder->InputHeight; + + decoder->ColorSpace = color; // save colorspace + decoder->trc = frame->color_trc; + decoder->color_primaries = frame->color_primaries; + + surface = CuvidGetVideoSurface0(decoder); + + if (surface == -1) { // no free surfaces + Debug(3, "no more surfaces\n"); + av_frame_free(&frame); + return; + } +#if 0 + if (!decoder->Closing) { + VideoSetPts(&decoder->PTS, decoder->Interlaced, video_ctx, frame); + } #endif -#if defined (VAAPI) && defined (PLACEBO) - if (p->has_dma_buf) { // Vulkan supports DMA_BUF no copy required - generateVAAPIImage(decoder,surface,frame,w,h); - } - else { // we need to Copy the frame via RAM - AVFrame *output; - VideoThreadLock(); - vaSyncSurface(decoder->VaDisplay,(unsigned int)frame->data[3]); - output = av_frame_alloc(); - av_hwframe_transfer_data(output,frame,0); - av_frame_copy_props(output,frame); -// printf("Save Surface ID %d %p %p\n",surface,decoder->pl_images[surface].planes[0].texture,decoder->pl_images[surface].planes[1].texture); - bool ok = pl_tex_upload(p->gpu,&(struct pl_tex_transfer_params) { - .tex = decoder->pl_images[surface].planes[0].texture, - .stride_w = output->linesize[0], - .stride_h = h, - .ptr = output->data[0], - .rc.x1 = w, - .rc.y1 = h, - .rc.z1 = 0, - }); - ok &= pl_tex_upload(p->gpu,&(struct pl_tex_transfer_params) { - .tex = decoder->pl_images[surface].planes[1].texture, - .stride_w = output->linesize[0]/2, - .stride_h = h/2, - .ptr = output->data[1], - .rc.x1 = w/2, - .rc.y1 = h/2, - .rc.z1 = 0, - }); - av_frame_free(&output); - VideoThreadUnlock(); - } +#if defined (VAAPI) && defined (PLACEBO) + if (p->has_dma_buf) { // Vulkan supports DMA_BUF no copy required + generateVAAPIImage(decoder, surface, frame, w, h); + } else { // we need to Copy the frame via RAM + 
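/*
 * The "copy via RAM" path below uses ffmpeg's generic hw-frame download.  As
 * a stand-alone sketch (not the plugin's code) the pattern is:
 */
AVFrame *sw = av_frame_alloc();

// optionally force a format, e.g. sw->format = AV_PIX_FMT_NV12; otherwise
// ffmpeg picks the first format the hw frames context can download to
if (av_hwframe_transfer_data(sw, hw_frame, 0) < 0) {
    av_frame_free(&sw);
    return;                             // download failed
}
av_frame_copy_props(sw, hw_frame);      // keep pts, colorspace, ...
// sw->data[0]/sw->data[1] now hold the Y and UV planes in system memory,
// which is what gets handed to pl_tex_upload() in the code below
av_frame_free(&sw);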
AVFrame *output; + + VideoThreadLock(); + vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]); + output = av_frame_alloc(); + av_hwframe_transfer_data(output, frame, 0); + av_frame_copy_props(output, frame); +// printf("Save Surface ID %d %p %p\n",surface,decoder->pl_images[surface].planes[0].texture,decoder->pl_images[surface].planes[1].texture); + bool ok = pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) { + .tex = decoder->pl_images[surface].planes[0].texture, + .stride_w = output->linesize[0], + .stride_h = h, + .ptr = output->data[0], + .rc.x1 = w, + .rc.y1 = h, + .rc.z1 = 0, + }); + ok &= pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) { + .tex = decoder->pl_images[surface].planes[1].texture, + .stride_w = output->linesize[0] / 2, + .stride_h = h / 2, + .ptr = output->data[1], + .rc.x1 = w / 2, + .rc.y1 = h / 2, + .rc.z1 = 0, + }); + av_frame_free(&output); + VideoThreadUnlock(); + } #else #ifdef CUVID - // copy to texture - generateCUDAImage(decoder,surface,frame,w,h,decoder->PixFmt==AV_PIX_FMT_NV12?1:2); -#else - // copy to texture - generateVAAPIImage(decoder,surface,frame,w,h); + // copy to texture + generateCUDAImage(decoder, surface, frame, w, h, decoder->PixFmt == AV_PIX_FMT_NV12 ? 1 : 2); +#else + // copy to texture + generateVAAPIImage(decoder, surface, frame, w, h); #endif #endif - - CuvidQueueVideoSurface(decoder, surface, 1); - decoder->frames[surface] = frame; - return; - } - - Fatal(_("video/vdpau: pixel format %d not supported\n"),video_ctx->pix_fmt); + CuvidQueueVideoSurface(decoder, surface, 1); + decoder->frames[surface] = frame; + return; + + } + + Fatal(_("video/vdpau: pixel format %d not supported\n"), video_ctx->pix_fmt); } /// -/// Get hwaccel context for ffmpeg. +/// Get hwaccel context for ffmpeg. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void *CuvidGetHwAccelContext(CuvidDecoder * decoder) { unsigned int version; - Debug(3, "Initializing cuvid hwaccel thread ID:%ld\n",(long int)syscall(186)); + Debug(3, "Initializing cuvid hwaccel thread ID:%ld\n", (long int)syscall(186)); //turn NULL; #ifdef CUVID - if (decoder->cuda_ctx) { - Debug(3,"schon passiert\n"); - return NULL; - } - - checkCudaErrors(cuInit(0)); + if (decoder->cuda_ctx) { + Debug(3, "schon passiert\n"); + return NULL; + } + + checkCudaErrors(cuInit(0)); + + checkCudaErrors(cuCtxCreate(&decoder->cuda_ctx, (unsigned int)CU_CTX_SCHED_BLOCKING_SYNC, (CUdevice) 0)); - checkCudaErrors(cuCtxCreate(&decoder->cuda_ctx, (unsigned int) CU_CTX_SCHED_BLOCKING_SYNC, (CUdevice) 0)); - if (decoder->cuda_ctx == NULL) - Fatal(_("Kein Cuda device gefunden")); + Fatal(_("Kein Cuda device gefunden")); - cuCtxGetApiVersion(decoder->cuda_ctx,&version); - Debug(3, "***********CUDA API Version %d\n",version); + cuCtxGetApiVersion(decoder->cuda_ctx, &version); + Debug(3, "***********CUDA API Version %d\n", version); #endif - return NULL; + return NULL; } /// -/// Create and display a black empty surface. +/// Create and display a black empty surface. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// -/// @FIXME: render only video area, not fullscreen! -/// decoder->Output.. isn't correct setup for radio stations +/// @FIXME: render only video area, not fullscreen! +/// decoder->Output.. 
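/*
 * On the CUVID side, generateCUDAImage() (called above, defined elsewhere in
 * video.c) copies the decoded planes into the CUarrays that were registered
 * with cuGraphicsGLRegisterImage() when the textures were created.  A minimal
 * sketch of that driver-API copy, with placeholder names (devptr/pitch would
 * be frame->data[n] / frame->linesize[n]):
 */
CUDA_MEMCPY2D cpy = { 0 };

cpy.srcMemoryType = CU_MEMORYTYPE_DEVICE;
cpy.srcDevice     = devptr;                          // decoded plane in device memory
cpy.srcPitch      = pitch;
cpy.dstMemoryType = CU_MEMORYTYPE_ARRAY;
cpy.dstArray      = decoder->cu_array[surface][n];   // mapped from the GL texture
cpy.WidthInBytes  = width * bytes_per_pixel;         // 1 for NV12, 2 for P010
cpy.Height        = n == 0 ? height : height / 2;    // UV plane is half height
checkCudaErrors(cuMemcpy2D(&cpy));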
isn't correct setup for radio stations /// -static void CuvidBlackSurface(__attribute__((unused))CuvidDecoder * decoder) +static void CuvidBlackSurface( __attribute__((unused)) CuvidDecoder * decoder) { #ifndef PLACEBO - glClear(GL_COLOR_BUFFER_BIT); + glClear(GL_COLOR_BUFFER_BIT); #endif -return; + return; } /// -/// Advance displayed frame of decoder. +/// Advance displayed frame of decoder. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// static void CuvidAdvanceDecoderFrame(CuvidDecoder * decoder) { - // next surface, if complete frame is displayed (1 -> 0) - if (decoder->SurfaceField) { - int filled; + // next surface, if complete frame is displayed (1 -> 0) + if (decoder->SurfaceField) { + int filled; - // FIXME: this should check the caller - // check decoder, if new surface is available - // need 2 frames for progressive - // need 4 frames for interlaced - filled = atomic_read(&decoder->SurfacesFilled); - if (filled <= 1 + 2 * decoder->Interlaced) { - // keep use of last surface - ++decoder->FramesDuped; - // FIXME: don't warn after stream start, don't warn during pause -// printf("video: display buffer empty, duping frame (%d/%d) %d\n", -// decoder->FramesDuped, decoder->FrameCounter, -// VideoGetBuffers(decoder->Stream)); - return; - } + // FIXME: this should check the caller + // check decoder, if new surface is available + // need 2 frames for progressive + // need 4 frames for interlaced + filled = atomic_read(&decoder->SurfacesFilled); + if (filled <= 1 + 2 * decoder->Interlaced) { + // keep use of last surface + ++decoder->FramesDuped; + // FIXME: don't warn after stream start, don't warn during pause +// printf("video: display buffer empty, duping frame (%d/%d) %d\n", +// decoder->FramesDuped, decoder->FrameCounter, +// VideoGetBuffers(decoder->Stream)); + return; + } - decoder->SurfaceRead = (decoder->SurfaceRead + 1) % VIDEO_SURFACES_MAX; - atomic_dec(&decoder->SurfacesFilled); - decoder->SurfaceField = !decoder->Interlaced; - return; - } - // next field - decoder->SurfaceField = 1; + decoder->SurfaceRead = (decoder->SurfaceRead + 1) % VIDEO_SURFACES_MAX; + atomic_dec(&decoder->SurfacesFilled); + decoder->SurfaceField = !decoder->Interlaced; + return; + } + // next field + decoder->SurfaceField = 1; } /// -/// Render video surface to output surface. +/// Render video surface to output surface. 
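/*
 * Note on CuvidAdvanceDecoderFrame() above: for progressive material
 * SurfaceField stays 1, so the read index advances on every call; for
 * interlaced material it is reset to 0 after an advance, so the next call
 * only flips the field and the same surface is scanned out twice (once per
 * field) before the ring buffer moves on.
 */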
/// -/// @param decoder CUVID hw decoder -/// @param level video surface level 0 = bottom +/// @param decoder CUVID hw decoder +/// @param level video surface level 0 = bottom /// #ifdef PLACEBO -static void CuvidMixVideo(CuvidDecoder * decoder, int level,struct pl_render_target *target, struct pl_overlay *ovl ) +static void CuvidMixVideo(CuvidDecoder * decoder, int level, struct pl_render_target *target, struct pl_overlay *ovl) #else -static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused))int level) +static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) + int level) #endif { #ifdef PLACEBO - struct pl_render_params render_params; - struct pl_deband_params deband; - struct pl_color_adjustment colors; - struct pl_cone_params cone; - struct pl_tex_vk *vkp; - const struct pl_fmt *fmt; - VkImage Image; - struct pl_image *img; - bool ok; + struct pl_render_params render_params; + struct pl_deband_params deband; + struct pl_color_adjustment colors; + struct pl_cone_params cone; + struct pl_tex_vk *vkp; + const struct pl_fmt *fmt; + VkImage Image; + struct pl_image *img; + bool ok; - VdpRect video_src_rect; - VdpRect dst_rect; - VdpRect dst_video_rect; + VdpRect video_src_rect; + VdpRect dst_rect; + VdpRect dst_video_rect; #endif - int current; - int y; - float xcropf, ycropf; - GLint texLoc; + int current; + int y; + float xcropf, ycropf; + GLint texLoc; #ifdef PLACEBO - if (level) { - dst_rect.x0 = decoder->VideoX; // video window output (clip) - dst_rect.y0 = decoder->VideoY; - dst_rect.x1 = decoder->VideoX + decoder->VideoWidth; - dst_rect.y1 = decoder->VideoY + decoder->VideoHeight; - } else { - dst_rect.x0 = 0; // complete window (clip) - dst_rect.y0 = 0; - dst_rect.x1 = VideoWindowWidth; - dst_rect.y1 = VideoWindowHeight; - } - - video_src_rect.x0 = decoder->CropX; // video source (crop) - video_src_rect.y0 = decoder->CropY; - video_src_rect.x1 = decoder->CropX + decoder->CropWidth; - video_src_rect.y1 = decoder->CropY + decoder->CropHeight; - - dst_video_rect.x0 = decoder->OutputX; // video output (scale) - dst_video_rect.y0 = decoder->OutputY; - dst_video_rect.x1 = decoder->OutputX + decoder->OutputWidth; - dst_video_rect.y1 = decoder->OutputY + decoder->OutputHeight; -#endif - - xcropf = (float) decoder->CropX / (float) decoder->InputWidth; - ycropf = (float) decoder->CropY / (float) decoder->InputHeight; - - current = decoder->SurfacesRb[decoder->SurfaceRead]; - - if (!decoder->Closing) { - VideoSetPts(&decoder->PTS, decoder->Interlaced, 0, decoder->frames[current]); - } - - // Render Progressive frame -#ifndef PLACEBO - y = VideoWindowHeight - decoder->OutputY - decoder->OutputHeight; - if (y <0 ) - y = 0; - glViewport(decoder->OutputX, y, decoder->OutputWidth, decoder->OutputHeight); - - if (gl_prog == 0) - gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm - - glUseProgram(gl_prog); - texLoc = glGetUniformLocation(gl_prog, "texture0"); - glUniform1i(texLoc, 0); - texLoc = glGetUniformLocation(gl_prog, "texture1"); - glUniform1i(texLoc, 1); - - glActiveTexture(GL_TEXTURE0); - glBindTexture(GL_TEXTURE_2D,decoder->gl_textures[current*2+0]); - glActiveTexture(GL_TEXTURE1); - glBindTexture(GL_TEXTURE_2D,decoder->gl_textures[current*2+1]); - - render_pass_quad(0, xcropf, ycropf); - - glUseProgram(0); - glActiveTexture(GL_TEXTURE0); - -#else - img = &decoder->pl_images[current]; - - memcpy(&deband,&pl_deband_default_params,sizeof(deband)); - memcpy(&render_params,&pl_render_default_params,sizeof(render_params)); - - switch 
(decoder->ColorSpace) { - case AVCOL_SPC_RGB: - img->repr.sys = PL_COLOR_SYSTEM_BT_601; - img->color.primaries = PL_COLOR_PRIM_BT_601_625; - img->color.transfer = PL_COLOR_TRC_BT_1886; - img->color.light = PL_COLOR_LIGHT_DISPLAY; - break; - case AVCOL_SPC_BT709: - case AVCOL_SPC_UNSPECIFIED: // comes with UHD - img->repr.sys = PL_COLOR_SYSTEM_BT_709; - memcpy(&img->color,&pl_color_space_bt709,sizeof(struct pl_color_space)); -// img->color.primaries = PL_COLOR_PRIM_BT_709; -// img->color.transfer = PL_COLOR_TRC_BT_1886; -// img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; -// img->color.light = PL_COLOR_LIGHT_DISPLAY; - break; - - case AVCOL_SPC_BT2020_NCL: - img->repr.sys = PL_COLOR_SYSTEM_BT_2020_NC; - memcpy(&img->repr,&pl_color_repr_uhdtv,sizeof(struct pl_color_repr)); - memcpy(&img->color,&pl_color_space_bt2020_hlg,sizeof(struct pl_color_space)); - deband.grain = 0.0f; // no grain in HDR - img->color.sig_scale = 2.0f; -#ifdef VAAPI - render_params.peak_detect_params = NULL; -#endif -// img->color.primaries = PL_COLOR_PRIM_BT_2020; -// img->color.transfer = PL_COLOR_TRC_HLG; -// img->color.light = PL_COLOR_LIGHT_SCENE_HLG; - break; - - default: // fallback - img->repr.sys = PL_COLOR_SYSTEM_BT_709; - memcpy(&img->color,&pl_color_space_bt709,sizeof(struct pl_color_space)); -// img->color.primaries = PL_COLOR_PRIM_BT_709; -// img->color.transfer = PL_COLOR_TRC_BT_1886; -// img->color.light = PL_COLOR_LIGHT_DISPLAY; - break; - } - // Source crop - if (VideoScalerTest) { // right side defnied scaler -// pl_tex_clear(p->gpu,target->fbo,(float[4]){0}); // clear frame - img->src_rect.x0 = video_src_rect.x1/2+1; - img->src_rect.y0 = video_src_rect.y0; - img->src_rect.x1 = video_src_rect.x1; - img->src_rect.y1 = video_src_rect.y1; - - // Video aspect ratio - target->dst_rect.x0 = dst_video_rect.x1/2+dst_video_rect.x0/2+1; - target->dst_rect.y0 = dst_video_rect.y0; - target->dst_rect.x1 = dst_video_rect.x1; - target->dst_rect.y1 = dst_video_rect.y1; - } else { - img->src_rect.x0 = video_src_rect.x0; - img->src_rect.y0 = video_src_rect.y0; - img->src_rect.x1 = video_src_rect.x1; - img->src_rect.y1 = video_src_rect.y1; - - // Video aspect ratio - target->dst_rect.x0 = dst_video_rect.x0; - target->dst_rect.y0 = dst_video_rect.y0; - target->dst_rect.x1 = dst_video_rect.x1; - target->dst_rect.y1 = dst_video_rect.y1; - } - if (level == 0) - pl_tex_clear(p->gpu,target->fbo,(float[4]){0}); - - if (VideoColorBlindness) { - switch(VideoColorBlindness) { - case 1: memcpy(&cone,&pl_vision_protanomaly,sizeof(cone)); - break; - case 2: memcpy(&cone,&pl_vision_deuteranomaly,sizeof(cone)); - break; - case 3: memcpy(&cone,&pl_vision_tritanomaly,sizeof(cone)); - break; - case 4: memcpy(&cone,&pl_vision_monochromacy,sizeof(cone)); - break; - default: memcpy(&cone,&pl_vision_normal,sizeof(cone)); - break; - } - cone.strength = VideoColorBlindnessFaktor; - render_params.cone_params = &cone; - } - else { - render_params.cone_params = NULL; - } - -// render_params.upscaler = &pl_filter_ewa_lanczos; - - render_params.upscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter; - render_params.downscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter; - render_params.color_adjustment = &colors; - render_params.deband_params = &deband; - - colors.brightness = VideoBrightness; - colors.contrast = VideoContrast; - colors.saturation = VideoSaturation; - colors.hue = VideoHue; - colors.gamma = VideoGamma; - - if (ovl) { - target->overlays = ovl; - target->num_overlays = 1; - } else { - target->overlays 
= 0; - target->num_overlays = 0; - } - - if (decoder->newchannel && current == 0 ) { - colors.brightness = -1.0f; - colors.contrast = 0.0f; - if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) { - Debug(3,"Failed rendering frame!\n"); - } - return; - } - - decoder->newchannel = 0; - - if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) { - Debug(3,"Failed rendering frame!\n"); + if (level) { + dst_rect.x0 = decoder->VideoX; // video window output (clip) + dst_rect.y0 = decoder->VideoY; + dst_rect.x1 = decoder->VideoX + decoder->VideoWidth; + dst_rect.y1 = decoder->VideoY + decoder->VideoHeight; + } else { + dst_rect.x0 = 0; // complete window (clip) + dst_rect.y0 = 0; + dst_rect.x1 = VideoWindowWidth; + dst_rect.y1 = VideoWindowHeight; } - if (VideoScalerTest) { // left side test scaler - // Source crop - img->src_rect.x0 = video_src_rect.x0; - img->src_rect.y0 = video_src_rect.y0; - img->src_rect.x1 = video_src_rect.x1/2; - img->src_rect.y1 = video_src_rect.y1; + video_src_rect.x0 = decoder->CropX; // video source (crop) + video_src_rect.y0 = decoder->CropY; + video_src_rect.x1 = decoder->CropX + decoder->CropWidth; + video_src_rect.y1 = decoder->CropY + decoder->CropHeight; - // Video aspect ratio - target->dst_rect.x0 = dst_video_rect.x0; - target->dst_rect.y0 = dst_video_rect.y0; - target->dst_rect.x1 = dst_video_rect.x1/2+dst_video_rect.x0/2; - target->dst_rect.y1 = dst_video_rect.y1; - render_params.upscaler = pl_named_filters[VideoScalerTest-1].filter; - render_params.downscaler = pl_named_filters[VideoScalerTest-1].filter; - - if (!p->renderertest) - p->renderertest = pl_renderer_create(p->ctx, p->gpu); - - if (!pl_render_image(p->renderertest, &decoder->pl_images[current], target, &render_params)) { - Debug(3,"Failed rendering frame!\n"); - } - } - else if (p->renderertest) { - pl_renderer_destroy(&p->renderertest); - p->renderertest = NULL; - } + dst_video_rect.x0 = decoder->OutputX; // video output (scale) + dst_video_rect.y0 = decoder->OutputY; + dst_video_rect.x1 = decoder->OutputX + decoder->OutputWidth; + dst_video_rect.y1 = decoder->OutputY + decoder->OutputHeight; #endif - Debug(4, "video/vdpau: yy video surface %p displayed\n", current, decoder->SurfaceRead); + + xcropf = (float)decoder->CropX / (float)decoder->InputWidth; + ycropf = (float)decoder->CropY / (float)decoder->InputHeight; + + current = decoder->SurfacesRb[decoder->SurfaceRead]; + + if (!decoder->Closing) { + VideoSetPts(&decoder->PTS, decoder->Interlaced, 0, decoder->frames[current]); + } + + // Render Progressive frame +#ifndef PLACEBO + y = VideoWindowHeight - decoder->OutputY - decoder->OutputHeight; + if (y < 0) + y = 0; + glViewport(decoder->OutputX, y, decoder->OutputWidth, decoder->OutputHeight); + + if (gl_prog == 0) + gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm + + glUseProgram(gl_prog); + texLoc = glGetUniformLocation(gl_prog, "texture0"); + glUniform1i(texLoc, 0); + texLoc = glGetUniformLocation(gl_prog, "texture1"); + glUniform1i(texLoc, 1); + + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * 2 + 0]); + glActiveTexture(GL_TEXTURE1); + glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[current * 2 + 1]); + + render_pass_quad(0, xcropf, ycropf); + + glUseProgram(0); + glActiveTexture(GL_TEXTURE0); + +#else + img = &decoder->pl_images[current]; + + memcpy(&deband, &pl_deband_default_params, sizeof(deband)); + memcpy(&render_params, 
&pl_render_default_params, sizeof(render_params)); + + switch (decoder->ColorSpace) { + case AVCOL_SPC_RGB: + img->repr.sys = PL_COLOR_SYSTEM_BT_601; + img->color.primaries = PL_COLOR_PRIM_BT_601_625; + img->color.transfer = PL_COLOR_TRC_BT_1886; + img->color.light = PL_COLOR_LIGHT_DISPLAY; + break; + case AVCOL_SPC_BT709: + case AVCOL_SPC_UNSPECIFIED: // comes with UHD + img->repr.sys = PL_COLOR_SYSTEM_BT_709; + memcpy(&img->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); +// img->color.primaries = PL_COLOR_PRIM_BT_709; +// img->color.transfer = PL_COLOR_TRC_BT_1886; +// img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; +// img->color.light = PL_COLOR_LIGHT_DISPLAY; + break; + + case AVCOL_SPC_BT2020_NCL: + img->repr.sys = PL_COLOR_SYSTEM_BT_2020_NC; + memcpy(&img->repr, &pl_color_repr_uhdtv, sizeof(struct pl_color_repr)); + memcpy(&img->color, &pl_color_space_bt2020_hlg, sizeof(struct pl_color_space)); + deband.grain = 0.0f; // no grain in HDR + img->color.sig_scale = 2.0f; +#ifdef VAAPI + render_params.peak_detect_params = NULL; +#endif +// img->color.primaries = PL_COLOR_PRIM_BT_2020; +// img->color.transfer = PL_COLOR_TRC_HLG; +// img->color.light = PL_COLOR_LIGHT_SCENE_HLG; + break; + + default: // fallback + img->repr.sys = PL_COLOR_SYSTEM_BT_709; + memcpy(&img->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); +// img->color.primaries = PL_COLOR_PRIM_BT_709; +// img->color.transfer = PL_COLOR_TRC_BT_1886; +// img->color.light = PL_COLOR_LIGHT_DISPLAY; + break; + } + // Source crop + if (VideoScalerTest) { // right side: configured scaler +// pl_tex_clear(p->gpu,target->fbo,(float[4]){0}); // clear frame + img->src_rect.x0 = video_src_rect.x1 / 2 + 1; + img->src_rect.y0 = video_src_rect.y0; + img->src_rect.x1 = video_src_rect.x1; + img->src_rect.y1 = video_src_rect.y1; + + // Video aspect ratio + target->dst_rect.x0 = dst_video_rect.x1 / 2 + dst_video_rect.x0 / 2 + 1; + target->dst_rect.y0 = dst_video_rect.y0; + target->dst_rect.x1 = dst_video_rect.x1; + target->dst_rect.y1 = dst_video_rect.y1; + } else { + img->src_rect.x0 = video_src_rect.x0; + img->src_rect.y0 = video_src_rect.y0; + img->src_rect.x1 = video_src_rect.x1; + img->src_rect.y1 = video_src_rect.y1; + + // Video aspect ratio + target->dst_rect.x0 = dst_video_rect.x0; + target->dst_rect.y0 = dst_video_rect.y0; + target->dst_rect.x1 = dst_video_rect.x1; + target->dst_rect.y1 = dst_video_rect.y1; + } + if (level == 0) + pl_tex_clear(p->gpu, target->fbo, (float[4]) { 0 } + ); + + if (VideoColorBlindness) { + switch (VideoColorBlindness) { + case 1: + memcpy(&cone, &pl_vision_protanomaly, sizeof(cone)); + break; + case 2: + memcpy(&cone, &pl_vision_deuteranomaly, sizeof(cone)); + break; + case 3: + memcpy(&cone, &pl_vision_tritanomaly, sizeof(cone)); + break; + case 4: + memcpy(&cone, &pl_vision_monochromacy, sizeof(cone)); + break; + default: + memcpy(&cone, &pl_vision_normal, sizeof(cone)); + break; + } + cone.strength = VideoColorBlindnessFaktor; + render_params.cone_params = &cone; + } else { + render_params.cone_params = NULL; + } + +// render_params.upscaler = &pl_filter_ewa_lanczos; + + render_params.upscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter; + render_params.downscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter; + render_params.color_adjustment = &colors; + render_params.deband_params = &deband; + + colors.brightness = VideoBrightness; + colors.contrast = VideoContrast; + colors.saturation = VideoSaturation; + colors.hue = VideoHue; + colors.gamma =
VideoGamma; + + if (ovl) { + target->overlays = ovl; + target->num_overlays = 1; + } else { + target->overlays = 0; + target->num_overlays = 0; + } + + if (decoder->newchannel && current == 0) { + colors.brightness = -1.0f; + colors.contrast = 0.0f; + if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) { + Debug(3, "Failed rendering frame!\n"); + } + return; + } + + decoder->newchannel = 0; + + if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) { + Debug(3, "Failed rendering frame!\n"); + } + + if (VideoScalerTest) { // left side test scaler + // Source crop + img->src_rect.x0 = video_src_rect.x0; + img->src_rect.y0 = video_src_rect.y0; + img->src_rect.x1 = video_src_rect.x1 / 2; + img->src_rect.y1 = video_src_rect.y1; + + // Video aspect ratio + target->dst_rect.x0 = dst_video_rect.x0; + target->dst_rect.y0 = dst_video_rect.y0; + target->dst_rect.x1 = dst_video_rect.x1 / 2 + dst_video_rect.x0 / 2; + target->dst_rect.y1 = dst_video_rect.y1; + render_params.upscaler = pl_named_filters[VideoScalerTest - 1].filter; + render_params.downscaler = pl_named_filters[VideoScalerTest - 1].filter; + + if (!p->renderertest) + p->renderertest = pl_renderer_create(p->ctx, p->gpu); + + if (!pl_render_image(p->renderertest, &decoder->pl_images[current], target, &render_params)) { + Debug(3, "Failed rendering frame!\n"); + } + } else if (p->renderertest) { + pl_renderer_destroy(&p->renderertest); + p->renderertest = NULL; + } +#endif + Debug(4, "video/vdpau: yy video surface %d displayed (SurfaceRead %d)\n", current, decoder->SurfaceRead); } #ifdef PLACEBO -void make_osd_overlay(int x, int y, int width, int height) { - const struct pl_fmt *fmt; - struct pl_overlay *pl; - const float black[4] = { 0.0f,0.0f,0.0f,1.0f}; - - int offset = VideoWindowHeight - (VideoWindowHeight-height-y) - (VideoWindowHeight - y); - - fmt = pl_find_named_fmt(p->gpu, "rgba8"); // 8 Bit RGB - - pl = &osdoverlay; - - if (pl->plane.texture) { - pl_tex_clear(p->gpu,pl->plane.texture,(float[4]){0}); - pl_tex_destroy(p->gpu,&pl->plane.texture); - } - - // make texture for OSD - pl->plane.texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { - .w = width, - .h = height, - .d = 0, - .format = fmt, - .sampleable = true, - .host_writable = true, - .blit_dst = true, - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .address_mode = PL_TEX_ADDRESS_CLAMP, - }); - - // make overlay - pl_tex_clear(p->gpu,pl->plane.texture,(float[4]){0}); - pl->plane.components = 4; - pl->plane.shift_x = 0.0f; - pl->plane.shift_y = 0.0f; - pl->plane.component_mapping[0] = PL_CHANNEL_B; - pl->plane.component_mapping[1] = PL_CHANNEL_G; - pl->plane.component_mapping[2] = PL_CHANNEL_R; - pl->plane.component_mapping[3] = PL_CHANNEL_A; - pl->mode = PL_OVERLAY_NORMAL; - pl->repr.sys = PL_COLOR_SYSTEM_RGB; - pl->repr.levels = PL_COLOR_LEVELS_PC; - pl->repr.alpha = PL_ALPHA_INDEPENDENT; - - memcpy(&osdoverlay.color,&pl_color_space_srgb,sizeof(struct pl_color_space)); - - pl->rect.x0 = x; - pl->rect.y0 = VideoWindowHeight - y + offset; // Boden von oben - pl->rect.x1 = x+width; - pl->rect.y1 = VideoWindowHeight- height - y + offset; +void make_osd_overlay(int x, int y, int width, int height) +{ + const struct pl_fmt *fmt; + struct pl_overlay *pl; + const float black[4] = { 0.0f, 0.0f, 0.0f, 1.0f }; + + int offset = VideoWindowHeight - (VideoWindowHeight - height - y) - (VideoWindowHeight - y); + + fmt = pl_find_named_fmt(p->gpu, "rgba8"); // 8-bit RGBA + + pl = &osdoverlay; + + if (pl->plane.texture) { +
pl_tex_clear(p->gpu, pl->plane.texture, (float[4]) { 0 }); + pl_tex_destroy(p->gpu, &pl->plane.texture); + } + + // make texture for OSD + pl->plane.texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { + .w = width, + .h = height, + .d = 0, + .format = fmt, + .sampleable = true, + .host_writable = true, + .blit_dst = true, + .sample_mode = PL_TEX_SAMPLE_LINEAR, + .address_mode = PL_TEX_ADDRESS_CLAMP, + }); + + // make overlay + pl_tex_clear(p->gpu, pl->plane.texture, (float[4]) { 0 }); + pl->plane.components = 4; + pl->plane.shift_x = 0.0f; + pl->plane.shift_y = 0.0f; + pl->plane.component_mapping[0] = PL_CHANNEL_B; + pl->plane.component_mapping[1] = PL_CHANNEL_G; + pl->plane.component_mapping[2] = PL_CHANNEL_R; + pl->plane.component_mapping[3] = PL_CHANNEL_A; + pl->mode = PL_OVERLAY_NORMAL; + pl->repr.sys = PL_COLOR_SYSTEM_RGB; + pl->repr.levels = PL_COLOR_LEVELS_PC; + pl->repr.alpha = PL_ALPHA_INDEPENDENT; + + memcpy(&osdoverlay.color, &pl_color_space_srgb, sizeof(struct pl_color_space)); + + pl->rect.x0 = x; + pl->rect.y0 = VideoWindowHeight - y + offset; // bottom edge, measured from the top + pl->rect.x1 = x + width; + pl->rect.y1 = VideoWindowHeight - height - y + offset; } #endif /// -/// Display a video frame. +/// Display a video frame. /// static void CuvidDisplayFrame(void) { - static uint64_t first_time = 0, round_time=0; - static uint64_t last_time = 0; - int i; - static unsigned int Count; - int filled; - CuvidDecoder *decoder; - int RTS_flag; - int valid_frame = 0; - float ldiff; - + static uint64_t first_time = 0, round_time = 0; + static uint64_t last_time = 0; + int i; + static unsigned int Count; + int filled; + CuvidDecoder *decoder; + int RTS_flag; + int valid_frame = 0; + float ldiff; + #ifdef PLACEBO - uint64_t diff; - static float fdiff = 23000.0; - struct pl_swapchain_frame frame; - struct pl_render_target target; - bool ok; - static int first = 1; - VkImage Image; - const struct pl_fmt *fmt; - const float black[4] = { 0.0f,0.0f,0.0f,1.0f}; + uint64_t diff; + static float fdiff = 23000.0; + struct pl_swapchain_frame frame; + struct pl_render_target target; + bool ok; + static int first = 1; + VkImage Image; + const struct pl_fmt *fmt; + const float black[4] = { 0.0f, 0.0f, 0.0f, 1.0f }; #endif - + #ifndef PLACEBO - if (CuvidDecoderN) - CuvidDecoders[0]->Frameproc = (float)(GetusTicks()-last_time)/1000000.0; + if (CuvidDecoderN) + CuvidDecoders[0]->Frameproc = (float)(GetusTicks() - last_time) / 1000000.0; #ifdef CUVID - glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); - glXWaitVideoSyncSGI (2, (Count + 1) % 2, &Count); // wait for previous frame to swap - last_time = GetusTicks(); + glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); + glXWaitVideoSyncSGI(2, (Count + 1) % 2, &Count); // wait for previous frame to swap + last_time = GetusTicks(); #else - eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); - EglCheck(); + eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); + EglCheck(); #endif - glClear(GL_COLOR_BUFFER_BIT); - + glClear(GL_COLOR_BUFFER_BIT); + #else - if (CuvidDecoderN) { - ldiff = (float)(GetusTicks()-round_time)/1000000.0; - if (ldiff < 100.0 && ldiff > 0.0) - CuvidDecoders[0]->Frameproc = (CuvidDecoders[0]->Frameproc + ldiff + ldiff) / 3.0; - } - round_time = GetusTicks(); + if (CuvidDecoderN) { + ldiff = (float)(GetusTicks() - round_time) / 1000000.0; + if (ldiff < 100.0 && ldiff > 0.0) + CuvidDecoders[0]->Frameproc = (CuvidDecoders[0]->Frameproc + ldiff + ldiff) / 3.0; + } + round_time = GetusTicks(); #if
1 - diff = (GetusTicks()-last_time)/1000; + diff = (GetusTicks() - last_time) / 1000; -// last_time = GetusTicks(); +// last_time = GetusTicks(); //printf("Roundtrip Displayframe %d\n",diff); - if (diff < 15000 && diff > 0) { + if (diff < 15000 && diff > 0) { //printf("Sleep %d\n",15000-diff); - usleep((15000 - diff));// * 1000); - } + usleep((15000 - diff)); // * 1000); + } + +#endif + if (!p->swapchain) + return; -#endif - if (!p->swapchain) - return; - //last_time = GetusTicks(); - + #ifdef CUVID - //first_time = GetusTicks(); - VideoThreadLock(); - if (!first) { -// last_time = GetusTicks(); - if (!pl_swapchain_submit_frame(p->swapchain)) - Error(_("Failed to submit swapchain buffer\n")); - pl_swapchain_swap_buffers(p->swapchain); // swap buffers -// printf("submit and swap %d\n",(GetusTicks()-last_time)/1000000); - } - -#endif - first = 0; + //first_time = GetusTicks(); + VideoThreadLock(); + if (!first) { +// last_time = GetusTicks(); + if (!pl_swapchain_submit_frame(p->swapchain)) + Error(_("Failed to submit swapchain buffer\n")); + pl_swapchain_swap_buffers(p->swapchain); // swap buffers +// printf("submit and swap %d\n",(GetusTicks()-last_time)/1000000); + } - last_time = GetusTicks(); - - while (!pl_swapchain_start_frame(p->swapchain, &frame)) { // get new frame wait for previous to swap - usleep(5); - } +#endif + first = 0; - if (!frame.fbo) { + last_time = GetusTicks(); + + while (!pl_swapchain_start_frame(p->swapchain, &frame)) { // get new frame wait for previous to swap + usleep(5); + } + + if (!frame.fbo) { #ifdef CUVID - VideoThreadUnlock(); + VideoThreadUnlock(); #endif - return; - } -#ifdef VAAPI - VideoThreadLock(); + return; + } +#ifdef VAAPI + VideoThreadLock(); #endif - pl_render_target_from_swapchain(&target, &frame); // make target frame + pl_render_target_from_swapchain(&target, &frame); // make target frame - if (VideoSurfaceModesChanged){ - pl_renderer_destroy(&p->renderer); - p->renderer = pl_renderer_create(p->ctx, p->gpu); - if (p->renderertest) { - pl_renderer_destroy(&p->renderertest); - p->renderertest = NULL; - } - VideoSurfaceModesChanged = 0; - } - - target.repr.sys = PL_COLOR_SYSTEM_RGB; - if (VideoStudioLevels) - target.repr.levels = PL_COLOR_LEVELS_PC; - else - target.repr.levels = PL_COLOR_LEVELS_TV; - target.repr.alpha = PL_ALPHA_UNKNOWN; -// target.repr.bits.sample_depth = 16; -// target.repr.bits.color_depth = 16; -// target.repr.bits.bit_shift =0; - - switch (VulkanTargetColorSpace) { - case 0: - memcpy(&target.color,&pl_color_space_monitor,sizeof(struct pl_color_space)); - break; - case 1: - memcpy(&target.color,&pl_color_space_srgb,sizeof(struct pl_color_space)); - break; - case 2: - memcpy(&target.color,&pl_color_space_bt709,sizeof(struct pl_color_space)); - break; - case 3: - memcpy(&target.color,&pl_color_space_bt2020_hlg,sizeof(struct pl_color_space)); - break; - case 4: - memcpy(&target.color,&pl_color_space_hdr10,sizeof(struct pl_color_space)); - break; - default: - memcpy(&target.color,&pl_color_space_monitor,sizeof(struct pl_color_space)); - break; - } + if (VideoSurfaceModesChanged) { + pl_renderer_destroy(&p->renderer); + p->renderer = pl_renderer_create(p->ctx, p->gpu); + if (p->renderertest) { + pl_renderer_destroy(&p->renderertest); + p->renderertest = NULL; + } + VideoSurfaceModesChanged = 0; + } + + target.repr.sys = PL_COLOR_SYSTEM_RGB; + if (VideoStudioLevels) + target.repr.levels = PL_COLOR_LEVELS_PC; + else + target.repr.levels = PL_COLOR_LEVELS_TV; + target.repr.alpha = PL_ALPHA_UNKNOWN; +// target.repr.bits.sample_depth = 
16; +// target.repr.bits.color_depth = 16; +// target.repr.bits.bit_shift =0; + + switch (VulkanTargetColorSpace) { + case 0: + memcpy(&target.color, &pl_color_space_monitor, sizeof(struct pl_color_space)); + break; + case 1: + memcpy(&target.color, &pl_color_space_srgb, sizeof(struct pl_color_space)); + break; + case 2: + memcpy(&target.color, &pl_color_space_bt709, sizeof(struct pl_color_space)); + break; + case 3: + memcpy(&target.color, &pl_color_space_bt2020_hlg, sizeof(struct pl_color_space)); + break; + case 4: + memcpy(&target.color, &pl_color_space_hdr10, sizeof(struct pl_color_space)); + break; + default: + memcpy(&target.color, &pl_color_space_monitor, sizeof(struct pl_color_space)); + break; + } #endif - // - // Render videos into output - // - /// + // + // Render videos into output + // + /// - for (i = 0; i < CuvidDecoderN; ++i) { + for (i = 0; i < CuvidDecoderN; ++i) { - decoder = CuvidDecoders[i]; - decoder->FramesDisplayed++; - decoder->StartCounter++; + decoder = CuvidDecoders[i]; + decoder->FramesDisplayed++; + decoder->StartCounter++; - filled = atomic_read(&decoder->SurfacesFilled); + filled = atomic_read(&decoder->SurfacesFilled); - // need 1 frame for progressive, 3 frames for interlaced - if (filled < 1 + 2 * decoder->Interlaced) { - // FIXME: rewrite MixVideo to support less surfaces - if ((VideoShowBlackPicture && !decoder->TrickSpeed) || - (VideoShowBlackPicture && decoder->Closing < -300)) { - CuvidBlackSurface(decoder); - CuvidMessage(4, "video/cuvid: black surface displayed\n"); - } - continue; - } - valid_frame = 1; -#ifdef PLACEBO - if (OsdShown == 1) { // New OSD opened - pthread_mutex_lock(&OSDMutex); - make_osd_overlay(OSDx,OSDy,OSDxsize,OSDysize); - if (posd) { - pl_tex_upload(p->gpu,&(struct pl_tex_transfer_params) { // upload OSD - .tex = osdoverlay.plane.texture, - .ptr = posd, - }); - } - OsdShown = 2; - pthread_mutex_unlock(&OSDMutex); - - } - if (OsdShown == 2) { - CuvidMixVideo(decoder, i, &target, &osdoverlay); - } else { - CuvidMixVideo(decoder, i, &target, NULL); - } - -#else - CuvidMixVideo(decoder, i); -#endif - if (i==0 && decoder->grab) { // Grab frame + // need 1 frame for progressive, 3 frames for interlaced + if (filled < 1 + 2 * decoder->Interlaced) { + // FIXME: rewrite MixVideo to support less surfaces + if ((VideoShowBlackPicture && !decoder->TrickSpeed) || (VideoShowBlackPicture && decoder->Closing < -300)) { + CuvidBlackSurface(decoder); + CuvidMessage(4, "video/cuvid: black surface displayed\n"); + } + continue; + } + valid_frame = 1; #ifdef PLACEBO - if (decoder->grab == 2 && OsdShown == 2) { - get_RGB(decoder,&osdoverlay); - } else { - get_RGB(decoder,NULL); - } + if (OsdShown == 1) { // New OSD opened + pthread_mutex_lock(&OSDMutex); + make_osd_overlay(OSDx, OSDy, OSDxsize, OSDysize); + if (posd) { + pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) { // upload OSD + .tex = osdoverlay.plane.texture, + .ptr = posd, + }); + } + OsdShown = 2; + pthread_mutex_unlock(&OSDMutex); + + } + if (OsdShown == 2) { + CuvidMixVideo(decoder, i, &target, &osdoverlay); + } else { + CuvidMixVideo(decoder, i, &target, NULL); + } + #else - get_RGB(decoder); + CuvidMixVideo(decoder, i); #endif - decoder->grab = 0; - } - } - -#ifndef PLACEBO - // add osd to surface - if (OsdShown && valid_frame) { - GLint texLoc; + if (i == 0 && decoder->grab) { // Grab frame +#ifdef PLACEBO + if (decoder->grab == 2 && OsdShown == 2) { + get_RGB(decoder, &osdoverlay); + } else { + get_RGB(decoder, NULL); + } +#else + get_RGB(decoder); +#endif + decoder->grab = 
0; + } + } + +#ifndef PLACEBO + // add osd to surface + if (OsdShown && valid_frame) { + GLint texLoc; + #ifdef CUVID - glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); - GlxCheck(); + glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); + GlxCheck(); #endif #ifdef VAAPI -// eglMakeCurrent(eglDisplay, eglSurface, eglSurface, OSDcontext); +// eglMakeCurrent(eglDisplay, eglSurface, eglSurface, OSDcontext); #endif - glEnable(GL_BLEND); - GlxCheck(); - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); - GlxCheck(); - glViewport(0, 0, VideoWindowWidth,VideoWindowHeight); - GlxCheck(); - if (gl_prog_osd == 0) - gl_prog_osd = sc_generate_osd(gl_prog_osd); // generate shader programm + glEnable(GL_BLEND); + GlxCheck(); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + GlxCheck(); + glViewport(0, 0, VideoWindowWidth, VideoWindowHeight); + GlxCheck(); + if (gl_prog_osd == 0) + gl_prog_osd = sc_generate_osd(gl_prog_osd); // generate shader program - glUseProgram(gl_prog_osd); - texLoc = glGetUniformLocation(gl_prog_osd, "texture0"); - glUniform1i(texLoc, 0); + glUseProgram(gl_prog_osd); + texLoc = glGetUniformLocation(gl_prog_osd, "texture0"); + glUniform1i(texLoc, 0); - glActiveTexture(GL_TEXTURE0); + glActiveTexture(GL_TEXTURE0); - pthread_mutex_lock(&OSDMutex); - glBindTexture(GL_TEXTURE_2D,OSDtexture); - render_pass_quad(0, 0, 0); - pthread_mutex_unlock(&OSDMutex); - - glUseProgram(0); - glActiveTexture(GL_TEXTURE0); + pthread_mutex_lock(&OSDMutex); + glBindTexture(GL_TEXTURE_2D, OSDtexture); + render_pass_quad(0, 0, 0); + pthread_mutex_unlock(&OSDMutex); + + glUseProgram(0); + glActiveTexture(GL_TEXTURE0); #ifdef CUVID - glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); + glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); #endif #ifdef VAAPI -// eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); +// eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); +#endif + } #endif - } -#endif -#ifdef PLACEBO -#ifdef VAAPI - // first_time = GetusTicks(); - if (!pl_swapchain_submit_frame(p->swapchain)) - Fatal(_("Failed to submit swapchain buffer\n")); - pl_swapchain_swap_buffers(p->swapchain); // swap buffers - // printf("submit and swap %d us\n",(GetusTicks()-first_time)/1000); -#endif - VideoThreadUnlock(); -// printf("Display time %d\n",(GetusTicks()-first_time)/1000000); +#ifdef PLACEBO +#ifdef VAAPI + // first_time = GetusTicks(); + if (!pl_swapchain_submit_frame(p->swapchain)) + Fatal(_("Failed to submit swapchain buffer\n")); + pl_swapchain_swap_buffers(p->swapchain); // swap buffers + // printf("submit and swap %d us\n",(GetusTicks()-first_time)/1000); +#endif + VideoThreadUnlock(); +// printf("Display time %d\n",(GetusTicks()-first_time)/1000000); #else #ifdef CUVID - glXGetVideoSyncSGI (&Count); // get current frame - glXSwapBuffers(XlibDisplay, VideoWindow); - glXMakeCurrent(XlibDisplay, None, NULL); + glXGetVideoSyncSGI(&Count); // get current frame + glXSwapBuffers(XlibDisplay, VideoWindow); + glXMakeCurrent(XlibDisplay, None, NULL); #else - eglSwapBuffers(eglDisplay, eglSurface); - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + eglSwapBuffers(eglDisplay, eglSurface); + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); #endif #endif - - // FIXME: CLOCK_MONOTONIC_RAW - clock_gettime(CLOCK_MONOTONIC, &CuvidFrameTime); - for (i = 0; i < CuvidDecoderN; ++i) { - // remember time of last shown surface - CuvidDecoders[i]->FrameTime = CuvidFrameTime; - } + + // FIXME:
CLOCK_MONOTONIC_RAW + clock_gettime(CLOCK_MONOTONIC, &CuvidFrameTime); + for (i = 0; i < CuvidDecoderN; ++i) { + // remember time of last shown surface + CuvidDecoders[i]->FrameTime = CuvidFrameTime; + } } /// -/// Set CUVID decoder video clock. +/// Set CUVID decoder video clock. /// -/// @param decoder CUVID hardware decoder -/// @param pts audio presentation timestamp +/// @param decoder CUVID hardware decoder +/// @param pts audio presentation timestamp /// void CuvidSetClock(CuvidDecoder * decoder, int64_t pts) { - decoder->PTS = pts; + decoder->PTS = pts; } /// -/// Get CUVID decoder video clock. +/// Get CUVID decoder video clock. /// -/// @param decoder CUVID hw decoder +/// @param decoder CUVID hw decoder /// -/// FIXME: 20 wrong for 60hz dvb streams +/// FIXME: 20 wrong for 60hz dvb streams /// static int64_t CuvidGetClock(const CuvidDecoder * decoder) { - // pts is the timestamp of the latest decoded frame - if (decoder->PTS == (int64_t) AV_NOPTS_VALUE) { - return AV_NOPTS_VALUE; - } - // subtract buffered decoded frames - if (decoder->Interlaced) { - /* - Info("video: %s =pts field%d #%d\n", - Timestamp2String(decoder->PTS), - decoder->SurfaceField, - atomic_read(&decoder->SurfacesFilled)); - */ - // 1 field is future, 2 fields are past, + 2 in driver queue - return decoder->PTS - 20 * 90 * (2 * atomic_read(&decoder->SurfacesFilled) - decoder->SurfaceField - 2 + 2); - } - // + 2 in driver queue - return decoder->PTS - 20 * 90 * (atomic_read(&decoder->SurfacesFilled)+SWAP_BUFFER_SIZE-1 ); // +2 + // pts is the timestamp of the latest decoded frame + if (decoder->PTS == (int64_t) AV_NOPTS_VALUE) { + return AV_NOPTS_VALUE; + } + // subtract buffered decoded frames + if (decoder->Interlaced) { + /* + Info("video: %s =pts field%d #%d\n", + Timestamp2String(decoder->PTS), + decoder->SurfaceField, + atomic_read(&decoder->SurfacesFilled)); + */ + // 1 field is future, 2 fields are past, + 2 in driver queue + return decoder->PTS - 20 * 90 * (2 * atomic_read(&decoder->SurfacesFilled) - decoder->SurfaceField - 2 + 2); + } + // + 2 in driver queue + return decoder->PTS - 20 * 90 * (atomic_read(&decoder->SurfacesFilled) + SWAP_BUFFER_SIZE - 1); // +2 } /// -/// Set CUVID decoder closing stream flag. +/// Set CUVID decoder closing stream flag. /// -/// @param decoder CUVID decoder +/// @param decoder CUVID decoder /// static void CuvidSetClosing(CuvidDecoder * decoder) { - decoder->Closing = 1; - OsdShown = 0; + decoder->Closing = 1; + OsdShown = 0; } /// -/// Reset start of frame counter. +/// Reset start of frame counter. /// -/// @param decoder CUVID decoder +/// @param decoder CUVID decoder /// static void CuvidResetStart(CuvidDecoder * decoder) { - decoder->StartCounter = 0; + decoder->StartCounter = 0; } /// -/// Set trick play speed. +/// Set trick play speed. /// -/// @param decoder CUVID decoder -/// @param speed trick speed (0 = normal) +/// @param decoder CUVID decoder +/// @param speed trick speed (0 = normal) /// static void CuvidSetTrickSpeed(CuvidDecoder * decoder, int speed) { - decoder->TrickSpeed = speed; - decoder->TrickCounter = speed; - if (speed) { - decoder->Closing = 0; - } + decoder->TrickSpeed = speed; + decoder->TrickCounter = speed; + if (speed) { + decoder->Closing = 0; + } } /// -/// Get CUVID decoder statistics. +/// Get CUVID decoder statistics. 
/// -/// @param decoder CUVID decoder -/// @param[out] missed missed frames -/// @param[out] duped duped frames -/// @param[out] dropped dropped frames -/// @param[out] count number of decoded frames +/// @param decoder CUVID decoder +/// @param[out] missed missed frames +/// @param[out] duped duped frames +/// @param[out] dropped dropped frames +/// @param[out] count number of decoded frames /// -void CuvidGetStats(CuvidDecoder * decoder, int *missed, int *duped, - int *dropped, int *counter, float *frametime) +void CuvidGetStats(CuvidDecoder * decoder, int *missed, int *duped, int *dropped, int *counter, float *frametime) { - *missed = decoder->FramesMissed; - *duped = decoder->FramesDuped; - *dropped = decoder->FramesDropped; - *counter = decoder->FrameCounter; - *frametime = decoder->Frameproc; + *missed = decoder->FramesMissed; + *duped = decoder->FramesDuped; + *dropped = decoder->FramesDropped; + *counter = decoder->FrameCounter; + *frametime = decoder->Frameproc; } /// -/// Sync decoder output to audio. +/// Sync decoder output to audio. /// -/// trick-speed show frame times -/// still-picture show frame until new frame arrives -/// 60hz-mode repeat every 5th picture -/// video>audio slow down video by duplicating frames -/// video