diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..5573f25 --- /dev/null +++ b/.clang-format @@ -0,0 +1,6 @@ +--- +BasedOnStyle: LLVM +UseTab: Never +IndentWidth: 4 +IndentCaseLabels: true +ColumnLimit: 119 diff --git a/.indent.pro b/.indent.pro deleted file mode 100644 index 05998d9..0000000 --- a/.indent.pro +++ /dev/null @@ -1,37 +0,0 @@ ---blank-lines-before-block-comments ---blank-lines-after-declarations ---blank-lines-after-procedures ---no-blank-lines-after-commas ---braces-on-if-line ---no-blank-before-sizeof ---comment-indentation41 ---declaration-comment-column41 ---no-comment-delimiters-on-blank-lines ---swallow-optional-blank-lines ---dont-format-comments ---parameter-indentation4 ---indent-level4 ---line-comments-indentation0 ---cuddle-else ---cuddle-do-while ---brace-indent0 ---case-brace-indentation0 -//--start-left-side-of-comments ---leave-preprocessor-space -//--continuation-indentation8 ---case-indentation4 ---else-endif-column0 ---no-space-after-casts ---declaration-indentation1 ---dont-line-up-parentheses ---no-space-after-function-call-names ---space-special-semicolon ---tab-size4 ---no-tabs ---line-length119 ---comment-line-length119 ---honour-newlines ---dont-break-procedure-type ---break-before-boolean-operator ---continuation-indentation4 ---ignore-newlines diff --git a/Makefile b/Makefile index d9f4705..f5d2491 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # # Makefile for a Video Disk Recorder plugin -# +# # $Id: 2a41981a57e5e83036463c6a08c84b86ed9d2be3 $ # The official name of this plugin. @@ -11,7 +11,7 @@ ### Configuration (edit this for your needs) # comment out if not needed -# what kind of decoder do we make - +# what kind of decoder do we make - # if VAAPI is enabled the pluginname is softhdvaapi # if CUVID is enabled the pluginname is softhdcuvid # if DRM is enabled the pluginname is softhddrm @@ -19,28 +19,21 @@ VAAPI ?= 0 CUVID ?= 0 # if you enable DRM then the plugin will only run without X server -# only valid for VAAPI +# only valid for VAAPI DRM ?= 0 - -# use libplacebo - -# available for all decoders but for DRM you need LIBPLACEBO_GL +# use libplacebo - +# available for all decoders but for DRM you need LIBPLACEBO_GL LIBPLACEBO ?= 1 LIBPLACEBO_GL ?= 0 # use YADIF deint - only available with cuvid -#YADIF=1 +#YADIF = 1 # use gamma correction #GAMMA ?= 0 - - -CONFIG := -DDEBUG # remove # to enable debug output - - - - +CONFIG := -DDEBUG # remove '#' to enable debug output #--------------------- no more config needed past this point-------------------------------- @@ -61,7 +54,7 @@ endif ifeq ($(CUVID),1) ifeq ($(DRM),1) -$(error Missmatch in Plugin selection) +$(error Mismatch in Plugin selection) exit 1; endif endif @@ -69,42 +62,27 @@ endif ifeq ($(CUVID),1) ifeq ($(VAAPI),1) -$(error Missmatch in Plugin selection) +$(error Mismatch in Plugin selection) exit 1; endif endif endif # MAKECMDGOALS!=indent endif # MAKECMDGOALS!=clean + + #-------------------------- - - PLUGIN = softhdcuvid # support OPENGLOSD always needed OPENGLOSD=1 -# support alsa audio output module -ALSA ?= $(shell pkg-config --exists alsa && echo 1) - # support OSS audio output module -OSS ?= 1 - # use DMPS SCREENSAVER=1 OPENGL=1 -# use ffmpeg libswresample -SWRESAMPLE ?= $(shell pkg-config --exists libswresample && echo 1) -SWRESAMPLE = 1 - -# use libav libavresample -#ifneq ($(SWRESAMPLE),1) -#AVRESAMPLE ?= $(shell pkg-config --exists libavresample && echo 1#) -#AVRESAMPLE = 1 -#endif - CONFIG += -DHAVE_GL # needed for mpv libs #CONFIG += 
-DSTILL_DEBUG=2 # still picture debug verbose level CONFIG += -DAV_INFO -DAV_INFO_TIME=3000 # info/debug a/v sync @@ -136,7 +114,7 @@ TMPDIR ?= /tmp ### The compiler options: -export CFLAGS = $(call PKGCFG,cflags) +export CFLAGS = $(call PKGCFG,cflags) export CXXFLAGS = $(call PKGCFG,cxxflags) ifeq ($(CFLAGS),) @@ -154,20 +132,8 @@ APIVERSION = $(call PKGCFG,apiversion) -include $(PLGCFG) - - ### Parse softhddevice config -ifeq ($(ALSA),1) -CONFIG += -DUSE_ALSA -_CFLAGS += $(shell pkg-config --cflags alsa) -LIBS += $(shell pkg-config --libs alsa) -endif - -ifeq ($(OSS),1) -CONFIG += -DUSE_OSS -endif - ifeq ($(OPENGL),1) #_CFLAGS += $(shell pkg-config --cflags libva-glx) #LIBS += $(shell pkg-config --libs libva-glx) @@ -180,29 +146,29 @@ endif ifeq ($(OPENGL),1) CONFIG += -DUSE_GLX _CFLAGS += $(shell pkg-config --cflags gl glu glew) -#LIBS += $(shell pkg-config --libs glu glew) +#LIBS += $(shell pkg-config --libs glu glew) _CFLAGS += $(shell pkg-config --cflags freetype2) LIBS += $(shell pkg-config --libs freetype2) endif ifeq ($(VAAPI),1) -CONFIG += -DVAAPI +CONFIG += -DVAAPI #LIBPLACEBO=1 PLUGIN = softhdvaapi endif ifeq ($(LIBPLACEBO_GL),1) CONFIG += -DPLACEBO_GL -DPLACEBO -LIBS += -lepoxy -LIBS += -lplacebo +LIBS += -lepoxy +LIBS += -lplacebo else -LIBS += -lEGL +LIBS += -lEGL endif ifeq ($(LIBPLACEBO),1) CONFIG += -DPLACEBO -LIBS += -lEGL -LIBS += -lplacebo +LIBS += -lEGL +LIBS += -lplacebo endif ifeq ($(DRM),1) @@ -212,21 +178,19 @@ _CFLAGS += $(shell pkg-config --cflags libdrm) LIBS += -lgbm -ldrm -lEGL endif - ifeq ($(CUVID),1) #CONFIG += -DUSE_PIP # PIP support CONFIG += -DCUVID # enable CUVID decoder -LIBS += -lEGL -lGL +LIBS += -lEGL -lGL ifeq ($(YADIF),1) CONFIG += -DYADIF # Yadif only with CUVID endif endif ifeq ($(GAMMA),1) -CONFIG += -DGAMMA +CONFIG += -DGAMMA endif - ARCHIVE = $(PLUGIN)-$(VERSION) PACKAGE = vdr-$(ARCHIVE) @@ -234,18 +198,19 @@ PACKAGE = vdr-$(ARCHIVE) SOFILE = libvdr-$(PLUGIN).so +# +# Test that libswresample is available +# +ifneq (exists, $(shell pkg-config libswresample && echo exists)) + $(warning ******************************************************************) + $(warning 'libswresample' not found!) + $(error ******************************************************************) +endif +_CFLAGS += $(shell pkg-config --cflags libswresample) +LIBS += $(shell pkg-config --libs libswresample) # -# Test that libswresample is available -# -#ifneq (exists, $(shell pkg-config libswresample && echo exists)) -# $(warning ******************************************************************) -# $(warning 'libswresample' not found!) 
-# $(error ******************************************************************) -#endif - -# -# Test and set config for libavutil +# Test and set config for libavutil # ifneq (exists, $(shell pkg-config libavutil && echo exists)) $(warning ******************************************************************) @@ -256,7 +221,7 @@ _CFLAGS += $(shell pkg-config --cflags libavutil) LIBS += $(shell pkg-config --libs libavutil) # -# Test and set config for libswscale +# Test and set config for libswscale # ifneq (exists, $(shell pkg-config libswscale && echo exists)) $(warning ******************************************************************) @@ -277,27 +242,27 @@ endif _CFLAGS += $(shell pkg-config --cflags libavcodec) LIBS += $(shell pkg-config --libs libavcodec libavfilter) +# +# Test and set config for alsa +# +ifneq (exists, $(shell pkg-config alsa && echo exists)) + $(warning ******************************************************************) + $(warning 'alsa' not found!) + $(error ******************************************************************) +endif +_CFLAGS += $(shell pkg-config --cflags alsa) +LIBS += $(shell pkg-config --libs alsa) ifeq ($(SCREENSAVER),1) CONFIG += -DUSE_SCREENSAVER _CFLAGS += $(shell pkg-config --cflags xcb-screensaver xcb-dpms) LIBS += $(shell pkg-config --libs xcb-screensaver xcb-dpms) endif -ifeq ($(SWRESAMPLE),1) -CONFIG += -DUSE_SWRESAMPLE -_CFLAGS += $(shell pkg-config --cflags libswresample) -LIBS += $(shell pkg-config --libs libswresample) -endif -ifeq ($(AVRESAMPLE),1) -CONFIG += -DUSE_AVRESAMPLE -_CFLAGS += $(shell pkg-config --cflags libavresample) -LIBS += $(shell pkg-config --libs libavresample) -endif #_CFLAGS += $(shell pkg-config --cflags libavcodec x11 x11-xcb xcb xcb-icccm) #LIBS += -lrt $(shell pkg-config --libs libavcodec x11 x11-xcb xcb xcb-icccm) -_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm) -LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm) +_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm) +LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm) _CFLAGS += -I./opengl -I./ @@ -305,10 +270,11 @@ LIBS += -L/usr/lib64 ifeq ($(CUVID),1) -LIBS += -lcuda -lnvcuvid +LIBS += -lcuda -lnvcuvid endif -LIBS += -lGLEW -lGLU -ldl -lglut +LIBS += -lGLEW -lGLU -ldl -lglut + ### Includes and Defines (add further entries here): INCLUDES += @@ -319,16 +285,15 @@ DEFINES += -DPLUGIN_NAME_I18N='"$(PLUGIN)"' -D_GNU_SOURCE $(CONFIG) \ ### Make it standard override CXXFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \ - -g -W -Wextra -Winit-self -Werror=overloaded-virtual -Wno-unused-parameter + -g -W -Wextra -Werror=overloaded-virtual -Wno-unused-parameter override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \ - -g -W -Wextra -Winit-self -std=gnu99 - + -g -W -Wextra ### The object files (add further files here): -OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o openglosd.o +OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o openglosd.o ifeq ($(GAMMA),1) -OBJS += colorramp.o +OBJS += colorramp.o ifeq ($(DRM),1) OBJS += gamma-drm.o else @@ -383,9 +348,8 @@ install-i18n: $(I18Nmsgs) $(OBJS): Makefile - $(SOFILE): $(OBJS) shaders.h - $(CXX) $(CXXFLAGS) $(LDFLAGS) -shared $(OBJS) $(LIBS) -o $@ + $(CXX) $(CXXFLAGS) $(LDFLAGS) -shared $(OBJS) $(LIBS) -o $@ install-lib: $(SOFILE) install -D $^ $(DESTDIR)$(LIBDIR)/$^.$(APIVERSION) @@ -404,17 +368,14 @@ clean: @-rm -f $(PODIR)/*.mo $(PODIR)/*.pot @-rm -f $(OBJS) $(DEPFILE) *.so *.tgz core* *~ -## Private Targets: - -HDRS= 
$(wildcard *.h) - +HDRS = $(wildcard *.h) indent: - for i in $(SRCS) $(HDRS); do \ - indent $$i; \ - unexpand -a $$i | sed -e s/constconst/const/ > $$i.up; \ - mv $$i.up $$i; \ + for i in $(SRCS) drm.c hdr.c $(HDRS); do \ + clang-format -i $$i; \ done +## Private Targets: + video_test: video.c Makefile $(CC) -DVIDEO_TEST -DVERSION='"$(VERSION)"' $(CFLAGS) $(LDFLAGS) $< \ $(LIBS) -o $@ diff --git a/README.md b/README.md index 9bd7f1f..bb77e1a 100644 --- a/README.md +++ b/README.md @@ -25,14 +25,13 @@ A software and GPU emulated UHD output device plugin for VDR. o Video decoder CUVID or VAAPI o Video output opengl or DRM - o Audio FFMpeg / Alsa / Analog - o Audio FFMpeg / Alsa / Digital - o Audio FFMpeg / OSS / Analog + o Audio FFMpeg / ALSA / Analog + o Audio FFMpeg / ALSA / Digital o HDMI/SPDIF pass-through o Software volume, compression, normalize and channel resample o VDR ScaleVideo API o CUDA deinterlacer - o Suspend / Dettach + o Suspend / Detach o Support for ambilight o Support for Screencopy o PIP (Picture-in-Picture) (only for CUVID) @@ -83,10 +82,10 @@ You have to adapt the Makefile. There are 3 possible Version that you can build: This is for INTEL cards and also uses Vaapi as decoder. It uses the DRM API for output and runs without X Server. There are several commandline options to select the resolution and refresh rate. I recommend to use libplacebo and set LIBPLACEBO_GL=1 in the Makefile. - + Libplacebo API Version >= 113 is needed. - - + + Install: -------- 1a) git @@ -96,9 +95,6 @@ Install: make make install - You can edit Makefile to enable/disable Alsa / OSS - support. The default is to autodetect as much as possible. - You have to start vdr with -P 'softhdcuvid -d :0.0 .... ' Beginners Guide for libplacebo: @@ -125,22 +121,22 @@ Beginners Guide for libplacebo: in rare cases. All other settings can be in their default state. - + Beginning with libplacebo API 58 user shaders from mpv are supported. Use -S parameter to set the shader. - The plugins searches the shaders in $ConfigDir/plugins/shaders for the shaders. One example shader is - provided in the shader subdirectory. Copy it to e.g.: /etc/vdr/plugins/shaders and then start + The plugin searches for the shaders in $ConfigDir/plugins/shaders. One example shader is + provided in the shader subdirectory. Copy it to e.g.: /etc/vdr/plugins/shaders and then start vdr -P 'softhdcuvid -S filmgrain.glsl ...' - I use KrigBilateral for UV scaling and then adaptive-sharpen for sharpening. This results in a perfect + I use KrigBilateral for UV scaling and then adaptive-sharpen for sharpening. This results in a perfect picture for me. - - You can also use a custon LUT File. It is located in $ConfigDir/shaders/lut/lut.cube. If you provide there + + You can also use a custom LUT file. It is located in $ConfigDir/shaders/lut/lut.cube. If you provide there a lut file it will be automaticly used. In the Mainmenue you can switch LUT on and off. - + Konfig Guide for softhddrm Version ---------------------------------- You should set the Monitor Type to HD TV or UHD-HDR TV depending on your TV Set With softhddrm and a HDR TV Set you can view HDR-HLG content. This is tested with Kernel 5.12 and a Intel NUC. - + Setup: environment @@ -148,27 +144,17 @@ Setup: environment Following is supported: DISPLAY=:0.0 - x11 display name + X11 display name - only if alsa is configured + ALSA configuration: ALSA_DEVICE=default - alsa PCM device name + ALSA PCM device name ALSA_PASSTHROUGH_DEVICE= - alsa pass-though (AC-3,E-AC-3,DTS,...)
device name + ALSA pass-through (AC-3,E-AC-3,DTS,...) device name ALSA_MIXER=default - alsa control device name + ALSA control device name ALSA_MIXER_CHANNEL=PCM - alsa control channel name - - only if oss is configured - OSS_AUDIODEV=/dev/dsp - oss dsp device name - OSS_PASSTHROUGHDEV= - oss pass-though (AC-3,E-AC-3,DTS,...) device name - OSS_MIXERDEV=/dev/mixer - oss mixer device name - OSS_MIXER_CHANNEL=pcm - oss mixer channel name + ALSA control channel name Setup: /etc/vdr/setup.conf ------ @@ -197,7 +183,7 @@ Setup: /etc/vdr/setup.conf (only 0, 1, 4 supported with VA-API) softhddevice..SkipChromaDeinterlace = 0 - 0 = disabled, 1 = enabled (for slower cards, poor qualität) + 0 = disabled, 1 = enabled (for slower cards, poor quality) softhddevice..InverseTelecine = 0 0 = disabled, 1 = enabled @@ -336,10 +322,7 @@ Commandline: Selects audio output module and device. "" to disable audio output - /... to use oss audio module (if compiled with oss - support) - other to use alsa audio module (if compiled with alsa - support) + other to use ALSA audio module SVDRP: ------ diff --git a/AGPL-3.0.txt b/agpl-3.0.txt similarity index 93% rename from AGPL-3.0.txt rename to agpl-3.0.txt index 1489b72..be3f7b2 100644 --- a/AGPL-3.0.txt +++ b/agpl-3.0.txt @@ -1,21 +1,21 @@ GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble - The GNU Affero General Public License is a free, copyleft license -for software and other kinds of works, specifically designed to ensure + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. - The licenses for most software and other practical works are -designed to take away your freedom to share and change the works. By -contrast, our General Public Licenses are intended to guarantee your -freedom to share and change all versions of a program--to make sure it -remains free software for all its users. + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you @@ -60,11 +60,10 @@ modification follow. 0. Definitions. - "This License" refers to version 3 of the GNU Affero General Public -License. + "This License" refers to version 3 of the GNU Affero General Public License. - "Copyright" also means copyright-like laws that apply to other kinds -of works, such as semiconductor masks. + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and @@ -377,12 +376,12 @@ that material) supplement the terms of this License with terms: All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10.
If the Program as you received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further restriction, -you may remove that term. If a license document contains a further -restriction but permits relicensing or conveying under this License, you -may add to a covered work material governed by the terms of that license -document, provided that the further restriction does not survive such -relicensing or conveying. +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the @@ -551,34 +550,34 @@ shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. - Notwithstanding any other provision of this License, you have permission -to link or combine any covered work with a work licensed under version 3 -of the GNU General Public License into a single combined work, and to -convey the resulting work. The terms of this License will continue to -apply to the part which is the covered work, but the work with which it is -combined will remain governed by version 3 of the GNU General Public -License. + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may differ -in detail to address new problems or concerns. +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero -General Public License "or any later version" applies to it, you have -the option of following the terms and conditions either of that -numbered version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number -of the GNU Affero General Public License, you may choose any version -ever published by the Free Software Foundation. +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that -proxy's public statement of acceptance of a version permanently -authorizes you to choose that version for the Program. +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any @@ -617,9 +616,9 @@ an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - END OF TERMS AND CONDITIONS + END OF TERMS AND CONDITIONS - How to Apply These Terms to Your New Programs + How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it @@ -634,9 +633,9 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as - published by the Free Software Foundation, either version 3 of the - License, or (at your option) any later version. + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -644,7 +643,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. @@ -659,4 +658,4 @@ specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see -. +. diff --git a/audio.c b/audio.c index c8fd966..d5a21c2 100644 --- a/audio.c +++ b/audio.c @@ -1,7 +1,7 @@ /// /// @file audio.c @brief Audio module /// -/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. /// /// Contributor(s): /// @@ -30,75 +30,41 @@ /// /// @note alsa async playback is broken, don't use it! /// -/// OSS PCM/Mixer api is supported. -/// @see http://manuals.opensound.com/developer/ -/// /// /// @todo FIXME: there can be problems with little/big endian. 
/// #ifdef DEBUG #undef DEBUG #endif -//#define USE_ALSA ///< enable alsa support -//#define USE_OSS ///< enable OSS support -#define USE_AUDIO_THREAD ///< use thread for audio playback -#define USE_AUDIO_MIXER ///< use audio module mixer +#define USE_AUDIO_THREAD ///< use thread for audio playback +#define USE_AUDIO_MIXER ///< use audio module mixer -#include -#include -#include #include -#include #include -#include #include +#include +#include +#include +#include +#include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut -#ifdef USE_ALSA #include -#endif -#ifdef USE_OSS -#include -#include -#include -#include -// SNDCTL_DSP_HALT_OUTPUT compatibility -#ifndef SNDCTL_DSP_HALT_OUTPUT -# if defined(SNDCTL_DSP_RESET_OUTPUT) -# define SNDCTL_DSP_HALT_OUTPUT SNDCTL_DSP_RESET_OUTPUT -# elif defined(SNDCTL_DSP_RESET) -# define SNDCTL_DSP_HALT_OUTPUT SNDCTL_DSP_RESET -# else -# error "No valid SNDCTL_DSP_HALT_OUTPUT found." -# endif -#endif -#include -#include -#include -#include -#endif #ifdef USE_AUDIO_THREAD -#ifndef __USE_GNU -#define __USE_GNU -#endif #include -#include #include -#ifndef HAVE_PTHREAD_NAME -/// only available with newer glibc -#define pthread_setname_np(thread, name) -#endif +#include #endif -#include "iatomic.h" // portable atomic_t +#include "iatomic.h" // portable atomic_t -#include "ringbuffer.h" -#include "misc.h" #include "audio.h" +#include "misc.h" +#include "ringbuffer.h" //---------------------------------------------------------------------------- // Declarations @@ -107,89 +73,87 @@ /** ** Audio output module structure and typedef. */ -typedef struct _audio_module_ -{ - const char *Name; ///< audio output module name +typedef struct _audio_module_ { + const char *Name; ///< audio output module name - int (*const Thread)(void); ///< module thread handler - void (*const FlushBuffers)(void); ///< flush sample buffers - int64_t(*const GetDelay) (void); ///< get current audio delay - void (*const SetVolume)(int); ///< set output volume - int (*const Setup)(int *, int *, int); ///< setup channels, samplerate - void (*const Play)(void); ///< play audio - void (*const Pause)(void); ///< pause audio - void (*const Init)(void); ///< initialize audio output module - void (*const Exit)(void); ///< cleanup audio output module + int (*const Thread)(void); ///< module thread handler + void (*const FlushBuffers)(void); ///< flush sample buffers + int64_t (*const GetDelay)(void); ///< get current audio delay + void (*const SetVolume)(int); ///< set output volume + int (*const Setup)(int *, int *, int); ///< setup channels, samplerate + void (*const Play)(void); ///< play audio + void (*const Pause)(void); ///< pause audio + void (*const Init)(void); ///< initialize audio output module + void (*const Exit)(void); ///< cleanup audio output module } AudioModule; -static const AudioModule NoopModule; ///< forward definition of noop module +static const AudioModule NoopModule; ///< forward definition of noop module //---------------------------------------------------------------------------- // Variables //---------------------------------------------------------------------------- -char AudioAlsaDriverBroken; ///< disable broken driver message -char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix -char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix +char AudioAlsaDriverBroken; ///< disable broken driver 
message +char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix +char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix -static const char *AudioModuleName; ///< which audio module to use +static const char *AudioModuleName; ///< which audio module to use /// Selected audio module. static const AudioModule *AudioUsedModule = &NoopModule; -static const char *AudioPCMDevice; ///< PCM device name -static const char *AudioPassthroughDevice; ///< Passthrough device name -static char AudioAppendAES; ///< flag automatic append AES -static const char *AudioMixerDevice; ///< mixer device name -static const char *AudioMixerChannel; ///< mixer channel name -static char AudioDoingInit; ///> flag in init, reduce error -static volatile char AudioRunning; ///< thread running / stopped -static volatile char AudioPaused; ///< audio paused -static volatile char AudioVideoIsReady; ///< video ready start early -static int AudioSkip; ///< skip audio to sync to video +static const char *AudioPCMDevice; ///< PCM device name +static const char *AudioPassthroughDevice; ///< Passthrough device name +static char AudioAppendAES; ///< flag automatic append AES +static const char *AudioMixerDevice; ///< mixer device name +static const char *AudioMixerChannel; ///< mixer channel name +static char AudioDoingInit; ///> flag in init, reduce error +static volatile char AudioRunning; ///< thread running / stopped +static volatile char AudioPaused; ///< audio paused +static volatile char AudioVideoIsReady; ///< video ready start early +static int AudioSkip; ///< skip audio to sync to video -static const int AudioBytesProSample = 2; ///< number of bytes per sample +static const int AudioBytesProSample = 2; ///< number of bytes per sample -static int AudioBufferTime = 336; ///< audio buffer time in ms +static int AudioBufferTime = 336; ///< audio buffer time in ms #ifdef USE_AUDIO_THREAD -static pthread_t AudioThread; ///< audio play thread -static pthread_mutex_t AudioMutex; ///< audio condition mutex -static pthread_cond_t AudioStartCond; ///< condition variable -static char AudioThreadStop; ///< stop audio thread +static pthread_t AudioThread; ///< audio play thread +static pthread_mutex_t AudioMutex; ///< audio condition mutex +static pthread_cond_t AudioStartCond; ///< condition variable +static char AudioThreadStop; ///< stop audio thread #else -static const int AudioThread; ///< dummy audio thread +static const int AudioThread; ///< dummy audio thread #endif -static char AudioSoftVolume; ///< flag use soft volume -static char AudioNormalize; ///< flag use volume normalize -static char AudioCompression; ///< flag use compress volume -static char AudioMute; ///< flag muted -static int AudioAmplifier; ///< software volume factor -static int AudioNormalizeFactor; ///< current normalize factor -static const int AudioMinNormalize = 100; ///< min. normalize factor -static int AudioMaxNormalize; ///< max. normalize factor -static int AudioCompressionFactor; ///< current compression factor -static int AudioMaxCompression; ///< max. compression factor -static int AudioStereoDescent; ///< volume descent for stereo -static int AudioVolume; ///< current volume (0 .. 
1000) +static char AudioSoftVolume; ///< flag use soft volume +static char AudioNormalize; ///< flag use volume normalize +static char AudioCompression; ///< flag use compress volume +static char AudioMute; ///< flag muted +static int AudioAmplifier; ///< software volume factor +static int AudioNormalizeFactor; ///< current normalize factor +static const int AudioMinNormalize = 100; ///< min. normalize factor +static int AudioMaxNormalize; ///< max. normalize factor +static int AudioCompressionFactor; ///< current compression factor +static int AudioMaxCompression; ///< max. compression factor +static int AudioStereoDescent; ///< volume descent for stereo +static int AudioVolume; ///< current volume (0 .. 1000) -extern int VideoAudioDelay; ///< import audio/video delay +extern int VideoAudioDelay; ///< import audio/video delay /// default ring buffer size ~2s 8ch 16bit (3 * 5 * 7 * 8) static const unsigned AudioRingBufferSize = 3 * 5 * 7 * 8 * 2 * 1000; -static int AudioChannelsInHw[9]; ///< table which channels are supported -enum _audio_rates -{ ///< sample rates enumeration - // HW: 32000 44100 48000 88200 96000 176400 192000 - // Audio32000, ///< 32.0Khz - Audio44100, ///< 44.1Khz - Audio48000, ///< 48.0Khz - // Audio88200, ///< 88.2Khz - // Audio96000, ///< 96.0Khz - // Audio176400, ///< 176.4Khz - Audio192000, ///< 192.0Khz - AudioRatesMax ///< max index +static int AudioChannelsInHw[9]; ///< table which channels are supported +enum _audio_rates { ///< sample rates enumeration + // HW: 32000 44100 48000 88200 96000 176400 192000 + // Audio32000, ///< 32.0Khz + Audio44100, ///< 44.1Khz + Audio48000, ///< 48.0Khz + // Audio88200, ///< 88.2Khz + // Audio96000, ///< 96.0Khz + // Audio176400, ///< 176.4Khz + Audio192000, ///< 192.0Khz + AudioRatesMax ///< max index }; /// table which rates are supported @@ -199,22 +163,20 @@ static int AudioRatesInHw[AudioRatesMax]; static int AudioChannelMatrix[AudioRatesMax][9]; /// rates tables (must be sorted by frequency) -static const unsigned AudioRatesTable[AudioRatesMax] = { - 44100, 48000, 192000 -}; +static const unsigned AudioRatesTable[AudioRatesMax] = {44100, 48000, 192000}; //---------------------------------------------------------------------------- // filter //---------------------------------------------------------------------------- -static const int AudioNormSamples = 4096; ///< number of samples +static const int AudioNormSamples = 4096; ///< number of samples -#define AudioNormMaxIndex 128 ///< number of average values +#define AudioNormMaxIndex 128 ///< number of average values /// average of n last sample blocks static uint32_t AudioNormAverage[AudioNormMaxIndex]; -static int AudioNormIndex; ///< index into average table -static int AudioNormReady; ///< index counter -static int AudioNormCounter; ///< sample counter +static int AudioNormIndex; ///< index into average table +static int AudioNormReady; ///< index counter +static int AudioNormCounter; ///< sample counter /** ** Audio normalizer. 
@@ -222,8 +184,7 @@ static int AudioNormCounter; ///< sample counter ** @param samples sample buffer ** @param count number of bytes in sample buffer */ -static void AudioNormalizer(int16_t * samples, int count) -{ +static void AudioNormalizer(int16_t *samples, int count) { int i; int l; int n; @@ -259,7 +220,7 @@ static void AudioNormalizer(int16_t * samples, int count) // calculate normalize factor if (avg > 0) { - factor = ((INT16_MAX / 8) * 1000U) / (uint32_t) sqrt(avg); + factor = ((INT16_MAX / 8) * 1000U) / (uint32_t)sqrt(avg); // smooth normalize AudioNormalizeFactor = (AudioNormalizeFactor * 500 + factor * 500) / 1000; if (AudioNormalizeFactor < AudioMinNormalize) { @@ -272,7 +233,7 @@ static void AudioNormalizer(int16_t * samples, int count) factor = 1000; } Debug(4, "audio/noramlize: avg %8d, fac=%6.3f, norm=%6.3f\n", avg, factor / 1000.0, - AudioNormalizeFactor / 1000.0); + AudioNormalizeFactor / 1000.0); } AudioNormIndex = (AudioNormIndex + 1) % AudioNormMaxIndex; @@ -300,8 +261,7 @@ static void AudioNormalizer(int16_t * samples, int count) /** ** Reset normalizer. */ -static void AudioResetNormalizer(void) -{ +static void AudioResetNormalizer(void) { int i; AudioNormCounter = 0; @@ -318,8 +278,7 @@ static void AudioResetNormalizer(void) ** @param samples sample buffer ** @param count number of bytes in sample buffer */ -static void AudioCompressor(int16_t * samples, int count) -{ +static void AudioCompressor(int16_t *samples, int count) { int max_sample; int i; int factor; @@ -341,17 +300,17 @@ static void AudioCompressor(int16_t * samples, int count) // smooth compression (FIXME: make configurable?) AudioCompressionFactor = (AudioCompressionFactor * 950 + factor * 50) / 1000; if (AudioCompressionFactor > factor) { - AudioCompressionFactor = factor; // no clipping + AudioCompressionFactor = factor; // no clipping } if (AudioCompressionFactor > AudioMaxCompression) { AudioCompressionFactor = AudioMaxCompression; } } else { - return; // silent nothing todo + return; // silent nothing todo } Debug(4, "audio/compress: max %5d, fac=%6.3f, com=%6.3f\n", max_sample, factor / 1000.0, - AudioCompressionFactor / 1000.0); + AudioCompressionFactor / 1000.0); // apply compression factor for (i = 0; i < count / AudioBytesProSample; ++i) { @@ -370,8 +329,7 @@ static void AudioCompressor(int16_t * samples, int count) /** ** Reset compressor. 
*/ -static void AudioResetCompressor(void) -{ +static void AudioResetCompressor(void) { AudioCompressionFactor = 2000; if (AudioCompressionFactor > AudioMaxCompression) { AudioCompressionFactor = AudioMaxCompression; @@ -386,8 +344,7 @@ static void AudioResetCompressor(void) ** ** @todo FIXME: this does hard clipping */ -static void AudioSoftAmplifier(int16_t * samples, int count) -{ +static void AudioSoftAmplifier(int16_t *samples, int count) { int i; // silence @@ -418,8 +375,7 @@ static void AudioSoftAmplifier(int16_t * samples, int count) ** @param frames number of frames in sample buffer ** @param out output sample buffer */ -static void AudioMono2Stereo(const int16_t * in, int frames, int16_t * out) -{ +static void AudioMono2Stereo(const int16_t *in, int frames, int16_t *out) { int i; for (i = 0; i < frames; ++i) { @@ -438,8 +394,7 @@ static void AudioMono2Stereo(const int16_t * in, int frames, int16_t * out) ** @param frames number of frames in sample buffer ** @param out output sample buffer */ -static void AudioStereo2Mono(const int16_t * in, int frames, int16_t * out) -{ +static void AudioStereo2Mono(const int16_t *in, int frames, int16_t *out) { int i; for (i = 0; i < frames; i += 2) { @@ -459,64 +414,63 @@ static void AudioStereo2Mono(const int16_t * in, int frames, int16_t * out) ** @param frames number of frames in sample buffer ** @param out output sample buffer */ -static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames, int16_t * out) -{ +static void AudioSurround2Stereo(const int16_t *in, int in_chan, int frames, int16_t *out) { while (frames--) { int l; int r; switch (in_chan) { - case 3: // stereo or surround? =>stereo - l = in[0] * 600; // L - r = in[1] * 600; // R - l += in[2] * 400; // C + case 3: // stereo or surround? =>stereo + l = in[0] * 600; // L + r = in[1] * 600; // R + l += in[2] * 400; // C r += in[2] * 400; break; - case 4: // quad or surround? =>quad - l = in[0] * 600; // L - r = in[1] * 600; // R - l += in[2] * 400; // Ls - r += in[3] * 400; // Rs + case 4: // quad or surround? 
=>quad + l = in[0] * 600; // L + r = in[1] * 600; // R + l += in[2] * 400; // Ls + r += in[3] * 400; // Rs break; - case 5: // 5.0 - l = in[0] * 500; // L - r = in[1] * 500; // R - l += in[2] * 200; // Ls - r += in[3] * 200; // Rs - l += in[4] * 300; // C + case 5: // 5.0 + l = in[0] * 500; // L + r = in[1] * 500; // R + l += in[2] * 200; // Ls + r += in[3] * 200; // Rs + l += in[4] * 300; // C r += in[4] * 300; break; - case 6: // 5.1 - l = in[0] * 400; // L - r = in[1] * 400; // R - l += in[2] * 200; // Ls - r += in[3] * 200; // Rs - l += in[4] * 300; // C + case 6: // 5.1 + l = in[0] * 400; // L + r = in[1] * 400; // R + l += in[2] * 200; // Ls + r += in[3] * 200; // Rs + l += in[4] * 300; // C r += in[4] * 300; - l += in[5] * 100; // LFE + l += in[5] * 100; // LFE r += in[5] * 100; break; - case 7: // 7.0 - l = in[0] * 400; // L - r = in[1] * 400; // R - l += in[2] * 200; // Ls - r += in[3] * 200; // Rs - l += in[4] * 300; // C + case 7: // 7.0 + l = in[0] * 400; // L + r = in[1] * 400; // R + l += in[2] * 200; // Ls + r += in[3] * 200; // Rs + l += in[4] * 300; // C r += in[4] * 300; - l += in[5] * 100; // RL - r += in[6] * 100; // RR + l += in[5] * 100; // RL + r += in[6] * 100; // RR break; - case 8: // 7.1 - l = in[0] * 400; // L - r = in[1] * 400; // R - l += in[2] * 150; // Ls - r += in[3] * 150; // Rs - l += in[4] * 250; // C + case 8: // 7.1 + l = in[0] * 400; // L + r = in[1] * 400; // R + l += in[2] * 150; // Ls + r += in[3] * 150; // Rs + l += in[4] * 250; // C r += in[4] * 250; - l += in[5] * 100; // LFE + l += in[5] * 100; // LFE r += in[5] * 100; - l += in[6] * 100; // RL - r += in[7] * 100; // RR + l += in[6] * 100; // RL + r += in[7] * 100; // RR break; default: abort(); @@ -538,15 +492,14 @@ static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames, in ** @param out output sample buffer ** @param out_chan nr. of output channels */ -static void AudioUpmix(const int16_t * in, int in_chan, int frames, int16_t * out, int out_chan) -{ +static void AudioUpmix(const int16_t *in, int in_chan, int frames, int16_t *out, int out_chan) { while (frames--) { int i; for (i = 0; i < in_chan; ++i) { // copy existing channels *out++ = *in++; } - for (; i < out_chan; ++i) { // silents missing channels + for (; i < out_chan; ++i) { // silents missing channels *out++ = 0; } } @@ -568,8 +521,7 @@ static void AudioUpmix(const int16_t * in, int in_chan, int frames, int16_t * ou ** @param out output sample buffer ** @param out_chan nr. of output channels */ -static void AudioResample(const int16_t * in, int in_chan, int frames, int16_t * out, int out_chan) -{ +static void AudioResample(const int16_t *in, int in_chan, int frames, int16_t *out, int out_chan) { switch (in_chan * 8 + out_chan) { case 1 * 8 + 1: case 2 * 8 + 2: @@ -578,7 +530,7 @@ static void AudioResample(const int16_t * in, int in_chan, int frames, int16_t * case 5 * 8 + 5: case 6 * 8 + 6: case 7 * 8 + 7: - case 8 * 8 + 8: // input = output channels + case 8 * 8 + 8: // input = output channels memcpy(out, in, frames * in_chan * AudioBytesProSample); break; case 2 * 8 + 1: @@ -616,30 +568,29 @@ static void AudioResample(const int16_t * in, int in_chan, int frames, int16_t * // ring buffer //---------------------------------------------------------------------------- -#define AUDIO_RING_MAX 8 ///< number of audio ring buffers +#define AUDIO_RING_MAX 8 ///< number of audio ring buffers /** ** Audio ring buffer. 
*/ -typedef struct _audio_ring_ring_ -{ - char FlushBuffers; ///< flag: flush buffers - char Passthrough; ///< flag: use pass-through (AC-3, ...) - int16_t PacketSize; ///< packet size - unsigned HwSampleRate; ///< hardware sample rate in Hz - unsigned HwChannels; ///< hardware number of channels - unsigned InSampleRate; ///< input sample rate in Hz - unsigned InChannels; ///< input number of channels - int64_t PTS; ///< pts clock - RingBuffer *RingBuffer; ///< sample ring buffer +typedef struct _audio_ring_ring_ { + char FlushBuffers; ///< flag: flush buffers + char Passthrough; ///< flag: use pass-through (AC-3, ...) + int16_t PacketSize; ///< packet size + unsigned HwSampleRate; ///< hardware sample rate in Hz + unsigned HwChannels; ///< hardware number of channels + unsigned InSampleRate; ///< input sample rate in Hz + unsigned InChannels; ///< input number of channels + int64_t PTS; ///< pts clock + RingBuffer *RingBuffer; ///< sample ring buffer } AudioRingRing; - /// ring of audio ring buffers +/// ring of audio ring buffers static AudioRingRing AudioRing[AUDIO_RING_MAX]; -static int AudioRingWrite; ///< audio ring write pointer -static int AudioRingRead; ///< audio ring read pointer -static atomic_t AudioRingFilled; ///< how many of the ring is used -static unsigned AudioStartThreshold; ///< start play, if filled +static int AudioRingWrite; ///< audio ring write pointer +static int AudioRingRead; ///< audio ring read pointer +static atomic_t AudioRingFilled; ///< how many of the ring is used +static unsigned AudioStartThreshold; ///< start play, if filled /** ** Add sample-rate, number of channels change to ring. @@ -653,8 +604,7 @@ static unsigned AudioStartThreshold; ///< start play, if filled ** ** @note this function shouldn't fail. Checks are done during AudoInit. */ -static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) -{ +static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) { unsigned u; // search supported sample-rates @@ -667,15 +617,15 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) } } Error(_("audio: %dHz sample-rate unsupported\n"), sample_rate); - return -1; // unsupported sample-rate + return -1; // unsupported sample-rate - found: +found: if (!AudioChannelMatrix[u][channels]) { Error(_("audio: %d channels unsupported\n"), channels); - return -1; // unsupported nr. of channels + return -1; // unsupported nr. of channels } - if (atomic_read(&AudioRingFilled) == AUDIO_RING_MAX) { // no free slot + if (atomic_read(&AudioRingFilled) == AUDIO_RING_MAX) { // no free slot // FIXME: can wait for ring buffer empty Error(_("audio: out of ring buffers\n")); return -1; @@ -711,8 +661,7 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough) /** ** Setup audio ring. */ -static void AudioRingInit(void) -{ +static void AudioRingInit(void) { int i; for (i = 0; i < AUDIO_RING_MAX; ++i) { @@ -725,8 +674,7 @@ static void AudioRingInit(void) /** ** Cleanup audio ring. 
*/ -static void AudioRingExit(void) -{ +static void AudioRingExit(void) { int i; for (i = 0; i < AUDIO_RING_MAX; ++i) { @@ -734,15 +682,13 @@ static void AudioRingExit(void) RingBufferDel(AudioRing[i].RingBuffer); AudioRing[i].RingBuffer = NULL; } - AudioRing[i].HwSampleRate = 0; // checked for valid setup + AudioRing[i].HwSampleRate = 0; // checked for valid setup AudioRing[i].InSampleRate = 0; } AudioRingRead = 0; AudioRingWrite = 0; } -#ifdef USE_ALSA - //============================================================================ // A L S A //============================================================================ @@ -751,9 +697,9 @@ static void AudioRingExit(void) // Alsa variables //---------------------------------------------------------------------------- -static snd_pcm_t *AlsaPCMHandle; ///< alsa pcm handle -static char AlsaCanPause; ///< hw supports pause -static int AlsaUseMmap; ///< use mmap +static snd_pcm_t *AlsaPCMHandle; ///< alsa pcm handle +static char AlsaCanPause; ///< hw supports pause +static int AlsaUseMmap; ///< use mmap static snd_mixer_t *AlsaMixer; ///< alsa mixer handle static snd_mixer_elem_t *AlsaMixerElem; ///< alsa pcm mixer element @@ -772,12 +718,11 @@ static int AlsaRatio; ///< internal -> mixer ratio * 1000 ** @retval 1 ring buffer empty ** @retval -1 underrun error */ -static int AlsaPlayRingbuffer(void) -{ +static int AlsaPlayRingbuffer(void) { int first; first = 1; - for (;;) { // loop for ring buffer wrap + for (;;) { // loop for ring buffer wrap int avail; int n; int err; @@ -799,17 +744,16 @@ static int AlsaPlayRingbuffer(void) return -1; } avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, n); - if (avail < 256) { // too much overhead + if (avail < 256) { // too much overhead if (first) { // happens with broken alsa drivers if (AudioThread) { if (!AudioAlsaDriverBroken) { Error(_("audio/alsa: broken driver %d state '%s'\n"), avail, - snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); + snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); } // try to recover - if (snd_pcm_state(AlsaPCMHandle) - == SND_PCM_STATE_PREPARED) { + if (snd_pcm_state(AlsaPCMHandle) == SND_PCM_STATE_PREPARED) { if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) { Error(_("audio/alsa: snd_pcm_start(): %s\n"), snd_strerror(err)); } @@ -822,8 +766,8 @@ static int AlsaPlayRingbuffer(void) } n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); - if (!n) { // ring buffer empty - if (first) { // only error on first loop + if (!n) { // ring buffer empty + if (first) { // only error on first loop Debug(4, "audio/alsa: empty buffers %d\n", avail); // ring buffer empty // AlsaLowWaterMark = 1; @@ -831,16 +775,16 @@ static int AlsaPlayRingbuffer(void) } return 0; } - if (n < avail) { // not enough bytes in ring buffer + if (n < avail) { // not enough bytes in ring buffer avail = n; } - if (!avail) { // full or buffer empty + if (!avail) { // full or buffer empty break; } // muting pass-through AC-3, can produce disturbance if (AudioMute || (AudioSoftVolume && !AudioRing[AudioRingRead].Passthrough)) { // FIXME: quick&dirty cast - AudioSoftAmplifier((int16_t *) p, avail); + AudioSoftAmplifier((int16_t *)p, avail); // FIXME: if not all are written, we double amplify them } frames = snd_pcm_bytes_to_frames(AlsaPCMHandle, avail); @@ -856,7 +800,7 @@ static int AlsaPlayRingbuffer(void) } else { err = snd_pcm_writei(AlsaPCMHandle, p, frames); } - //Debug(3, "audio/alsa: wrote %d/%d frames\n", err, frames); + // Debug(3, "audio/alsa: wrote %d/%d frames\n", err, frames); if (err != 
frames) { if (err < 0) { if (err == -EAGAIN) { @@ -883,7 +827,6 @@ static int AlsaPlayRingbuffer(void) } RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, avail); first = 0; - } return 0; } @@ -891,8 +834,7 @@ static int AlsaPlayRingbuffer(void) /** ** Flush alsa buffers. */ -static void AlsaFlushBuffers(void) -{ +static void AlsaFlushBuffers(void) { if (AlsaPCMHandle) { int err; snd_pcm_state_t state; @@ -926,8 +868,7 @@ static void AlsaFlushBuffers(void) ** @retval 0 underrun ** @retval 1 running */ -static int AlsaThread(void) -{ +static int AlsaThread(void) { int err; if (!AlsaPCMHandle) { @@ -951,14 +892,14 @@ static int AlsaThread(void) } break; } - if (!err || AudioPaused) { // timeout or some commands + if (!err || AudioPaused) { // timeout or some commands return 1; } if ((err = AlsaPlayRingbuffer())) { // empty or error snd_pcm_state_t state; - if (err < 0) { // underrun error + if (err < 0) { // underrun error return -1; } @@ -968,7 +909,7 @@ static int AlsaThread(void) return 0; } - usleep(24 * 1000); // let fill/empty the buffers + usleep(24 * 1000); // let fill/empty the buffers } return 1; } @@ -982,19 +923,17 @@ static int AlsaThread(void) ** ** @param passthrough use pass-through (AC-3, ...) device */ -static snd_pcm_t *AlsaOpenPCM(int passthrough) -{ +static snd_pcm_t *AlsaOpenPCM(int passthrough) { const char *device; snd_pcm_t *handle; int err; // &&|| hell - if (!(passthrough && ((device = AudioPassthroughDevice) - || (device = getenv("ALSA_PASSTHROUGH_DEVICE")))) - && !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) { + if (!(passthrough && ((device = AudioPassthroughDevice) || (device = getenv("ALSA_PASSTHROUGH_DEVICE")))) && + !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) { device = "default"; } - if (!AudioDoingInit) { // reduce blabla during init + if (!AudioDoingInit) { // reduce blabla during init Info(_("audio/alsa: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device); } // @@ -1002,19 +941,19 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough) // if (passthrough && AudioAppendAES) { #if 0 - // FIXME: not yet finished - char *buf; - const char *s; - int n; + // FIXME: not yet finished + char *buf; + const char *s; + int n; - n = strlen(device); - buf = alloca(n + sizeof(":AES0=6") + 1); - strcpy(buf, device); - if (!(s = strchr(buf, ':'))) { - // no alsa parameters - strcpy(buf + n, ":AES=6"); - } - Debug(3, "audio/alsa: try '%s'\n", buf); + n = strlen(device); + buf = alloca(n + sizeof(":AES0=6") + 1); + strcpy(buf, device); + if (!(s = strchr(buf, ':'))) { + // no alsa parameters + strcpy(buf + n, ":AES=6"); + } + Debug(3, "audio/alsa: try '%s'\n", buf); #endif } // open none blocking; if device is already used, we don't want wait @@ -1034,8 +973,7 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough) ** ** @see AudioPCMDevice */ -static void AlsaInitPCM(void) -{ +static void AlsaInitPCM(void) { snd_pcm_t *handle; snd_pcm_hw_params_t *hw_params; int err; @@ -1064,8 +1002,7 @@ static void AlsaInitPCM(void) ** ** @param volume volume (0 .. 1000) */ -static void AlsaSetVolume(int volume) -{ +static void AlsaSetVolume(int volume) { int v; if (AlsaMixer && AlsaMixerElem) { @@ -1078,8 +1015,7 @@ static void AlsaSetVolume(int volume) /** ** Initialize alsa mixer. 
*/ -static void AlsaInitMixer(void) -{ +static void AlsaInitMixer(void) { const char *device; const char *channel; snd_mixer_t *alsa_mixer; @@ -1099,8 +1035,8 @@ static void AlsaInitMixer(void) } Debug(3, "audio/alsa: mixer %s - %s open\n", device, channel); snd_mixer_open(&alsa_mixer, 0); - if (alsa_mixer && snd_mixer_attach(alsa_mixer, device) >= 0 - && snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0 && snd_mixer_load(alsa_mixer) >= 0) { + if (alsa_mixer && snd_mixer_attach(alsa_mixer, device) >= 0 && + snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0 && snd_mixer_load(alsa_mixer) >= 0) { const char *const alsa_mixer_elem_name = channel; @@ -1113,7 +1049,7 @@ static void AlsaInitMixer(void) snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem, &alsa_mixer_elem_min, &alsa_mixer_elem_max); AlsaRatio = 1000 * (alsa_mixer_elem_max - alsa_mixer_elem_min); Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n", alsa_mixer_elem_min, alsa_mixer_elem_max, - AlsaRatio); + AlsaRatio); break; } @@ -1138,8 +1074,7 @@ static void AlsaInitMixer(void) ** ** @todo FIXME: handle the case no audio running */ -static int64_t AlsaGetDelay(void) -{ +static int64_t AlsaGetDelay(void) { int err; snd_pcm_sframes_t delay; int64_t pts; @@ -1154,7 +1089,7 @@ static int64_t AlsaGetDelay(void) delay = 0L; #ifdef DEBUG } else if (snd_pcm_state(AlsaPCMHandle) != SND_PCM_STATE_RUNNING) { - //Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay); + // Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay); #endif } Debug(4, "audio/alsa: %ld frames hw delay\n", delay); @@ -1164,7 +1099,7 @@ static int64_t AlsaGetDelay(void) delay = 0L; } - pts = ((int64_t) delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate; + pts = ((int64_t)delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate; return pts; } @@ -1182,47 +1117,45 @@ static int64_t AlsaGetDelay(void) ** ** @todo FIXME: remove pointer for freq + channels */ -static int AlsaSetup(int *freq, int *channels, int passthrough) -{ +static int AlsaSetup(int *freq, int *channels, int passthrough) { snd_pcm_uframes_t buffer_size; snd_pcm_uframes_t period_size; int err; int delay; - if (!AlsaPCMHandle) { // alsa not running yet + if (!AlsaPCMHandle) { // alsa not running yet // FIXME: if open fails for fe. pass-through, we never recover return -1; } - if (!AudioAlsaNoCloseOpen) { // close+open to fix HDMI no sound bug + if (!AudioAlsaNoCloseOpen) { // close+open to fix HDMI no sound bug snd_pcm_t *handle; handle = AlsaPCMHandle; // no lock needed, thread exit in main loop only - //Debug(3, "audio: %s [\n", __FUNCTION__); - AlsaPCMHandle = NULL; // other threads should check handle + // Debug(3, "audio: %s [\n", __FUNCTION__); + AlsaPCMHandle = NULL; // other threads should check handle snd_pcm_close(handle); if (AudioAlsaCloseOpenDelay) { - usleep(50 * 1000); // 50ms delay for alsa recovery + usleep(50 * 1000); // 50ms delay for alsa recovery } // FIXME: can use multiple retries if (!(handle = AlsaOpenPCM(passthrough))) { return -1; } AlsaPCMHandle = handle; - //Debug(3, "audio: %s ]\n", __FUNCTION__); + // Debug(3, "audio: %s ]\n", __FUNCTION__); } for (;;) { - if ((err = - snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, - AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1, - 96 * 1000))) { + if ((err = snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, + AlsaUseMmap ? 
SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, + *channels, *freq, 1, 96 * 1000))) { // try reduced buffer size (needed for sunxi) // FIXME: alternativ make this configurable if ((err = - snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, - AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, *channels, - *freq, 1, 72 * 1000))) { + snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16, + AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, + *channels, *freq, 1, 72 * 1000))) { /* if ( err == -EBADFD ) { @@ -1243,7 +1176,7 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) } // this is disabled, no advantages! - if (0) { // no underruns allowed, play silence + if (0) { // no underruns allowed, play silence snd_pcm_sw_params_t *sw_params; snd_pcm_uframes_t boundary; @@ -1270,9 +1203,9 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) snd_pcm_get_params(AlsaPCMHandle, &buffer_size, &period_size); Debug(3, "audio/alsa: buffer size %lu %zdms, period size %lu %zdms\n", buffer_size, - snd_pcm_frames_to_bytes(AlsaPCMHandle, buffer_size) * 1000 / (*freq * *channels * AudioBytesProSample), - period_size, snd_pcm_frames_to_bytes(AlsaPCMHandle, - period_size) * 1000 / (*freq * *channels * AudioBytesProSample)); + snd_pcm_frames_to_bytes(AlsaPCMHandle, buffer_size) * 1000 / (*freq * *channels * AudioBytesProSample), + period_size, + snd_pcm_frames_to_bytes(AlsaPCMHandle, period_size) * 1000 / (*freq * *channels * AudioBytesProSample)); Debug(3, "audio/alsa: state %s\n", snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle))); AudioStartThreshold = snd_pcm_frames_to_bytes(AlsaPCMHandle, period_size); @@ -1289,8 +1222,8 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) AudioStartThreshold = AudioRingBufferSize / 3; } if (!AudioDoingInit) { - Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000) - / (*freq * *channels * AudioBytesProSample)); + Info(_("audio/alsa: start delay %ums\n"), + (AudioStartThreshold * 1000) / (*freq * *channels * AudioBytesProSample)); } return 0; } @@ -1298,8 +1231,7 @@ static int AlsaSetup(int *freq, int *channels, int passthrough) /** ** Play audio. */ -static void AlsaPlay(void) -{ +static void AlsaPlay(void) { int err; if (AlsaCanPause) { @@ -1321,8 +1253,7 @@ static void AlsaPlay(void) /** ** Pause audio. */ -static void AlsaPause(void) -{ +static void AlsaPause(void) { int err; if (AlsaCanPause) { @@ -1339,20 +1270,14 @@ static void AlsaPause(void) /** ** Empty log callback */ -static void AlsaNoopCallback( __attribute__((unused)) - const char *file, __attribute__((unused)) - int line, __attribute__((unused)) - const char *function, __attribute__((unused)) - int err, __attribute__((unused)) - const char *fmt, ...) -{ -} +static void AlsaNoopCallback(__attribute__((unused)) const char *file, __attribute__((unused)) int line, + __attribute__((unused)) const char *function, __attribute__((unused)) int err, + __attribute__((unused)) const char *fmt, ...) {} /** ** Initialize alsa audio output module. */ -static void AlsaInit(void) -{ +static void AlsaInit(void) { #ifdef DEBUG (void)AlsaNoopCallback; #else @@ -1367,8 +1292,7 @@ static void AlsaInit(void) /** ** Cleanup alsa audio output module. 
*/ -static void AlsaExit(void) -{ +static void AlsaExit(void) { if (AlsaPCMHandle) { snd_pcm_close(AlsaPCMHandle); AlsaPCMHandle = NULL; @@ -1398,497 +1322,6 @@ static const AudioModule AlsaModule = { .Exit = AlsaExit, }; -#endif // USE_ALSA - -#ifdef USE_OSS - -//============================================================================ -// O S S -//============================================================================ - -//---------------------------------------------------------------------------- -// OSS variables -//---------------------------------------------------------------------------- - -static int OssPcmFildes = -1; ///< pcm file descriptor -static int OssMixerFildes = -1; ///< mixer file descriptor -static int OssMixerChannel; ///< mixer channel index -static int OssFragmentTime; ///< fragment time in ms - -//---------------------------------------------------------------------------- -// OSS pcm -//---------------------------------------------------------------------------- - -/** -** Play samples from ringbuffer. -** -** @retval 0 ok -** @retval 1 ring buffer empty -** @retval -1 underrun error -*/ -static int OssPlayRingbuffer(void) -{ - int first; - - first = 1; - for (;;) { - audio_buf_info bi; - const void *p; - int n; - - if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), strerror(errno)); - return -1; - } - Debug(4, "audio/oss: %d bytes free\n", bi.bytes); - - n = RingBufferGetReadPointer(AudioRing[AudioRingRead].RingBuffer, &p); - if (!n) { // ring buffer empty - if (first) { // only error on first loop - return 1; - } - return 0; - } - if (n < bi.bytes) { // not enough bytes in ring buffer - bi.bytes = n; - } - if (bi.bytes <= 0) { // full or buffer empty - break; // bi.bytes could become negative! - } - - if (AudioSoftVolume && !AudioRing[AudioRingRead].Passthrough) { - // FIXME: quick&dirty cast - AudioSoftAmplifier((int16_t *) p, bi.bytes); - // FIXME: if not all are written, we double amplify them - } - for (;;) { - n = write(OssPcmFildes, p, bi.bytes); - if (n != bi.bytes) { - if (n < 0) { - if (n == EAGAIN) { - continue; - } - Error(_("audio/oss: write error: %s\n"), strerror(errno)); - return 1; - } - Warning(_("audio/oss: error not all bytes written\n")); - } - break; - } - // advance how many could written - RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, n); - first = 0; - } - - return 0; -} - -/** -** Flush OSS buffers. 
-*/ -static void OssFlushBuffers(void) -{ - if (OssPcmFildes != -1) { - // flush kernel buffers - if (ioctl(OssPcmFildes, SNDCTL_DSP_HALT_OUTPUT, NULL) < 0) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_HALT_OUTPUT): %s\n"), strerror(errno)); - } - } -} - -#ifdef USE_AUDIO_THREAD - -//---------------------------------------------------------------------------- -// thread playback -//---------------------------------------------------------------------------- - -/** -** OSS thread -** -** @retval -1 error -** @retval 0 underrun -** @retval 1 running -*/ -static int OssThread(void) -{ - int err; - - if (!OssPcmFildes) { - usleep(OssFragmentTime * 1000); - return -1; - } - for (;;) { - struct pollfd fds[1]; - - if (AudioPaused) { - return 1; - } - // wait for space in kernel buffers - fds[0].fd = OssPcmFildes; - fds[0].events = POLLOUT | POLLERR; - // wait for space in kernel buffers - err = poll(fds, 1, OssFragmentTime); - if (err < 0) { - if (err == EAGAIN) { - continue; - } - Error(_("audio/oss: error poll %s\n"), strerror(errno)); - usleep(OssFragmentTime * 1000); - return -1; - } - break; - } - if (!err || AudioPaused) { // timeout or some commands - return 1; - } - - if ((err = OssPlayRingbuffer())) { // empty / error - if (err < 0) { // underrun error - return -1; - } - sched_yield(); - usleep(OssFragmentTime * 1000); // let fill/empty the buffers - return 0; - } - - return 1; -} - -#endif - -//---------------------------------------------------------------------------- - -/** -** Open OSS pcm device. -** -** @param passthrough use pass-through (AC-3, ...) device -*/ -static int OssOpenPCM(int passthrough) -{ - const char *device; - int fildes; - - // &&|| hell - if (!(passthrough && ((device = AudioPassthroughDevice) - || (device = getenv("OSS_PASSTHROUGHDEV")))) - && !(device = AudioPCMDevice) && !(device = getenv("OSS_AUDIODEV"))) { - device = "/dev/dsp"; - } - if (!AudioDoingInit) { - Info(_("audio/oss: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device); - } - - if ((fildes = open(device, O_WRONLY)) < 0) { - Error(_("audio/oss: can't open dsp device '%s': %s\n"), device, strerror(errno)); - return -1; - } - return fildes; -} - -/** -** Initialize OSS pcm device. -** -** @see AudioPCMDevice -*/ -static void OssInitPCM(void) -{ - int fildes; - - fildes = OssOpenPCM(0); - - OssPcmFildes = fildes; -} - -//---------------------------------------------------------------------------- -// OSS Mixer -//---------------------------------------------------------------------------- - -/** -** Set OSS mixer volume (0-1000) -** -** @param volume volume (0 .. 1000) -*/ -static void OssSetVolume(int volume) -{ - int v; - - if (OssMixerFildes != -1) { - v = (volume * 255) / 1000; - v &= 0xff; - v = (v << 8) | v; - if (ioctl(OssMixerFildes, MIXER_WRITE(OssMixerChannel), &v) < 0) { - Error(_("audio/oss: ioctl(MIXER_WRITE): %s\n"), strerror(errno)); - } - } -} - -/** -** Mixer channel name table. -*/ -static const char *OssMixerChannelNames[SOUND_MIXER_NRDEVICES] = SOUND_DEVICE_NAMES; - -/** -** Initialize OSS mixer. 
-*/ -static void OssInitMixer(void) -{ - const char *device; - const char *channel; - int fildes; - int devmask; - int i; - - if (!(device = AudioMixerDevice)) { - if (!(device = getenv("OSS_MIXERDEV"))) { - device = "/dev/mixer"; - } - } - if (!(channel = AudioMixerChannel)) { - if (!(channel = getenv("OSS_MIXER_CHANNEL"))) { - channel = "pcm"; - } - } - Debug(3, "audio/oss: mixer %s - %s open\n", device, channel); - - if ((fildes = open(device, O_RDWR)) < 0) { - Error(_("audio/oss: can't open mixer device '%s': %s\n"), device, strerror(errno)); - return; - } - // search channel name - if (ioctl(fildes, SOUND_MIXER_READ_DEVMASK, &devmask) < 0) { - Error(_("audio/oss: ioctl(SOUND_MIXER_READ_DEVMASK): %s\n"), strerror(errno)); - close(fildes); - return; - } - for (i = 0; i < SOUND_MIXER_NRDEVICES; ++i) { - if (!strcasecmp(OssMixerChannelNames[i], channel)) { - if (devmask & (1 << i)) { - OssMixerFildes = fildes; - OssMixerChannel = i; - return; - } - Error(_("audio/oss: channel '%s' not supported\n"), channel); - break; - } - } - Error(_("audio/oss: channel '%s' not found\n"), channel); - close(fildes); -} - -//---------------------------------------------------------------------------- -// OSS API -//---------------------------------------------------------------------------- - -/** -** Get OSS audio delay in time stamps. -** -** @returns audio delay in time stamps. -*/ -static int64_t OssGetDelay(void) -{ - int delay; - int64_t pts; - - // setup failure - if (OssPcmFildes == -1 || !AudioRing[AudioRingRead].HwSampleRate) { - return 0L; - } - if (!AudioRunning) { // audio not running - Error(_("audio/oss: should not happen\n")); - return 0L; - } - // delay in bytes in kernel buffers - delay = -1; - if (ioctl(OssPcmFildes, SNDCTL_DSP_GETODELAY, &delay) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"), strerror(errno)); - return 0L; - } - if (delay < 0) { - delay = 0; - } - - pts = ((int64_t) delay * 90 * 1000) - / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels * AudioBytesProSample); - - return pts; -} - -/** -** Setup OSS audio for requested format. -** -** @param sample_rate sample rate/frequency -** @param channels number of channels -** @param passthrough use pass-through (AC-3, ...) device -** -** @retval 0 everything ok -** @retval 1 didn't support frequency/channels combination -** @retval -1 something gone wrong -*/ -static int OssSetup(int *sample_rate, int *channels, int passthrough) -{ - int ret; - int tmp; - int delay; - audio_buf_info bi; - - if (OssPcmFildes == -1) { // OSS not ready - // FIXME: if open fails for fe. 
pass-through, we never recover - return -1; - } - - if (1) { // close+open for pcm / AC-3 - int fildes; - - fildes = OssPcmFildes; - OssPcmFildes = -1; - close(fildes); - if (!(fildes = OssOpenPCM(passthrough))) { - return -1; - } - OssPcmFildes = fildes; - } - - ret = 0; - - tmp = AFMT_S16_NE; // native 16 bits - if (ioctl(OssPcmFildes, SNDCTL_DSP_SETFMT, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_SETFMT): %s\n"), strerror(errno)); - // FIXME: stop player, set setup failed flag - return -1; - } - if (tmp != AFMT_S16_NE) { - Error(_("audio/oss: device doesn't support 16 bit sample format.\n")); - // FIXME: stop player, set setup failed flag - return -1; - } - - tmp = *channels; - if (ioctl(OssPcmFildes, SNDCTL_DSP_CHANNELS, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_CHANNELS): %s\n"), strerror(errno)); - return -1; - } - if (tmp != *channels) { - Warning(_("audio/oss: device doesn't support %d channels.\n"), *channels); - *channels = tmp; - ret = 1; - } - - tmp = *sample_rate; - if (ioctl(OssPcmFildes, SNDCTL_DSP_SPEED, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_SPEED): %s\n"), strerror(errno)); - return -1; - } - if (tmp != *sample_rate) { - Warning(_("audio/oss: device doesn't support %dHz sample rate.\n"), *sample_rate); - *sample_rate = tmp; - ret = 1; - } -#ifdef SNDCTL_DSP_POLICY - tmp = 3; - if (ioctl(OssPcmFildes, SNDCTL_DSP_POLICY, &tmp) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_POLICY): %s\n"), strerror(errno)); - } else { - Info("audio/oss: set policy to %d\n", tmp); - } -#endif - - if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) { - Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), strerror(errno)); - bi.fragsize = 4096; - bi.fragstotal = 16; - } else { - Debug(3, "audio/oss: %d bytes buffered\n", bi.bytes); - } - - OssFragmentTime = (bi.fragsize * 1000) - / (*sample_rate * *channels * AudioBytesProSample); - - Debug(3, "audio/oss: buffer size %d %dms, fragment size %d %dms\n", bi.fragsize * bi.fragstotal, - (bi.fragsize * bi.fragstotal * 1000) - / (*sample_rate * *channels * AudioBytesProSample), bi.fragsize, OssFragmentTime); - - // start when enough bytes for initial write - AudioStartThreshold = (bi.fragsize - 1) * bi.fragstotal; - - // buffer time/delay in ms - delay = AudioBufferTime + 300; - if (VideoAudioDelay > 0) { - delay += VideoAudioDelay / 90; - } - if (AudioStartThreshold < (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U) { - AudioStartThreshold = (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U; - } - // no bigger, than 1/3 the buffer - if (AudioStartThreshold > AudioRingBufferSize / 3) { - AudioStartThreshold = AudioRingBufferSize / 3; - } - - if (!AudioDoingInit) { - Info(_("audio/oss: delay %ums\n"), (AudioStartThreshold * 1000) - / (*sample_rate * *channels * AudioBytesProSample)); - } - - return ret; -} - -/** -** Play audio. -*/ -static void OssPlay(void) -{ -} - -/** -** Pause audio. -*/ -void OssPause(void) -{ -} - -/** -** Initialize OSS audio output module. -*/ -static void OssInit(void) -{ - OssInitPCM(); - OssInitMixer(); -} - -/** -** Cleanup OSS audio output module. -*/ -static void OssExit(void) -{ - if (OssPcmFildes != -1) { - close(OssPcmFildes); - OssPcmFildes = -1; - } - if (OssMixerFildes != -1) { - close(OssMixerFildes); - OssMixerFildes = -1; - } -} - -/** -** OSS module. 
-*/ -static const AudioModule OssModule = { - .Name = "oss", -#ifdef USE_AUDIO_THREAD - .Thread = OssThread, -#endif - .FlushBuffers = OssFlushBuffers, - .GetDelay = OssGetDelay, - .SetVolume = OssSetVolume, - .Setup = OssSetup, - .Play = OssPlay, - .Pause = OssPause, - .Init = OssInit, - .Exit = OssExit, -}; - -#endif // USE_OSS - //============================================================================ // Noop //============================================================================ @@ -1898,20 +1331,14 @@ static const AudioModule OssModule = { ** ** @returns audio delay in time stamps. */ -static int64_t NoopGetDelay(void) -{ - return 0L; -} +static int64_t NoopGetDelay(void) { return 0L; } /** ** Set mixer volume (0-1000) ** ** @param volume volume (0 .. 1000) */ -static void NoopSetVolume( __attribute__((unused)) - int volume) -{ -} +static void NoopSetVolume(__attribute__((unused)) int volume) {} /** ** Noop setup. @@ -1920,20 +1347,15 @@ static void NoopSetVolume( __attribute__((unused)) ** @param channels number of channels ** @param passthrough use pass-through (AC-3, ...) device */ -static int NoopSetup( __attribute__((unused)) - int *channels, __attribute__((unused)) - int *freq, __attribute__((unused)) - int passthrough) -{ +static int NoopSetup(__attribute__((unused)) int *channels, __attribute__((unused)) int *freq, + __attribute__((unused)) int passthrough) { return -1; } /** ** Noop void */ -static void NoopVoid(void) -{ -} +static void NoopVoid(void) {} /** ** Noop module. @@ -1959,8 +1381,7 @@ static const AudioModule NoopModule = { /** ** Prepare next ring buffer. */ -static int AudioNextRing(void) -{ +static int AudioNextRing(void) { int passthrough; int sample_rate; int channels; @@ -1979,13 +1400,13 @@ static int AudioNextRing(void) return -1; } - AudioSetVolume(AudioVolume); // update channel delta + AudioSetVolume(AudioVolume); // update channel delta AudioResetCompressor(); AudioResetNormalizer(); Debug(3, "audio: a/v next buf(%d,%4zdms)\n", atomic_read(&AudioRingFilled), - (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); + (RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) * 1000) / + (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); // stop, if not enough in next buffer used = RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer); @@ -2000,8 +1421,7 @@ static int AudioNextRing(void) ** ** @param dummy unused thread argument */ -static void *AudioPlayHandlerThread(void *dummy) -{ +static void *AudioPlayHandlerThread(void *dummy) { Debug(3, "audio: play thread started\n"); prctl(PR_SET_NAME, "cuvid audio", 0, 0, 0); @@ -2021,9 +1441,11 @@ static void *AudioPlayHandlerThread(void *dummy) } while (!AudioRunning); pthread_mutex_unlock(&AudioMutex); - Debug(3, "audio: ----> %dms %d start\n", (AudioUsedBytes() * 1000) - / (!AudioRing[AudioRingWrite].HwSampleRate + !AudioRing[AudioRingWrite].HwChannels + - AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), + Debug( + 3, "audio: ----> %dms %d start\n", + (AudioUsedBytes() * 1000) / + (!AudioRing[AudioRingWrite].HwSampleRate + !AudioRing[AudioRingWrite].HwChannels + + AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), AudioUsedBytes()); do { @@ -2093,7 +1515,7 @@ static void *AudioPlayHandlerThread(void *dummy) sample_rate = 
AudioRing[AudioRingRead].HwSampleRate; channels = AudioRing[AudioRingRead].HwChannels; Debug(3, "audio: thread channels %d frequency %dHz %s\n", channels, sample_rate, - passthrough ? "pass-through" : ""); + passthrough ? "pass-through" : ""); // audio config changed? if (old_passthrough != passthrough || old_sample_rate != sample_rate || old_channels != channels) { // FIXME: wait for buffer drain @@ -2119,8 +1541,7 @@ static void *AudioPlayHandlerThread(void *dummy) /** ** Initialize audio thread. */ -static void AudioInitThread(void) -{ +static void AudioInitThread(void) { AudioThreadStop = 0; pthread_mutex_init(&AudioMutex, NULL); pthread_cond_init(&AudioStartCond, NULL); @@ -2131,15 +1552,14 @@ static void AudioInitThread(void) /** ** Cleanup audio thread. */ -static void AudioExitThread(void) -{ +static void AudioExitThread(void) { void *retval; Debug(3, "audio: %s\n", __FUNCTION__); if (AudioThread) { AudioThreadStop = 1; - AudioRunning = 1; // wakeup thread, if needed + AudioRunning = 1; // wakeup thread, if needed pthread_cond_signal(&AudioStartCond); if (pthread_join(AudioThread, &retval) || retval != PTHREAD_CANCELED) { Error(_("audio: can't cancel play thread\n")); @@ -2155,34 +1575,27 @@ static void AudioExitThread(void) //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- - /** - ** Table of all audio modules. - */ +/** +** Table of all audio modules. +*/ static const AudioModule *AudioModules[] = { -#ifdef USE_ALSA &AlsaModule, -#endif -#ifdef USE_OSS - &OssModule, -#endif &NoopModule, }; -void AudioDelayms(int delayms) -{ +void AudioDelayms(int delayms) { int count; unsigned char *p; #ifdef DEBUG printf("Try Delay Audio for %d ms Samplerate %d Channels %d bps %d\n", delayms, - AudioRing[AudioRingWrite].HwSampleRate, AudioRing[AudioRingWrite].HwChannels, AudioBytesProSample); + AudioRing[AudioRingWrite].HwSampleRate, AudioRing[AudioRingWrite].HwChannels, AudioBytesProSample); #endif - count = - delayms * AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample / - 1000; + count = delayms * AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample / 1000; - if (delayms < 5000 && delayms > 0) { // not more than 5seconds + if (delayms < 5000 && delayms > 0) { // not more than 5seconds p = calloc(1, count); RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, p, count); free(p); @@ -2195,8 +1608,7 @@ void AudioDelayms(int delayms) ** @param samples sample buffer ** @param count number of bytes in sample buffer */ -void AudioEnqueue(const void *samples, int count) -{ +void AudioEnqueue(const void *samples, int count) { size_t n; int16_t *buffer; @@ -2213,7 +1625,7 @@ void AudioEnqueue(const void *samples, int count) if (!AudioRing[AudioRingWrite].HwSampleRate) { Debug(3, "audio: enqueue not ready\n"); - return; // no setup yet + return; // no setup yet } // save packet size if (!AudioRing[AudioRingWrite].PacketSize) { @@ -2222,8 +1634,9 @@ void AudioEnqueue(const void *samples, int count) } // audio sample modification allowed and needed? 
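// ---------------------------------------------------------------------------
// A minimal standalone sketch of the millisecond-to-byte conversion that
// AudioDelayms() above performs before enqueueing silence. It assumes 16-bit
// interleaved PCM (AudioBytesProSample == 2); the helper names and the
// 48 kHz stereo figures are illustrative only and not part of this patch.
// ---------------------------------------------------------------------------
#include <stdint.h>
#include <stdlib.h>

// delay_ms * sample_rate * channels * bytes_per_sample / 1000
static size_t DelayMsToBytes(int delay_ms, int sample_rate, int channels) {
    return (size_t)delay_ms * sample_rate * channels * 2 / 1000;
}

static void SilenceExample(void) {
    // 100 ms at 48 kHz stereo -> 19200 bytes of zeros, which AudioDelayms()
    // writes into the current write ring buffer (guarded to delays < 5 s).
    size_t count = DelayMsToBytes(100, 48000, 2);
    uint8_t *silence = calloc(1, count); // zeroed buffer == digital silence
    if (silence) {
        // RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, silence, count);
        free(silence);
    }
}
// ---------------------------------------------------------------------------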
buffer = (void *)samples; - if (!AudioRing[AudioRingWrite].Passthrough && (AudioCompression || AudioNormalize - || AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels)) { + if (!AudioRing[AudioRingWrite].Passthrough && + (AudioCompression || AudioNormalize || + AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels)) { int frames; // resample into ring-buffer is too complex in the case of a roundabout @@ -2233,7 +1646,7 @@ void AudioEnqueue(const void *samples, int count) #ifdef USE_AUDIO_MIXER // Convert / resample input to hardware format AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames, buffer, - AudioRing[AudioRingWrite].HwChannels); + AudioRing[AudioRingWrite].HwChannels); #else #ifdef DEBUG if (AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels) { @@ -2245,10 +1658,10 @@ void AudioEnqueue(const void *samples, int count) #endif count = frames * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; - if (AudioCompression) { // in place operation + if (AudioCompression) { // in place operation AudioCompressor(buffer, count); } - if (AudioNormalize) { // in place operation + if (AudioNormalize) { // in place operation AudioNormalizer(buffer, count); } } @@ -2262,17 +1675,18 @@ void AudioEnqueue(const void *samples, int count) // FIXME: round to channel + sample border } - if (!AudioRunning) { // check, if we can start the thread + if (!AudioRunning) { // check, if we can start the thread int skip; n = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); skip = AudioSkip; // FIXME: round to packet size - Debug(4, "audio: start? %4zdms skip %dms\n", (n * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), - (skip * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample)); + Debug(4, "audio: start? 
%4zdms skip %dms\n", + (n * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample), + (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample)); if (skip) { if (n < (unsigned)skip) { @@ -2285,8 +1699,8 @@ void AudioEnqueue(const void *samples, int count) // forced start or enough video + audio buffered // for some exotic channels * 4 too small if (AudioStartThreshold * 4 < n || (AudioVideoIsReady - // if ((AudioVideoIsReady - && AudioStartThreshold < n)) { + // if ((AudioVideoIsReady + && AudioStartThreshold < n)) { // restart play-back // no lock needed, can wakeup next time AudioRunning = 1; @@ -2295,9 +1709,10 @@ void AudioEnqueue(const void *samples, int count) } } // Update audio clock (stupid gcc developers thinks INT64_C is unsigned) - if (AudioRing[AudioRingWrite].PTS != (int64_t) AV_NOPTS_VALUE) { - AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000) - / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); + if (AudioRing[AudioRingWrite].PTS != (int64_t)AV_NOPTS_VALUE) { + AudioRing[AudioRingWrite].PTS += + ((int64_t)count * 90 * 1000) / + (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample); } } @@ -2306,18 +1721,17 @@ void AudioEnqueue(const void *samples, int count) ** ** @param pts video presentation timestamp */ -void AudioVideoReady(int64_t pts) -{ +void AudioVideoReady(int64_t pts) { int64_t audio_pts; size_t used; - if (pts == (int64_t) AV_NOPTS_VALUE) { + if (pts == (int64_t)AV_NOPTS_VALUE) { Debug(3, "audio: a/v start, no valid video\n"); return; } // no valid audio known - if (!AudioRing[AudioRingWrite].HwSampleRate || !AudioRing[AudioRingWrite].HwChannels - || AudioRing[AudioRingWrite].PTS == (int64_t) AV_NOPTS_VALUE) { + if (!AudioRing[AudioRingWrite].HwSampleRate || !AudioRing[AudioRingWrite].HwChannels || + AudioRing[AudioRingWrite].PTS == (int64_t)AV_NOPTS_VALUE) { Debug(3, "audio: a/v start, no valid audio\n"); AudioVideoIsReady = 1; return; @@ -2325,15 +1739,15 @@ void AudioVideoReady(int64_t pts) // Audio.PTS = next written sample time stamp used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); - audio_pts = - AudioRing[AudioRingWrite].PTS - - (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample); + audio_pts = AudioRing[AudioRingWrite].PTS - + (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample); Debug(3, "audio: a/v sync buf(%d,%4zdms) %s | %s = %dms %s\n", atomic_read(&AudioRingFilled), - (used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample), Timestamp2String(pts), Timestamp2String(audio_pts), (int)(pts - audio_pts) / 90, - AudioRunning ? "running" : "ready"); + (used * 1000) / + (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), + Timestamp2String(pts), Timestamp2String(audio_pts), (int)(pts - audio_pts) / 90, + AudioRunning ? 
"running" : "ready"); if (!AudioRunning) { int skip; @@ -2342,20 +1756,23 @@ void AudioVideoReady(int64_t pts) // FIXME: HDTV can use smaller video buffer skip = pts - 0 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay; #ifdef DEBUG - // fprintf(stderr, "a/v-diff %dms a/v-delay %dms skip %dms Audiobuffer %d\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90,AudioBufferTime); + // fprintf(stderr, "a/v-diff %dms a/v-delay %dms skip %dms Audiobuffer + //%d\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / + // 90,AudioBufferTime); #endif // guard against old PTS if (skip > 0 && skip < 4000 * 90) { - skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate) / (1000 * 90)) - * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; + skip = (((int64_t)skip * AudioRing[AudioRingWrite].HwSampleRate) / (1000 * 90)) * + AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample; // FIXME: round to packet size if ((unsigned)skip > used) { AudioSkip = skip - used; skip = used; } Debug(3, "audio: sync advance %dms %d/%zd Rest %d\n", - (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * - AudioBytesProSample), skip, used, AudioSkip); + (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * + AudioBytesProSample), + skip, used, AudioSkip); RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip); used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer); @@ -2372,14 +1789,12 @@ void AudioVideoReady(int64_t pts) } AudioVideoIsReady = 1; - } /** ** Flush audio buffers. */ -void AudioFlushBuffers(void) -{ +void AudioFlushBuffers(void) { int old; int i; @@ -2390,7 +1805,7 @@ void AudioFlushBuffers(void) break; } Debug(3, "audio: flush out of ring buffers\n"); - usleep(1 * 1000); // avoid hot polling + usleep(1 * 1000); // avoid hot polling } if (atomic_read(&AudioRingFilled) >= AUDIO_RING_MAX) { // FIXME: We can set the flush flag in the last wrote ring buffer @@ -2409,7 +1824,7 @@ void AudioFlushBuffers(void) AudioRing[AudioRingWrite].InChannels = AudioRing[old].InChannels; AudioRing[AudioRingWrite].PTS = AV_NOPTS_VALUE; RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, - RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer)); + RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer)); Debug(3, "audio: reset video ready\n"); AudioVideoIsReady = 0; AudioSkip = 0; @@ -2418,7 +1833,7 @@ void AudioFlushBuffers(void) // FIXME: wait for flush complete needed? for (i = 0; i < 24 * 2; ++i) { - if (!AudioRunning) { // wakeup thread to flush buffers + if (!AudioRunning) { // wakeup thread to flush buffers AudioRunning = 1; pthread_cond_signal(&AudioStartCond); Debug(3, "Start on Flush\n"); @@ -2427,7 +1842,7 @@ void AudioFlushBuffers(void) if (!atomic_read(&AudioRingFilled)) { break; } - usleep(1 * 1000); // avoid hot polling + usleep(1 * 1000); // avoid hot polling } Debug(3, "audio: audio flush %dms\n", i); } @@ -2435,25 +1850,22 @@ void AudioFlushBuffers(void) /** ** Call back to play audio polled. */ -void AudioPoller(void) -{ +void AudioPoller(void) { // FIXME: write poller } /** ** Get free bytes in audio output. */ -int AudioFreeBytes(void) -{ +int AudioFreeBytes(void) { return AudioRing[AudioRingWrite].RingBuffer ? RingBufferFreeBytes(AudioRing[AudioRingWrite].RingBuffer) - : INT32_MAX; + : INT32_MAX; } /** ** Get used bytes in audio output. 
*/ -int AudioUsedBytes(void) -{ +int AudioUsedBytes(void) { // FIXME: not correct, if multiple buffer are in use return AudioRing[AudioRingWrite].RingBuffer ? RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) : 0; } @@ -2463,25 +1875,23 @@ int AudioUsedBytes(void) ** ** @returns audio delay in time stamps. */ -int64_t AudioGetDelay(void) -{ +int64_t AudioGetDelay(void) { int64_t pts; if (!AudioRunning) { - return 0L; // audio not running + return 0L; // audio not running } if (!AudioRing[AudioRingRead].HwSampleRate) { - return 0L; // audio not setup + return 0L; // audio not setup } if (atomic_read(&AudioRingFilled)) { - return 0L; // multiple buffers, invalid delay + return 0L; // multiple buffers, invalid delay } pts = AudioUsedModule->GetDelay(); - pts += ((int64_t) RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) - * 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels * - AudioBytesProSample); + pts += ((int64_t)RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) * 90 * 1000) / + (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels * AudioBytesProSample); Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer), - pts / 90); + pts / 90); return pts; } @@ -2491,13 +1901,13 @@ int64_t AudioGetDelay(void) ** ** @param pts audio presentation timestamp */ -void AudioSetClock(int64_t pts) -{ +void AudioSetClock(int64_t pts) { if (AudioRing[AudioRingWrite].PTS != pts) { Debug(4, "audio: set clock %s -> %s pts\n", Timestamp2String(AudioRing[AudioRingWrite].PTS), - Timestamp2String(pts)); + Timestamp2String(pts)); } -// printf("Audiosetclock pts %#012" PRIx64 " %d\n",pts,RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer)); + // printf("Audiosetclock pts %#012" PRIx64 " + // %d\n",pts,RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer)); AudioRing[AudioRingWrite].PTS = pts; } @@ -2506,10 +1916,9 @@ void AudioSetClock(int64_t pts) ** ** @returns the audio clock in time stamps. */ -int64_t AudioGetClock(void) -{ +int64_t AudioGetClock(void) { // (cast) needed for the evil gcc - if (AudioRing[AudioRingRead].PTS != (int64_t) AV_NOPTS_VALUE) { + if (AudioRing[AudioRingRead].PTS != (int64_t)AV_NOPTS_VALUE) { int64_t delay; // delay zero, if no valid time stamp @@ -2528,8 +1937,7 @@ int64_t AudioGetClock(void) ** ** @param volume volume (0 .. 1000) */ -void AudioSetVolume(int volume) -{ +void AudioSetVolume(int volume) { AudioVolume = volume; AudioMute = !volume; // reduce loudness for stereo output @@ -2560,8 +1968,7 @@ void AudioSetVolume(int volume) ** ** @todo add support to report best fitting format. */ -int AudioSetup(int *freq, int *channels, int passthrough) -{ +int AudioSetup(int *freq, int *channels, int passthrough) { Debug(3, "audio: setup channels %d frequency %dHz %s\n", *channels, *freq, passthrough ? "pass-through" : ""); // invalid parameter @@ -2576,22 +1983,20 @@ int AudioSetup(int *freq, int *channels, int passthrough) /** ** Play audio. */ -void AudioPlay(void) -{ +void AudioPlay(void) { if (!AudioPaused) { Debug(3, "audio: not paused, check the code\n"); return; } Debug(3, "audio: resumed\n"); AudioPaused = 0; - AudioEnqueue(NULL, 0); // wakeup thread + AudioEnqueue(NULL, 0); // wakeup thread } /** ** Pause audio. 
*/ -void AudioPause(void) -{ +void AudioPause(void) { if (AudioPaused) { Debug(3, "audio: already paused, check the code\n"); return; @@ -2608,8 +2013,7 @@ void AudioPause(void) ** The period size of the audio buffer is 24 ms. ** With streamdev sometimes extra +100ms are needed. */ -void AudioSetBufferTime(int delay) -{ +void AudioSetBufferTime(int delay) { if (!delay) { delay = 336; } @@ -2621,8 +2025,7 @@ void AudioSetBufferTime(int delay) ** ** @param onoff -1 toggle, true turn on, false turn off */ -void AudioSetSoftvol(int onoff) -{ +void AudioSetSoftvol(int onoff) { if (onoff < 0) { AudioSoftVolume ^= 1; } else { @@ -2636,8 +2039,7 @@ void AudioSetSoftvol(int onoff) ** @param onoff -1 toggle, true turn on, false turn off ** @param maxfac max. factor of normalize /1000 */ -void AudioSetNormalize(int onoff, int maxfac) -{ +void AudioSetNormalize(int onoff, int maxfac) { if (onoff < 0) { AudioNormalize ^= 1; } else { @@ -2652,8 +2054,7 @@ void AudioSetNormalize(int onoff, int maxfac) ** @param onoff -1 toggle, true turn on, false turn off ** @param maxfac max. factor of compression /1000 */ -void AudioSetCompression(int onoff, int maxfac) -{ +void AudioSetCompression(int onoff, int maxfac) { if (onoff < 0) { AudioCompression ^= 1; } else { @@ -2673,10 +2074,9 @@ void AudioSetCompression(int onoff, int maxfac) ** ** @param delta value (/1000) to reduce stereo volume */ -void AudioSetStereoDescent(int delta) -{ +void AudioSetStereoDescent(int delta) { AudioStereoDescent = delta; - AudioSetVolume(AudioVolume); // update channel delta + AudioSetVolume(AudioVolume); // update channel delta } /** @@ -2686,10 +2086,9 @@ void AudioSetStereoDescent(int delta) ** ** @note this is currently used to select alsa/OSS output module. */ -void AudioSetDevice(const char *device) -{ +void AudioSetDevice(const char *device) { if (!AudioModuleName) { - AudioModuleName = "alsa"; // detect alsa/OSS + AudioModuleName = "alsa"; // detect alsa/OSS if (!device[0]) { AudioModuleName = "noop"; } else if (device[0] == '/') { @@ -2706,10 +2105,9 @@ void AudioSetDevice(const char *device) ** ** @note this is currently usable with alsa only. */ -void AudioSetPassthroughDevice(const char *device) -{ +void AudioSetPassthroughDevice(const char *device) { if (!AudioModuleName) { - AudioModuleName = "alsa"; // detect alsa/OSS + AudioModuleName = "alsa"; // detect alsa/OSS if (!device[0]) { AudioModuleName = "noop"; } else if (device[0] == '/') { @@ -2726,18 +2124,14 @@ void AudioSetPassthroughDevice(const char *device) ** ** @note this is currently used to select alsa/OSS output module. */ -void AudioSetChannel(const char *channel) -{ - AudioMixerChannel = channel; -} +void AudioSetChannel(const char *channel) { AudioMixerChannel = channel; } /** ** Set automatic AES flag handling. ** ** @param onoff turn setting AES flag on or off */ -void AudioSetAutoAES(int onoff) -{ +void AudioSetAutoAES(int onoff) { if (onoff < 0) { AudioAppendAES ^= 1; } else { @@ -2750,25 +2144,19 @@ void AudioSetAutoAES(int onoff) ** ** @todo FIXME: make audio output module selectable. */ -void AudioInit(void) -{ +void AudioInit(void) { unsigned u; const char *name; int freq; int chan; name = "noop"; -#ifdef USE_OSS - name = "oss"; -#endif -#ifdef USE_ALSA name = "alsa"; -#endif if (AudioModuleName) { name = AudioModuleName; } // - // search selected audio module. + // search selected audio module. 
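// ---------------------------------------------------------------------------
// A condensed sketch of the lookup that follows: with the OSS backend removed
// by this patch, AudioModules[] holds only &AlsaModule and &NoopModule, and an
// unknown name falls back to the noop module. The trimmed struct and "Mini"
// names below are illustrative and not the full AudioModule interface.
// ---------------------------------------------------------------------------
#include <strings.h> // strcasecmp

typedef struct {
    const char *Name;   // name used for selection ("alsa", "noop")
    void (*Init)(void); // module setup hook
} MiniAudioModule;

static void MiniAlsaInit(void) {}
static void MiniNoopInit(void) {}

static const MiniAudioModule MiniAlsa = {"alsa", MiniAlsaInit};
static const MiniAudioModule MiniNoop = {"noop", MiniNoopInit};
static const MiniAudioModule *MiniModules[] = {&MiniAlsa, &MiniNoop};

// Case-insensitive lookup by name, defaulting to the noop module.
static const MiniAudioModule *MiniSelectModule(const char *name) {
    for (unsigned u = 0; u < sizeof(MiniModules) / sizeof(*MiniModules); ++u) {
        if (!strcasecmp(name, MiniModules[u]->Name)) {
            return MiniModules[u];
        }
    }
    return &MiniNoop;
}
// ---------------------------------------------------------------------------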
// for (u = 0; u < sizeof(AudioModules) / sizeof(*AudioModules); ++u) { if (!strcasecmp(name, AudioModules[u]->Name)) { @@ -2781,15 +2169,15 @@ void AudioInit(void) AudioUsedModule = &NoopModule; return; - found: +found: AudioDoingInit = 1; AudioRingInit(); AudioUsedModule->Init(); // - // Check which channels/rates/formats are supported - // FIXME: we force 44.1Khz and 48Khz must be supported equal - // FIXME: should use bitmap of channels supported in RatesInHw - // FIXME: use loop over sample-rates + // Check which channels/rates/formats are supported + // FIXME: we force 44.1Khz and 48Khz must be supported equal + // FIXME: should use bitmap of channels supported in RatesInHw + // FIXME: use loop over sample-rates freq = 44100; AudioRatesInHw[Audio44100] = 0; for (chan = 1; chan < 9; ++chan) { @@ -2817,7 +2205,7 @@ void AudioInit(void) tchan = chan; tfreq = freq; if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { - //AudioChannelsInHw[chan] = 0; + // AudioChannelsInHw[chan] = 0; } else { AudioChannelsInHw[chan] = chan; AudioRatesInHw[Audio48000] |= (1 << chan); @@ -2835,17 +2223,17 @@ void AudioInit(void) tchan = chan; tfreq = freq; if (AudioUsedModule->Setup(&tfreq, &tchan, 0)) { - //AudioChannelsInHw[chan] = 0; + // AudioChannelsInHw[chan] = 0; } else { AudioChannelsInHw[chan] = chan; AudioRatesInHw[Audio192000] |= (1 << chan); } } - // build channel support and conversion table + // build channel support and conversion table for (u = 0; u < AudioRatesMax; ++u) { for (chan = 1; chan < 9; ++chan) { AudioChannelMatrix[u][chan] = 0; - if (!AudioRatesInHw[u]) { // rate unsupported + if (!AudioRatesInHw[u]) { // rate unsupported continue; } if (AudioChannelsInHw[chan]) { @@ -2903,11 +2291,11 @@ void AudioInit(void) } for (u = 0; u < AudioRatesMax; ++u) { Info(_("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"), AudioRatesTable[u], - AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4], - AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]); + AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4], + AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]); } #ifdef USE_AUDIO_THREAD - if (AudioUsedModule->Thread) { // supports threads + if (AudioUsedModule->Thread) { // supports threads AudioInitThread(); } #endif @@ -2917,14 +2305,13 @@ void AudioInit(void) /** ** Cleanup audio output module. */ -void AudioExit(void) -{ +void AudioExit(void) { const AudioModule *module; Debug(3, "audio: %s\n", __FUNCTION__); #ifdef USE_AUDIO_THREAD - if (AudioUsedModule->Thread) { // supports threads + if (AudioUsedModule->Thread) { // supports threads AudioExitThread(); } #endif @@ -2942,11 +2329,10 @@ void AudioExit(void) // Test //---------------------------------------------------------------------------- -void AudioTest(void) -{ +void AudioTest(void) { for (;;) { unsigned u; - uint8_t buffer[16 * 1024]; // some random data + uint8_t buffer[16 * 1024]; // some random data int i; for (u = 0; u < sizeof(buffer); u++) { @@ -2966,28 +2352,29 @@ void AudioTest(void) #include -int SysLogLevel; ///< show additional debug informations +int SysLogLevel; ///< show additional debug informations /** ** Print version. 
*/ -static void PrintVersion(void) -{ +static void PrintVersion(void) { printf("audio_test: audio tester Version " VERSION #ifdef GIT_REV - "(GIT-" GIT_REV ")" + "(GIT-" GIT_REV ")" #endif - ",\n\t(c) 2009 - 2013 by Johns\n" "\tLicense AGPLv3: GNU Affero General Public License version 3\n"); + ",\n\t(c) 2009 - 2013 by Johns\n" + "\tLicense AGPLv3: GNU Affero General Public License version 3\n"); } /** ** Print usage. */ -static void PrintUsage(void) -{ - printf("Usage: audio_test [-?dhv]\n" "\t-d\tenable debug, more -d increase the verbosity\n" - "\t-? -h\tdisplay this message\n" "\t-v\tdisplay version information\n" - "Only idiots print usage on stderr!\n"); +static void PrintUsage(void) { + printf("Usage: audio_test [-?dhv]\n" + "\t-d\tenable debug, more -d increase the verbosity\n" + "\t-? -h\tdisplay this message\n" + "\t-v\tdisplay version information\n" + "Only idiots print usage on stderr!\n"); } /** @@ -2998,26 +2385,25 @@ static void PrintUsage(void) ** ** @returns -1 on failures, 0 clean exit. */ -int main(int argc, char *const argv[]) -{ +int main(int argc, char *const argv[]) { SysLogLevel = 0; // - // Parse command line arguments + // Parse command line arguments // for (;;) { switch (getopt(argc, argv, "hv?-c:d")) { - case 'd': // enabled debug + case 'd': // enabled debug ++SysLogLevel; continue; case EOF: break; - case 'v': // print version + case 'v': // print version PrintVersion(); return 0; case '?': - case 'h': // help usage + case 'h': // help usage PrintVersion(); PrintUsage(); return 0; @@ -3045,12 +2431,12 @@ int main(int argc, char *const argv[]) return -1; } // - // main loop + // main loop // AudioInit(); for (;;) { unsigned u; - uint8_t buffer[16 * 1024]; // some random data + uint8_t buffer[16 * 1024]; // some random data for (u = 0; u < sizeof(buffer); u++) { buffer[u] = random() & 0xffff; diff --git a/audio.h b/audio.h index 46a8d90..a1272be 100644 --- a/audio.h +++ b/audio.h @@ -1,7 +1,7 @@ /// /// @file audio.h @brief Audio module headerfile /// -/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2014 by Johns. All Rights Reserved. 
/// /// Contributor(s): /// @@ -27,41 +27,41 @@ // Prototypes //---------------------------------------------------------------------------- -extern void AudioEnqueue(const void *, int); ///< buffer audio samples -extern void AudioFlushBuffers(void); ///< flush audio buffers -extern void AudioPoller(void); ///< poll audio events/handling -extern int AudioFreeBytes(void); ///< free bytes in audio output -extern int AudioUsedBytes(void); ///< used bytes in audio output -extern int64_t AudioGetDelay(void); ///< get current audio delay -extern void AudioSetClock(int64_t); ///< set audio clock base -extern int64_t AudioGetClock(); ///< get current audio clock -extern void AudioSetVolume(int); ///< set volume -extern int AudioSetup(int *, int *, int); ///< setup audio output +extern void AudioEnqueue(const void *, int); ///< buffer audio samples +extern void AudioFlushBuffers(void); ///< flush audio buffers +extern void AudioPoller(void); ///< poll audio events/handling +extern int AudioFreeBytes(void); ///< free bytes in audio output +extern int AudioUsedBytes(void); ///< used bytes in audio output +extern int64_t AudioGetDelay(void); ///< get current audio delay +extern void AudioSetClock(int64_t); ///< set audio clock base +extern int64_t AudioGetClock(); ///< get current audio clock +extern void AudioSetVolume(int); ///< set volume +extern int AudioSetup(int *, int *, int); ///< setup audio output -extern void AudioPlay(void); ///< play audio -extern void AudioPause(void); ///< pause audio +extern void AudioPlay(void); ///< play audio +extern void AudioPause(void); ///< pause audio -extern void AudioSetBufferTime(int); ///< set audio buffer time -extern void AudioSetSoftvol(int); ///< enable/disable softvol -extern void AudioSetNormalize(int, int); ///< set normalize parameters -extern void AudioSetCompression(int, int); ///< set compression parameters -extern void AudioSetStereoDescent(int); ///< set stereo loudness descent +extern void AudioSetBufferTime(int); ///< set audio buffer time +extern void AudioSetSoftvol(int); ///< enable/disable softvol +extern void AudioSetNormalize(int, int); ///< set normalize parameters +extern void AudioSetCompression(int, int); ///< set compression parameters +extern void AudioSetStereoDescent(int); ///< set stereo loudness descent -extern void AudioSetDevice(const char *); ///< set PCM audio device +extern void AudioSetDevice(const char *); ///< set PCM audio device - /// set pass-through device +/// set pass-through device extern void AudioSetPassthroughDevice(const char *); -extern void AudioSetChannel(const char *); ///< set mixer channel -extern void AudioSetAutoAES(int); ///< set automatic AES flag handling -extern void AudioInit(void); ///< setup audio module -extern void AudioExit(void); ///< cleanup and exit audio module +extern void AudioSetChannel(const char *); ///< set mixer channel +extern void AudioSetAutoAES(int); ///< set automatic AES flag handling +extern void AudioInit(void); ///< setup audio module +extern void AudioExit(void); ///< cleanup and exit audio module //---------------------------------------------------------------------------- // Variables //---------------------------------------------------------------------------- -extern char AudioAlsaDriverBroken; ///< disable broken driver message -extern char AudioAlsaNoCloseOpen; ///< disable alsa close/open fix -extern char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix +extern char AudioAlsaDriverBroken; ///< disable broken driver message +extern char 
AudioAlsaNoCloseOpen; ///< disable alsa close/open fix +extern char AudioAlsaCloseOpenDelay; ///< enable alsa close/open delay fix /// @} diff --git a/codec.c b/codec.c index fc49cd2..bf3a6ed 100644 --- a/codec.c +++ b/codec.c @@ -1,7 +1,7 @@ /// /// @file codec.c @brief Codec functions /// -/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. /// /// Contributor(s): /// @@ -27,7 +27,7 @@ /// It is uses ffmpeg (http://ffmpeg.org) as backend. /// /// It may work with libav (http://libav.org), but the tests show -/// many bugs and incompatiblity in it. Don't use this shit. +/// many bugs and incompatiblity in it. Don't use this shit. /// /// compile with pass-through support (stable, AC-3, E-AC-3 only) @@ -36,10 +36,6 @@ #define USE_AUDIO_DRIFT_CORRECTION /// compile AC-3 audio drift correction support (very experimental) #define USE_AC3_DRIFT_CORRECTION -/// use ffmpeg libswresample API (autodected, Makefile) -#define noUSE_SWRESAMPLE -/// use libav libavresample API (autodected, Makefile) -#define noUSE_AVRESAMPLE #include #include @@ -51,35 +47,28 @@ #include #endif -#include -#include #include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#include +#include +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #include -#include #include - -#ifdef USE_SWRESAMPLE -#include -#endif -#ifdef USE_AVRESAMPLE -#include #include -#endif -#ifndef __USE_GNU -#define __USE_GNU -#endif +#include + #include +// clang-format off #include "iatomic.h" #include "misc.h" #include "video.h" #include "audio.h" #include "codec.h" +// clang-format on //---------------------------------------------------------------------------- // Global @@ -113,12 +102,12 @@ AVBufferRef *hw_device_ctx; /// struct _video_decoder_ { - VideoHwDecoder *HwDecoder; ///< video hardware decoder + VideoHwDecoder *HwDecoder; ///< video hardware decoder - int GetFormatDone; ///< flag get format called! - AVCodec *VideoCodec; ///< video codec - AVCodecContext *VideoCtx; ///< video codec context - AVFrame *Frame; ///< decoded video frame + int GetFormatDone; ///< flag get format called! + AVCodec *VideoCodec; ///< video codec + AVCodecContext *VideoCtx; ///< video codec context + AVFrame *Frame; ///< decoded video frame }; #endif //---------------------------------------------------------------------------- @@ -128,14 +117,13 @@ struct _video_decoder_ /** ** Callback to negotiate the PixelFormat. ** -** @param video_ctx codec context -** @param fmt is the list of formats which are supported by -** the codec, it is terminated by -1 as 0 is a -** valid format, the formats are ordered by -** quality. +** @param video_ctx codec context +** @param fmt is the list of formats which are supported by +** the codec, it is terminated by -1 as 0 is a +** valid format, the formats are ordered by +** quality. 
*/ -static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enum AVPixelFormat *fmt) -{ +static enum AVPixelFormat Codec_get_format(AVCodecContext *video_ctx, const enum AVPixelFormat *fmt) { VideoDecoder *decoder; enum AVPixelFormat fmt1; @@ -145,9 +133,8 @@ static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enu Error("codec/video: ffmpeg/libav buggy: width or height zero\n"); } - // decoder->GetFormatDone = 1; + // decoder->GetFormatDone = 1; return Video_get_format(decoder->HwDecoder, video_ctx, fmt); - } // static void Codec_free_buffer(void *opaque, uint8_t *data); @@ -157,16 +144,15 @@ static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enu ** ** Called at the beginning of each frame to get a buffer for it. ** -** @param video_ctx Codec context -** @param frame Get buffer for this frame +** @param video_ctx Codec context +** @param frame Get buffer for this frame */ -static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int flags) -{ +static int Codec_get_buffer2(AVCodecContext *video_ctx, AVFrame *frame, int flags) { VideoDecoder *decoder; decoder = video_ctx->opaque; - if (!decoder->GetFormatDone) { // get_format missing + if (!decoder->GetFormatDone) { // get_format missing enum AVPixelFormat fmts[2]; // fprintf(stderr, "codec: buggy libav, use ffmpeg\n"); @@ -177,9 +163,9 @@ static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int fl } #if 0 if (decoder->hwaccel_get_buffer && (AV_PIX_FMT_VDPAU == decoder->hwaccel_pix_fmt - || AV_PIX_FMT_CUDA == decoder->hwaccel_pix_fmt || AV_PIX_FMT_VAAPI == decoder->hwaccel_pix_fmt)) { - // Debug(3,"hwaccel get_buffer\n"); - return decoder->hwaccel_get_buffer(video_ctx, frame, flags); + || AV_PIX_FMT_CUDA == decoder->hwaccel_pix_fmt || AV_PIX_FMT_VAAPI == decoder->hwaccel_pix_fmt)) { + // Debug(3,"hwaccel get_buffer\n"); + return decoder->hwaccel_get_buffer(video_ctx, frame, flags); } #endif // Debug(3, "codec: fallback to default get_buffer\n"); @@ -193,12 +179,11 @@ static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int fl /** ** Allocate a new video decoder context. ** -** @param hw_decoder video hardware decoder +** @param hw_decoder video hardware decoder ** ** @returns private decoder pointer for video decoder. */ -VideoDecoder *CodecVideoNewDecoder(VideoHwDecoder * hw_decoder) -{ +VideoDecoder *CodecVideoNewDecoder(VideoHwDecoder *hw_decoder) { VideoDecoder *decoder; if (!(decoder = calloc(1, sizeof(*decoder)))) { @@ -214,10 +199,7 @@ VideoDecoder *CodecVideoNewDecoder(VideoHwDecoder * hw_decoder) ** ** @param decoder private video decoder */ -void CodecVideoDelDecoder(VideoDecoder * decoder) -{ - free(decoder); -} +void CodecVideoDelDecoder(VideoDecoder *decoder) { free(decoder); } /** ** Open video decoder. 
@@ -225,14 +207,13 @@ void CodecVideoDelDecoder(VideoDecoder * decoder) ** @param decoder private video decoder ** @param codec_id video codec id */ -void CodecVideoOpen(VideoDecoder * decoder, int codec_id) -{ +void CodecVideoOpen(VideoDecoder *decoder, int codec_id) { AVCodec *video_codec; const char *name; int ret, deint = 2; Debug(3, "***************codec: Video Open using video codec ID %#06x (%s)\n", codec_id, - avcodec_get_name(codec_id)); + avcodec_get_name(codec_id)); if (decoder->VideoCtx) { Error(_("codec: missing close\n")); @@ -261,7 +242,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) break; case AV_CODEC_ID_H264: name = "h264_v4l2m2m"; -// name = "h264_mmal"; + // name = "h264_mmal"; break; case AV_CODEC_ID_HEVC: name = "hevc_v4l2m2m"; @@ -289,8 +270,9 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) } decoder->VideoCtx->hw_device_ctx = av_buffer_ref(HwDeviceContext); #else - decoder->VideoCtx->pix_fmt = AV_PIX_FMT_DRM_PRIME; /* request a DRM frame */ - // decoder->VideoCtx->pix_fmt = AV_PIX_FMT_MMAL; /* request a DRM frame */ + decoder->VideoCtx->pix_fmt = AV_PIX_FMT_DRM_PRIME; /* request a DRM frame */ + // decoder->VideoCtx->pix_fmt = AV_PIX_FMT_MMAL; /* request a DRM frame + //*/ #endif // FIXME: for software decoder use all cpus, otherwise 1 @@ -310,12 +292,12 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) // decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1 if (video_codec->capabilities & (AV_CODEC_CAP_AUTO_THREADS)) { Debug(3, "codec: auto threads enabled"); -// decoder->VideoCtx->thread_count = 0; + // decoder->VideoCtx->thread_count = 0; } if (video_codec->capabilities & AV_CODEC_CAP_TRUNCATED) { Debug(3, "codec: supports truncated packets"); - //decoder->VideoCtx->flags |= CODEC_FLAG_TRUNCATED; + // decoder->VideoCtx->flags |= CODEC_FLAG_TRUNCATED; } // FIXME: own memory management for video frames. 
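// ---------------------------------------------------------------------------
// A minimal sketch of the capability probing done around this point:
// CodecVideoOpen() only tests bits in AVCodec::capabilities (DR1, frame and
// slice threads) to decide what to log and enable. This standalone helper is
// illustrative, assumes an FFmpeg that provides these AV_CODEC_CAP_* flags,
// and is not part of the patch.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <libavcodec/avcodec.h>

static void PrintDecoderCaps(enum AVCodecID codec_id) {
    const AVCodec *codec = avcodec_find_decoder(codec_id);
    if (!codec) {
        printf("no decoder for %s\n", avcodec_get_name(codec_id));
        return;
    }
    printf("%s: dr1=%d frame-threads=%d slice-threads=%d\n", codec->name,
           !!(codec->capabilities & AV_CODEC_CAP_DR1),
           !!(codec->capabilities & AV_CODEC_CAP_FRAME_THREADS),
           !!(codec->capabilities & AV_CODEC_CAP_SLICE_THREADS));
}

// Usage (hypothetical): PrintDecoderCaps(AV_CODEC_ID_H264);
// ---------------------------------------------------------------------------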
if (video_codec->capabilities & AV_CODEC_CAP_DR1) { @@ -323,16 +305,16 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) } if (video_codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) { Debug(3, "codec: supports frame threads"); -// decoder->VideoCtx->thread_count = 0; + // decoder->VideoCtx->thread_count = 0; // decoder->VideoCtx->thread_type |= FF_THREAD_FRAME; } if (video_codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) { Debug(3, "codec: supports slice threads"); -// decoder->VideoCtx->thread_count = 0; + // decoder->VideoCtx->thread_count = 0; // decoder->VideoCtx->thread_type |= FF_THREAD_SLICE; } -// if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0) -// Fatal(_("VAAPI Refcounts invalid\n")); + // if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0) + // Fatal(_("VAAPI Refcounts invalid\n")); decoder->VideoCtx->thread_safe_callbacks = 0; #endif @@ -352,8 +334,9 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) #endif #ifdef CUVID - if (strcmp(decoder->VideoCodec->long_name, "Nvidia CUVID MPEG2VIDEO decoder") == 0) { // deinterlace for mpeg2 is somehow broken - if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint, 0) < 0) { // adaptive + if (strcmp(decoder->VideoCodec->long_name, + "Nvidia CUVID MPEG2VIDEO decoder") == 0) { // deinterlace for mpeg2 is somehow broken + if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint, 0) < 0) { // adaptive pthread_mutex_unlock(&CodecLockMutex); Fatal(_("codec: can't set option deint to video codec!\n")); } @@ -368,7 +351,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) Fatal(_("codec: can't set option drop 2.field to video codec!\n")); } } else if (strstr(decoder->VideoCodec->long_name, "Nvidia CUVID") != NULL) { - if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint, 0) < 0) { // adaptive + if (av_opt_set_int(decoder->VideoCtx->priv_data, "deint", deint, 0) < 0) { // adaptive pthread_mutex_unlock(&CodecLockMutex); Fatal(_("codec: can't set option deint to video codec!\n")); } @@ -393,12 +376,12 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) pthread_mutex_unlock(&CodecLockMutex); - decoder->VideoCtx->opaque = decoder; // our structure + decoder->VideoCtx->opaque = decoder; // our structure - //decoder->VideoCtx->debug = FF_DEBUG_STARTCODE; - //decoder->VideoCtx->err_recognition |= AV_EF_EXPLODE; + // decoder->VideoCtx->debug = FF_DEBUG_STARTCODE; + // decoder->VideoCtx->err_recognition |= AV_EF_EXPLODE; - //av_log_set_level(AV_LOG_DEBUG); + // av_log_set_level(AV_LOG_DEBUG); av_log_set_level(0); decoder->VideoCtx->get_format = Codec_get_format; @@ -408,17 +391,17 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) decoder->VideoCtx->hwaccel_context = VideoGetHwAccelContext(decoder->HwDecoder); // - // Prepare frame buffer for decoder + // Prepare frame buffer for decoder // #if 0 if (!(decoder->Frame = av_frame_alloc())) { - Fatal(_("codec: can't allocate video decoder frame buffer\n")); + Fatal(_("codec: can't allocate video decoder frame buffer\n")); } #endif // reset buggy ffmpeg/libav flag decoder->GetFormatDone = 0; -#if defined (YADIF) || defined (RASPI) +#if defined(YADIF) || defined(RASPI) decoder->filter = 0; #endif } @@ -428,8 +411,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id) ** ** @param video_decoder private video decoder */ -void CodecVideoClose(VideoDecoder * video_decoder) -{ +void CodecVideoClose(VideoDecoder *video_decoder) { AVFrame *frame; // FIXME: play buffered data @@ -441,14 
+423,14 @@ void CodecVideoClose(VideoDecoder * video_decoder) #if 1 frame = av_frame_alloc(); avcodec_send_packet(video_decoder->VideoCtx, NULL); - while (avcodec_receive_frame(video_decoder->VideoCtx, frame) >= 0) ; + while (avcodec_receive_frame(video_decoder->VideoCtx, frame) >= 0) + ; av_frame_free(&frame); #endif avcodec_close(video_decoder->VideoCtx); av_freep(&video_decoder->VideoCtx); pthread_mutex_unlock(&CodecLockMutex); } - } #if 0 @@ -472,16 +454,16 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame) pts = frame->pkt_pts; if (pts == (int64_t) AV_NOPTS_VALUE) { - printf("*"); + printf("*"); } ms_delay = (1000 * video_ctx->time_base.num) / video_ctx->time_base.den; ms_delay += frame->repeat_pict * ms_delay / 2; - printf("codec: PTS %s%s %" PRId64 " %d %d/%d %d/%d %dms\n", frame->repeat_pict ? "r" : " ", - frame->interlaced_frame ? "I" : " ", pts, (int)(pts - last_pts) / 90, video_ctx->time_base.num, - video_ctx->time_base.den, video_ctx->framerate.num, video_ctx->framerate.den, ms_delay); + printf("codec: PTS %s%s %" PRId64 " %d %d/%d %d/%d %dms\n", frame->repeat_pict ? "r" : " ", + frame->interlaced_frame ? "I" : " ", pts, (int)(pts - last_pts) / 90, video_ctx->time_base.num, + video_ctx->time_base.den, video_ctx->framerate.num, video_ctx->framerate.den, ms_delay); if (pts != (int64_t) AV_NOPTS_VALUE) { - last_pts = pts; + last_pts = pts; } } @@ -495,14 +477,13 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame) */ extern int CuvidTestSurfaces(); -#if defined YADIF || defined (VAAPI) -extern int init_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame); -extern int push_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame); +#if defined YADIF || defined(VAAPI) +extern int init_filters(AVCodecContext *dec_ctx, void *decoder, AVFrame *frame); +extern int push_filters(AVCodecContext *dec_ctx, void *decoder, AVFrame *frame); #endif #ifdef VAAPI -void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) -{ +void CodecVideoDecode(VideoDecoder *decoder, const AVPacket *avpkt) { AVCodecContext *video_ctx = decoder->VideoCtx; if (video_ctx->codec_type == AVMEDIA_TYPE_VIDEO && CuvidTestSurfaces()) { @@ -510,7 +491,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) AVPacket pkt[1]; AVFrame *frame; - *pkt = *avpkt; // use copy + *pkt = *avpkt; // use copy ret = avcodec_send_packet(video_ctx, pkt); if (ret < 0) { return; @@ -540,7 +521,8 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) decoder->filter = 2; } } - if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag + if (frame->interlaced_frame && decoder->filter == 2 && + (frame->height != 720)) { // broken ZDF sends Interlaced flag push_filters(video_ctx, decoder->HwDecoder, frame); continue; } @@ -557,8 +539,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) #ifdef CUVID -void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) -{ +void CodecVideoDecode(VideoDecoder *decoder, const AVPacket *avpkt) { AVCodecContext *video_ctx; AVFrame *frame; int ret, ret1; @@ -567,10 +548,10 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) static uint64_t first_time = 0; const AVPacket *pkt; - next_part: +next_part: video_ctx = decoder->VideoCtx; - pkt = avpkt; // use copy + pkt = avpkt; // use copy got_frame = 0; // printf("decode packet %d\n",(GetusTicks()-first_time)/1000000); @@ -585,21 +566,23 @@ void 
CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) if (!CuvidTestSurfaces()) usleep(1000); -// printf("send packet to decode %s %04x\n",consumed?"ok":"Full",ret1); + // printf("send packet to decode %s %04x\n",consumed?"ok":"Full",ret1); if ((ret1 == AVERROR(EAGAIN) || ret1 == AVERROR_EOF || ret1 >= 0) && CuvidTestSurfaces()) { ret = 0; while ((ret >= 0) && CuvidTestSurfaces()) { // get frames until empty snd Surfaces avail. frame = av_frame_alloc(); - ret = avcodec_receive_frame(video_ctx, frame); // get new frame - if (ret >= 0) { // one is avail. + ret = avcodec_receive_frame(video_ctx, frame); // get new frame + if (ret >= 0) { // one is avail. got_frame = 1; } else { got_frame = 0; } - // printf("got %s packet from decoder\n",got_frame?"1":"no"); - if (got_frame) { // frame completed -// printf("video frame pts %#012" PRIx64 " %dms\n",frame->pts,(int)(apts - frame->pts) / 90); + // printf("got %s packet from + // decoder\n",got_frame?"1":"no"); + if (got_frame) { // frame completed +// printf("video frame pts %#012" PRIx64 " +//%dms\n",frame->pts,(int)(apts - frame->pts) / 90); #ifdef YADIF if (decoder->filter) { if (decoder->filter == 1) { @@ -611,7 +594,8 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) decoder->filter = 2; } } - if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag + if (frame->interlaced_frame && decoder->filter == 2 && + (frame->height != 720)) { // broken ZDF sends Interlaced flag ret = push_filters(video_ctx, decoder->HwDecoder, frame); // av_frame_unref(frame); continue; @@ -634,9 +618,8 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) } if (!consumed) { - goto next_part; // try again to stuff decoder + goto next_part; // try again to stuff decoder } - } #endif @@ -645,8 +628,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt) ** ** @param decoder video decoder data */ -void CodecVideoFlushBuffers(VideoDecoder * decoder) -{ +void CodecVideoFlushBuffers(VideoDecoder *decoder) { if (decoder->VideoCtx) { avcodec_flush_buffers(decoder->VideoCtx); } @@ -666,58 +648,46 @@ typedef struct _audio_decoder_ AudioDecoder; /// /// Audio decoder structure. 
/// -struct _audio_decoder_ -{ - AVCodec *AudioCodec; ///< audio codec - AVCodecContext *AudioCtx; ///< audio codec context +struct _audio_decoder_ { + AVCodec *AudioCodec; ///< audio codec + AVCodecContext *AudioCtx; ///< audio codec context - char Passthrough; ///< current pass-through flags - int SampleRate; ///< current stream sample rate - int Channels; ///< current stream channels + char Passthrough; ///< current pass-through flags + int SampleRate; ///< current stream sample rate + int Channels; ///< current stream channels - int HwSampleRate; ///< hw sample rate - int HwChannels; ///< hw channels + int HwSampleRate; ///< hw sample rate + int HwChannels; ///< hw channels - AVFrame *Frame; ///< decoded audio frame buffer + AVFrame *Frame; ///< decoded audio frame buffer + SwrContext *Resample; ///< ffmpeg software resample context -#ifdef USE_SWRESAMPLE -#if LIBSWRESAMPLE_VERSION_INT < AV_VERSION_INT(0, 15, 100) - struct SwrContext *Resample; ///< ffmpeg software resample context -#else - SwrContext *Resample; ///< ffmpeg software resample context -#endif -#endif -#ifdef USE_AVRESAMPLE - AVAudioResampleContext *Resample; ///< libav software resample context -#endif + uint16_t Spdif[24576 / 2]; ///< SPDIF output buffer + int SpdifIndex; ///< index into SPDIF output buffer + int SpdifCount; ///< SPDIF repeat counter - uint16_t Spdif[24576 / 2]; ///< SPDIF output buffer - int SpdifIndex; ///< index into SPDIF output buffer - int SpdifCount; ///< SPDIF repeat counter + int64_t LastDelay; ///< last delay + struct timespec LastTime; ///< last time + int64_t LastPTS; ///< last PTS - int64_t LastDelay; ///< last delay - struct timespec LastTime; ///< last time - int64_t LastPTS; ///< last PTS - - int Drift; ///< accumulated audio drift - int DriftCorr; ///< audio drift correction value - int DriftFrac; ///< audio drift fraction for ac3 + int Drift; ///< accumulated audio drift + int DriftCorr; ///< audio drift correction value + int DriftFrac; ///< audio drift fraction for ac3 }; /// /// IEC Data type enumeration. /// -enum IEC61937 -{ - IEC61937_AC3 = 0x01, ///< AC-3 data +enum IEC61937 { + IEC61937_AC3 = 0x01, ///< AC-3 data // FIXME: more data types - IEC61937_EAC3 = 0x15, ///< E-AC-3 data + IEC61937_EAC3 = 0x15, ///< E-AC-3 data }; #ifdef USE_AUDIO_DRIFT_CORRECTION -#define CORRECT_PCM 1 ///< do PCM audio-drift correction -#define CORRECT_AC3 2 ///< do AC-3 audio-drift correction -static char CodecAudioDrift; ///< flag: enable audio-drift correction +#define CORRECT_PCM 1 ///< do PCM audio-drift correction +#define CORRECT_AC3 2 ///< do AC-3 audio-drift correction +static char CodecAudioDrift; ///< flag: enable audio-drift correction #else static const int CodecAudioDrift = 0; #endif @@ -729,15 +699,14 @@ static char CodecPassthrough; #else static const int CodecPassthrough = 0; #endif -static char CodecDownmix; ///< enable AC-3 decoder downmix +static char CodecDownmix; ///< enable AC-3 decoder downmix /** ** Allocate a new audio decoder context. ** ** @returns private decoder pointer for audio decoder. 
*/ -AudioDecoder *CodecAudioNewDecoder(void) -{ +AudioDecoder *CodecAudioNewDecoder(void) { AudioDecoder *audio_decoder; if (!(audio_decoder = calloc(1, sizeof(*audio_decoder)))) { @@ -755,9 +724,8 @@ AudioDecoder *CodecAudioNewDecoder(void) ** ** @param decoder private audio decoder */ -void CodecAudioDelDecoder(AudioDecoder * decoder) -{ - av_frame_free(&decoder->Frame); // callee does checks +void CodecAudioDelDecoder(AudioDecoder *decoder) { + av_frame_free(&decoder->Frame); // callee does checks free(decoder); } @@ -767,8 +735,7 @@ void CodecAudioDelDecoder(AudioDecoder * decoder) ** @param audio_decoder private audio decoder ** @param codec_id audio codec id */ -void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id) -{ +void CodecAudioOpen(AudioDecoder *audio_decoder, int codec_id) { AVCodec *audio_codec; Debug(3, "codec: using audio codec ID %#06x (%s)\n", codec_id, avcodec_get_name(codec_id)); @@ -817,20 +784,12 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id) ** ** @param audio_decoder private audio decoder */ -void CodecAudioClose(AudioDecoder * audio_decoder) -{ +void CodecAudioClose(AudioDecoder *audio_decoder) { // FIXME: output any buffered data -#ifdef USE_SWRESAMPLE if (audio_decoder->Resample) { swr_free(&audio_decoder->Resample); } -#endif -#ifdef USE_AVRESAMPLE - if (audio_decoder->Resample) { - avresample_free(&audio_decoder->Resample); - } -#endif if (audio_decoder->AudioCtx) { pthread_mutex_lock(&CodecLockMutex); avcodec_close(audio_decoder->AudioCtx); @@ -844,8 +803,7 @@ void CodecAudioClose(AudioDecoder * audio_decoder) ** ** @param mask enable mask (PCM, AC-3) */ -void CodecSetAudioDrift(int mask) -{ +void CodecSetAudioDrift(int mask) { #ifdef USE_AUDIO_DRIFT_CORRECTION CodecAudioDrift = mask & (CORRECT_PCM | CORRECT_AC3); #endif @@ -857,8 +815,7 @@ void CodecSetAudioDrift(int mask) ** ** @param mask enable mask (PCM, AC-3, E-AC-3) */ -void CodecSetAudioPassthrough(int mask) -{ +void CodecSetAudioPassthrough(int mask) { #ifdef USE_PASSTHROUGH CodecPassthrough = mask & (CodecPCM | CodecAC3 | CodecEAC3); #endif @@ -870,8 +827,7 @@ void CodecSetAudioPassthrough(int mask) ** ** @param onoff enable/disable downmix. */ -void CodecSetAudioDownmix(int onoff) -{ +void CodecSetAudioDownmix(int onoff) { if (onoff == -1) { CodecDownmix ^= 1; return; @@ -882,16 +838,15 @@ void CodecSetAudioDownmix(int onoff) /** ** Reorder audio frame. 
** -** ffmpeg L R C Ls Rs -> alsa L R Ls Rs C -** ffmpeg L R C LFE Ls Rs -> alsa L R Ls Rs C LFE -** ffmpeg L R C LFE Ls Rs Rl Rr -> alsa L R Ls Rs C LFE Rl Rr +** ffmpeg L R C Ls Rs -> alsa L R Ls Rs C +** ffmpeg L R C LFE Ls Rs -> alsa L R Ls Rs C LFE +** ffmpeg L R C LFE Ls Rs Rl Rr -> alsa L R Ls Rs C LFE Rl Rr ** -** @param buf[IN,OUT] sample buffer -** @param size size of sample buffer in bytes -** @param channels number of channels interleaved in sample buffer +** @param buf[IN,OUT] sample buffer +** @param size size of sample buffer in bytes +** @param channels number of channels interleaved in sample buffer */ -static void CodecReorderAudioFrame(int16_t * buf, int size, int channels) -{ +static void CodecReorderAudioFrame(int16_t *buf, int size, int channels) { int i; int c; int ls; @@ -945,17 +900,16 @@ static void CodecReorderAudioFrame(int16_t * buf, int size, int channels) ** @param audio_decoder audio decoder data ** @param[out] passthrough pass-through output */ -static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough) -{ +static int CodecAudioUpdateHelper(AudioDecoder *audio_decoder, int *passthrough) { const AVCodecContext *audio_ctx; int err; audio_ctx = audio_decoder->AudioCtx; Debug(3, "codec/audio: format change %s %dHz *%d channels%s%s%s%s%s\n", - av_get_sample_fmt_name(audio_ctx->sample_fmt), audio_ctx->sample_rate, audio_ctx->channels, - CodecPassthrough & CodecPCM ? " PCM" : "", CodecPassthrough & CodecMPA ? " MPA" : "", - CodecPassthrough & CodecAC3 ? " AC-3" : "", CodecPassthrough & CodecEAC3 ? " E-AC-3" : "", - CodecPassthrough ? " pass-through" : ""); + av_get_sample_fmt_name(audio_ctx->sample_fmt), audio_ctx->sample_rate, audio_ctx->channels, + CodecPassthrough & CodecPCM ? " PCM" : "", CodecPassthrough & CodecMPA ? " MPA" : "", + CodecPassthrough & CodecAC3 ? " AC-3" : "", CodecPassthrough & CodecEAC3 ? " E-AC-3" : "", + CodecPassthrough ? 
" pass-through" : ""); *passthrough = 0; audio_decoder->SampleRate = audio_ctx->sample_rate; @@ -965,14 +919,14 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough audio_decoder->Passthrough = CodecPassthrough; // SPDIF/HDMI pass-through - if ((CodecPassthrough & CodecAC3 && audio_ctx->codec_id == AV_CODEC_ID_AC3) - || (CodecPassthrough & CodecEAC3 && audio_ctx->codec_id == AV_CODEC_ID_EAC3)) { + if ((CodecPassthrough & CodecAC3 && audio_ctx->codec_id == AV_CODEC_ID_AC3) || + (CodecPassthrough & CodecEAC3 && audio_ctx->codec_id == AV_CODEC_ID_EAC3)) { if (audio_ctx->codec_id == AV_CODEC_ID_EAC3) { // E-AC-3 over HDMI some receivers need HBR audio_decoder->HwSampleRate *= 4; } audio_decoder->HwChannels = 2; - audio_decoder->SpdifIndex = 0; // reset buffer + audio_decoder->SpdifIndex = 0; // reset buffer audio_decoder->SpdifCount = 0; *passthrough = 1; } @@ -981,8 +935,8 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough // try E-AC-3 none HBR audio_decoder->HwSampleRate /= 4; - if (audio_ctx->codec_id != AV_CODEC_ID_EAC3 - || (err = AudioSetup(&audio_decoder->HwSampleRate, &audio_decoder->HwChannels, *passthrough))) { + if (audio_ctx->codec_id != AV_CODEC_ID_EAC3 || + (err = AudioSetup(&audio_decoder->HwSampleRate, &audio_decoder->HwChannels, *passthrough))) { Debug(3, "codec/audio: audio setup error\n"); // FIXME: handle errors @@ -993,8 +947,8 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough } Debug(3, "codec/audio: resample %s %dHz *%d -> %s %dHz *%d\n", av_get_sample_fmt_name(audio_ctx->sample_fmt), - audio_ctx->sample_rate, audio_ctx->channels, av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), - audio_decoder->HwSampleRate, audio_decoder->HwChannels); + audio_ctx->sample_rate, audio_ctx->channels, av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), + audio_decoder->HwSampleRate, audio_decoder->HwChannels); return 0; } @@ -1003,10 +957,9 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough ** Audio pass-through decoder helper. 
** ** @param audio_decoder audio decoder data -** @param avpkt undecoded audio packet +** @param avpkt undecoded audio packet */ -static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPacket * avpkt) -{ +static int CodecAudioPassthroughHelper(AudioDecoder *audio_decoder, const AVPacket *avpkt) { #ifdef USE_PASSTHROUGH const AVCodecContext *audio_ctx; @@ -1025,14 +978,13 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac if (CodecAudioDrift & CORRECT_AC3) { int x; - x = (audio_decoder->DriftFrac + - (audio_decoder->DriftCorr * spdif_sz)) / (10 * audio_decoder->HwSampleRate * 100); - audio_decoder->DriftFrac = - (audio_decoder->DriftFrac + - (audio_decoder->DriftCorr * spdif_sz)) % (10 * audio_decoder->HwSampleRate * 100); + x = (audio_decoder->DriftFrac + (audio_decoder->DriftCorr * spdif_sz)) / + (10 * audio_decoder->HwSampleRate * 100); + audio_decoder->DriftFrac = (audio_decoder->DriftFrac + (audio_decoder->DriftCorr * spdif_sz)) % + (10 * audio_decoder->HwSampleRate * 100); // round to word border x *= audio_decoder->HwChannels * 4; - if (x < -64) { // limit correction + if (x < -64) { // limit correction x = -64; } else if (x > 64) { x = 64; @@ -1047,7 +999,7 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac Error(_("codec/audio: decoded data smaller than encoded\n")); return -1; } - spdif[0] = htole16(0xF872); // iec 61937 sync word + spdif[0] = htole16(0xF872); // iec 61937 sync word spdif[1] = htole16(0x4E1F); spdif[2] = htole16(IEC61937_AC3 | (avpkt->data[5] & 0x07) << 8); spdif[3] = htole16(avpkt->size * 8); @@ -1068,7 +1020,7 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac // build SPDIF header and append A52 audio to it // avpkt is the original data spdif = audio_decoder->Spdif; - spdif_sz = 24576; // 4 * 6144 + spdif_sz = 24576; // 4 * 6144 if (audio_decoder->HwSampleRate == 48000) { spdif_sz = 6144; } @@ -1078,8 +1030,8 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac } // check if we must pack multiple packets repeat = 1; - if ((avpkt->data[4] & 0xc0) != 0xc0) { // fscod - static const uint8_t eac3_repeat[4] = { 6, 3, 2, 1 }; + if ((avpkt->data[4] & 0xc0) != 0xc0) { // fscod + static const uint8_t eac3_repeat[4] = {6, 3, 2, 1}; // fscod2 repeat = eac3_repeat[(avpkt->data[4] & 0x30) >> 4]; @@ -1095,7 +1047,7 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac return 1; } - spdif[0] = htole16(0xF872); // iec 61937 sync word + spdif[0] = htole16(0xF872); // iec 61937 sync word spdif[1] = htole16(0x4E1F); spdif[2] = htole16(IEC61937_EAC3); spdif[3] = htole16(audio_decoder->SpdifIndex * 8); @@ -1112,16 +1064,13 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac return 0; } -#if defined(USE_SWRESAMPLE) || defined(USE_AVRESAMPLE) - /** ** Set/update audio pts clock. 
** ** @param audio_decoder audio decoder data -** @param pts presentation timestamp +** @param pts presentation timestamp */ -static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) -{ +static void CodecAudioSetClock(AudioDecoder *audio_decoder, int64_t pts) { #ifdef USE_AUDIO_DRIFT_CORRECTION struct timespec nowtime; int64_t delay; @@ -1152,8 +1101,8 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) return; } - tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec) - * 1000 * 1000 * 1000 + (nowtime.tv_nsec - audio_decoder->LastTime.tv_nsec); + tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec) * 1000 * 1000 * 1000 + + (nowtime.tv_nsec - audio_decoder->LastTime.tv_nsec); drift = (tim_diff * 90) / (1000 * 1000) - pts_diff + delay - audio_decoder->LastDelay; @@ -1165,7 +1114,7 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) if (0) { Debug(3, "codec/audio: interval P:%5" PRId64 "ms T:%5" PRId64 "ms D:%4" PRId64 "ms %f %d\n", pts_diff / 90, - tim_diff / (1000 * 1000), delay / 90, drift / 90.0, audio_decoder->DriftCorr); + tim_diff / (1000 * 1000), delay / 90, drift / 90.0, audio_decoder->DriftCorr); } // underruns and av_resample have the same time :((( if (abs(drift) > 10 * 90) { @@ -1173,7 +1122,7 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) Debug(3, "codec/audio: drift(%6d) %3dms reset\n", audio_decoder->DriftCorr, drift / 90); audio_decoder->LastDelay = 0; #ifdef DEBUG - corr = 0; // keep gcc happy + corr = 0; // keep gcc happy #endif } else { @@ -1181,21 +1130,19 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) audio_decoder->Drift = drift; corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000); // SPDIF/HDMI passthrough - if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) - && (!(CodecPassthrough & CodecEAC3) - || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { + if ((CodecAudioDrift & CORRECT_AC3) && + (!(CodecPassthrough & CodecAC3) || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3) && + (!(CodecPassthrough & CodecEAC3) || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) { audio_decoder->DriftCorr = -corr; } - if (audio_decoder->DriftCorr < -20000) { // limit correction + if (audio_decoder->DriftCorr < -20000) { // limit correction audio_decoder->DriftCorr = -20000; } else if (audio_decoder->DriftCorr > 20000) { audio_decoder->DriftCorr = 20000; } } -#ifdef USE_SWRESAMPLE if (audio_decoder->Resample && audio_decoder->DriftCorr) { int distance; @@ -1209,17 +1156,6 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) Debug(3, "codec/audio: swr_set_compensation failed\n"); } } -#endif -#ifdef USE_AVRESAMPLE - if (audio_decoder->Resample && audio_decoder->DriftCorr) { - int distance; - - distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000); - if (avresample_set_compensation(audio_decoder->Resample, audio_decoder->DriftCorr / 10, distance)) { - Debug(3, "codec/audio: swr_set_compensation failed\n"); - } - } -#endif if (1) { static int c; @@ -1237,8 +1173,7 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts) ** ** @param audio_decoder audio decoder data */ -static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder) -{ +static void CodecAudioUpdateFormat(AudioDecoder *audio_decoder) { int passthrough; const AVCodecContext *audio_ctx; @@ -1246,51 +1181,28 @@ static void 
CodecAudioUpdateFormat(AudioDecoder * audio_decoder) // FIXME: handle swresample format conversions. return; } - if (passthrough) { // pass-through no conversion allowed + if (passthrough) { // pass-through no conversion allowed return; } audio_ctx = audio_decoder->AudioCtx; #ifdef DEBUG - if (audio_ctx->sample_fmt == AV_SAMPLE_FMT_S16 && audio_ctx->sample_rate == audio_decoder->HwSampleRate - && !CodecAudioDrift) { + if (audio_ctx->sample_fmt == AV_SAMPLE_FMT_S16 && audio_ctx->sample_rate == audio_decoder->HwSampleRate && + !CodecAudioDrift) { // FIXME: use Resample only, when it is needed! fprintf(stderr, "no resample needed\n"); } #endif -#ifdef USE_SWRESAMPLE - audio_decoder->Resample = - swr_alloc_set_opts(audio_decoder->Resample, audio_ctx->channel_layout, AV_SAMPLE_FMT_S16, - audio_decoder->HwSampleRate, audio_ctx->channel_layout, audio_ctx->sample_fmt, audio_ctx->sample_rate, 0, - NULL); + audio_decoder->Resample = swr_alloc_set_opts(audio_decoder->Resample, audio_ctx->channel_layout, AV_SAMPLE_FMT_S16, + audio_decoder->HwSampleRate, audio_ctx->channel_layout, + audio_ctx->sample_fmt, audio_ctx->sample_rate, 0, NULL); if (audio_decoder->Resample) { swr_init(audio_decoder->Resample); } else { Error(_("codec/audio: can't setup resample\n")); } -#endif -#ifdef USE_AVRESAMPLE - if (!(audio_decoder->Resample = avresample_alloc_context())) { - Error(_("codec/audio: can't setup resample\n")); - return; - } - - av_opt_set_int(audio_decoder->Resample, "in_channel_layout", audio_ctx->channel_layout, 0); - av_opt_set_int(audio_decoder->Resample, "in_sample_fmt", audio_ctx->sample_fmt, 0); - av_opt_set_int(audio_decoder->Resample, "in_sample_rate", audio_ctx->sample_rate, 0); - av_opt_set_int(audio_decoder->Resample, "out_channel_layout", audio_ctx->channel_layout, 0); - av_opt_set_int(audio_decoder->Resample, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); - av_opt_set_int(audio_decoder->Resample, "out_sample_rate", audio_decoder->HwSampleRate, 0); - - if (avresample_open(audio_decoder->Resample)) { - avresample_free(&audio_decoder->Resample); - audio_decoder->Resample = NULL; - Error(_("codec/audio: can't open resample\n")); - return; - } -#endif } /** @@ -1301,11 +1213,10 @@ static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder) ** @note the caller has not aligned avpkt and not cleared the end. 
** ** @param audio_decoder audio decoder data -** @param avpkt audio packet +** @param avpkt audio packet */ -void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) -{ +void CodecAudioDecode(AudioDecoder *audio_decoder, const AVPacket *avpkt) { AVCodecContext *audio_ctx = audio_decoder->AudioCtx; if (audio_ctx->codec_type == AVMEDIA_TYPE_AUDIO) { @@ -1314,7 +1225,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) AVFrame *frame = audio_decoder->Frame; av_frame_unref(frame); - *pkt = *avpkt; // use copy + *pkt = *avpkt; // use copy ret = avcodec_send_packet(audio_ctx, pkt); if (ret < 0) { Debug(3, "codec: sending audio packet failed"); @@ -1328,16 +1239,17 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) if (ret >= 0) { // update audio clock - if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) { + if (avpkt->pts != (int64_t)AV_NOPTS_VALUE) { CodecAudioSetClock(audio_decoder, avpkt->pts); } // format change - if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate - || audio_decoder->Channels != audio_ctx->channels) { + if (audio_decoder->Passthrough != CodecPassthrough || + audio_decoder->SampleRate != audio_ctx->sample_rate || + audio_decoder->Channels != audio_ctx->channels) { CodecAudioUpdateFormat(audio_decoder); } if (!audio_decoder->HwSampleRate || !audio_decoder->HwChannels) { - return; // unsupported sample format + return; // unsupported sample format } if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) { return; @@ -1347,13 +1259,12 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) uint8_t *out[1]; out[0] = outbuf; - ret = - swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels), - (const uint8_t **)frame->extended_data, frame->nb_samples); + ret = swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels), + (const uint8_t **)frame->extended_data, frame->nb_samples); if (ret > 0) { if (!(audio_decoder->Passthrough & CodecPCM)) { - CodecReorderAudioFrame((int16_t *) outbuf, ret * 2 * audio_decoder->HwChannels, - audio_decoder->HwChannels); + CodecReorderAudioFrame((int16_t *)outbuf, ret * 2 * audio_decoder->HwChannels, + audio_decoder->HwChannels); } AudioEnqueue(outbuf, ret * 2 * audio_decoder->HwChannels); } @@ -1363,18 +1274,12 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt) } } -#endif - /** ** Flush the audio decoder. 
** ** @param decoder audio decoder data */ -void CodecAudioFlushBuffers(AudioDecoder * decoder) -{ - - avcodec_flush_buffers(decoder->AudioCtx); -} +void CodecAudioFlushBuffers(AudioDecoder *decoder) { avcodec_flush_buffers(decoder->AudioCtx); } //---------------------------------------------------------------------------- // Codec @@ -1383,18 +1288,13 @@ void CodecAudioFlushBuffers(AudioDecoder * decoder) /** ** Empty log callback */ -static void CodecNoopCallback( __attribute__((unused)) - void *ptr, __attribute__((unused)) - int level, __attribute__((unused)) - const char *fmt, __attribute__((unused)) va_list vl) -{ -} +static void CodecNoopCallback(__attribute__((unused)) void *ptr, __attribute__((unused)) int level, + __attribute__((unused)) const char *fmt, __attribute__((unused)) va_list vl) {} /** ** Codec init */ -void CodecInit(void) -{ +void CodecInit(void) { pthread_mutex_init(&CodecLockMutex, NULL); #ifndef DEBUG // disable display ffmpeg error messages @@ -1407,7 +1307,4 @@ void CodecInit(void) /** ** Codec exit. */ -void CodecExit(void) -{ - pthread_mutex_destroy(&CodecLockMutex); -} +void CodecExit(void) { pthread_mutex_destroy(&CodecLockMutex); } diff --git a/codec.h b/codec.h index 2a28c8f..e5b2003 100644 --- a/codec.h +++ b/codec.h @@ -27,16 +27,15 @@ // Defines //---------------------------------------------------------------------------- -#define CodecPCM 0x01 ///< PCM bit mask -#define CodecMPA 0x02 ///< MPA bit mask (planned) -#define CodecAC3 0x04 ///< AC-3 bit mask -#define CodecEAC3 0x08 ///< E-AC-3 bit mask -#define CodecDTS 0x10 ///< DTS bit mask (planned) +#define CodecPCM 0x01 ///< PCM bit mask +#define CodecMPA 0x02 ///< MPA bit mask (planned) +#define CodecAC3 0x04 ///< AC-3 bit mask +#define CodecEAC3 0x08 ///< E-AC-3 bit mask +#define CodecDTS 0x10 ///< DTS bit mask (planned) #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 -enum HWAccelID -{ +enum HWAccelID { HWACCEL_NONE = 0, HWACCEL_AUTO, HWACCEL_VDPAU, @@ -53,19 +52,18 @@ extern AVBufferRef *hw_device_ctx; /// /// Video decoder structure. /// -struct _video_decoder_ -{ - VideoHwDecoder *HwDecoder; ///< video hardware decoder +struct _video_decoder_ { + VideoHwDecoder *HwDecoder; ///< video hardware decoder - int GetFormatDone; ///< flag get format called! - AVCodec *VideoCodec; ///< video codec - AVCodecContext *VideoCtx; ///< video codec context + int GetFormatDone; ///< flag get format called! 
+ AVCodec *VideoCodec; ///< video codec + AVCodecContext *VideoCtx; ///< video codec context // #ifdef FFMPEG_WORKAROUND_ARTIFACTS - int FirstKeyFrame; ///< flag first frame + int FirstKeyFrame; ///< flag first frame // #endif - // AVFrame *Frame; ///< decoded video frame + // AVFrame *Frame; ///< decoded video frame - int filter; // flag for deint filter + int filter; // flag for deint filter /* hwaccel options */ enum HWAccelID hwaccel_id; @@ -75,9 +73,9 @@ struct _video_decoder_ /* hwaccel context */ enum HWAccelID active_hwaccel_id; void *hwaccel_ctx; - void (*hwaccel_uninit)(AVCodecContext * s); - int (*hwaccel_get_buffer)(AVCodecContext * s, AVFrame * frame, int flags); - int (*hwaccel_retrieve_data)(AVCodecContext * s, AVFrame * frame); + void (*hwaccel_uninit)(AVCodecContext *s); + int (*hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags); + int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame); enum AVPixelFormat hwaccel_pix_fmt; enum AVPixelFormat hwaccel_retrieved_pix_fmt; AVBufferRef *hw_frames_ctx; @@ -87,7 +85,6 @@ struct _video_decoder_ double cached_hdr_peak; // From VO struct mp_hwdec_devices *hwdec_devs; - }; //---------------------------------------------------------------------------- diff --git a/common.h b/common.h index ceb935e..35f4f54 100644 --- a/common.h +++ b/common.h @@ -19,17 +19,17 @@ #ifndef MPLAYER_GL_COMMON_H #define MPLAYER_GL_COMMON_H -#include -#include #include +#include +#include #if 0 -#include "config.h" #include "common/msg.h" +#include "config.h" #include "misc/bstr.h" -#include "video/out/vo.h" #include "video/csputils.h" #include "video/mp_image.h" +#include "video/out/vo.h" #endif #if HAVE_GL_COCOA @@ -53,9 +53,8 @@ struct GL; typedef struct GL GL; -enum -{ - MPGL_CAP_ROW_LENGTH = (1 << 4), // GL_[UN]PACK_ROW_LENGTH +enum { + MPGL_CAP_ROW_LENGTH = (1 << 4), // GL_[UN]PACK_ROW_LENGTH MPGL_CAP_FB = (1 << 5), MPGL_CAP_VAO = (1 << 6), MPGL_CAP_TEX_RG = (1 << 10), // GL_ARB_texture_rg / GL 3.x @@ -69,157 +68,158 @@ enum MPGL_CAP_ARB_FLOAT = (1 << 19), // GL_ARB_texture_float MPGL_CAP_EXT_CR_HFLOAT = (1 << 20), // GL_EXT_color_buffer_half_float - MPGL_CAP_SW = (1 << 30), // indirect or sw renderer + MPGL_CAP_SW = (1 << 30), // indirect or sw renderer }; // E.g. 310 means 3.1 // Code doesn't have to use the macros; they are for convenience only. -#define MPGL_VER(major, minor) (((major) * 100) + (minor) * 10) +#define MPGL_VER(major, minor) (((major)*100) + (minor)*10) #define MPGL_VER_GET_MAJOR(ver) ((unsigned)(ver) / 100) #define MPGL_VER_GET_MINOR(ver) ((unsigned)(ver) % 100 / 10) #define MPGL_VER_P(ver) MPGL_VER_GET_MAJOR(ver), MPGL_VER_GET_MINOR(ver) -void mpgl_load_functions(GL * gl, void *(*getProcAddress)(const GLubyte *), const char *ext2, struct mp_log *log); -void mpgl_load_functions2(GL * gl, void *(*get_fn)(void *ctx, const char *n), void *fn_ctx, const char *ext2, - struct mp_log *log); +void mpgl_load_functions(GL *gl, void *(*getProcAddress)(const GLubyte *), const char *ext2, struct mp_log *log); +void mpgl_load_functions2(GL *gl, void *(*get_fn)(void *ctx, const char *n), void *fn_ctx, const char *ext2, + struct mp_log *log); -typedef void (GLAPIENTRY * MP_GLDEBUGPROC) (GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar *, const void *); +typedef void(GLAPIENTRY *MP_GLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar *, const void *); // function pointers loaded from the OpenGL library -struct GL -{ - int version; // MPGL_VER() mangled (e.g. 210 for 2.1) - int es; // es version (e.g. 
300), 0 for desktop GL - int glsl_version; // e.g. 130 for GLSL 1.30 - char *extensions; // Equivalent to GL_EXTENSIONS - int mpgl_caps; // Bitfield of MPGL_CAP_* constants - bool debug_context; // use of e.g. GLX_CONTEXT_DEBUG_BIT_ARB - GLuint main_fb; // framebuffer to render to (normally 0) +struct GL { + int version; // MPGL_VER() mangled (e.g. 210 for 2.1) + int es; // es version (e.g. 300), 0 for desktop GL + int glsl_version; // e.g. 130 for GLSL 1.30 + char *extensions; // Equivalent to GL_EXTENSIONS + int mpgl_caps; // Bitfield of MPGL_CAP_* constants + bool debug_context; // use of e.g. GLX_CONTEXT_DEBUG_BIT_ARB + GLuint main_fb; // framebuffer to render to (normally 0) - void (GLAPIENTRY * Viewport) (GLint, GLint, GLsizei, GLsizei); - void (GLAPIENTRY * Clear) (GLbitfield); - void (GLAPIENTRY * GenTextures) (GLsizei, GLuint *); - void (GLAPIENTRY * DeleteTextures) (GLsizei, const GLuint *); - void (GLAPIENTRY * ClearColor) (GLclampf, GLclampf, GLclampf, GLclampf); - void (GLAPIENTRY * Enable) (GLenum); - void (GLAPIENTRY * Disable) (GLenum); - const GLubyte *(GLAPIENTRY * GetString) (GLenum); - void (GLAPIENTRY * BlendFuncSeparate) (GLenum, GLenum, GLenum, GLenum); - void (GLAPIENTRY * Flush) (void); - void (GLAPIENTRY * Finish) (void); - void (GLAPIENTRY * PixelStorei) (GLenum, GLint); - void (GLAPIENTRY * TexImage1D) (GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, const GLvoid *); - void (GLAPIENTRY * TexImage2D) (GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, const GLvoid *); - void (GLAPIENTRY * TexSubImage2D) (GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, const GLvoid *); - void (GLAPIENTRY * TexParameteri) (GLenum, GLenum, GLint); - void (GLAPIENTRY * GetIntegerv) (GLenum, GLint *); - void (GLAPIENTRY * ReadPixels) (GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLvoid *); - void (GLAPIENTRY * ReadBuffer) (GLenum); - void (GLAPIENTRY * DrawArrays) (GLenum, GLint, GLsizei); - GLenum(GLAPIENTRY * GetError) (void); - void (GLAPIENTRY * GetTexLevelParameteriv) (GLenum, GLint, GLenum, GLint *); - void (GLAPIENTRY * Scissor) (GLint, GLint, GLsizei, GLsizei); + void(GLAPIENTRY *Viewport)(GLint, GLint, GLsizei, GLsizei); + void(GLAPIENTRY *Clear)(GLbitfield); + void(GLAPIENTRY *GenTextures)(GLsizei, GLuint *); + void(GLAPIENTRY *DeleteTextures)(GLsizei, const GLuint *); + void(GLAPIENTRY *ClearColor)(GLclampf, GLclampf, GLclampf, GLclampf); + void(GLAPIENTRY *Enable)(GLenum); + void(GLAPIENTRY *Disable)(GLenum); + const GLubyte *(GLAPIENTRY *GetString)(GLenum); + void(GLAPIENTRY *BlendFuncSeparate)(GLenum, GLenum, GLenum, GLenum); + void(GLAPIENTRY *Flush)(void); + void(GLAPIENTRY *Finish)(void); + void(GLAPIENTRY *PixelStorei)(GLenum, GLint); + void(GLAPIENTRY *TexImage1D)(GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, const GLvoid *); + void(GLAPIENTRY *TexImage2D)(GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, const GLvoid *); + void(GLAPIENTRY *TexSubImage2D)(GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, const GLvoid *); + void(GLAPIENTRY *TexParameteri)(GLenum, GLenum, GLint); + void(GLAPIENTRY *GetIntegerv)(GLenum, GLint *); + void(GLAPIENTRY *ReadPixels)(GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLvoid *); + void(GLAPIENTRY *ReadBuffer)(GLenum); + void(GLAPIENTRY *DrawArrays)(GLenum, GLint, GLsizei); + GLenum(GLAPIENTRY *GetError)(void); + void(GLAPIENTRY *GetTexLevelParameteriv)(GLenum, GLint, GLenum, GLint *); + void(GLAPIENTRY *Scissor)(GLint, GLint, GLsizei, GLsizei); - void 
(GLAPIENTRY * GenBuffers) (GLsizei, GLuint *); - void (GLAPIENTRY * DeleteBuffers) (GLsizei, const GLuint *); - void (GLAPIENTRY * BindBuffer) (GLenum, GLuint); - void (GLAPIENTRY * BindBufferBase) (GLenum, GLuint, GLuint); - GLvoid *(GLAPIENTRY * MapBufferRange) (GLenum, GLintptr, GLsizeiptr, GLbitfield); - GLboolean(GLAPIENTRY * UnmapBuffer) (GLenum); - void (GLAPIENTRY * BufferData) (GLenum, intptr_t, const GLvoid *, GLenum); - void (GLAPIENTRY * ActiveTexture) (GLenum); - void (GLAPIENTRY * BindTexture) (GLenum, GLuint); - int (GLAPIENTRY * SwapInterval) (int); - void (GLAPIENTRY * TexImage3D) (GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, - const GLvoid *); + void(GLAPIENTRY *GenBuffers)(GLsizei, GLuint *); + void(GLAPIENTRY *DeleteBuffers)(GLsizei, const GLuint *); + void(GLAPIENTRY *BindBuffer)(GLenum, GLuint); + void(GLAPIENTRY *BindBufferBase)(GLenum, GLuint, GLuint); + GLvoid *(GLAPIENTRY *MapBufferRange)(GLenum, GLintptr, GLsizeiptr, GLbitfield); + GLboolean(GLAPIENTRY *UnmapBuffer)(GLenum); + void(GLAPIENTRY *BufferData)(GLenum, intptr_t, const GLvoid *, GLenum); + void(GLAPIENTRY *ActiveTexture)(GLenum); + void(GLAPIENTRY *BindTexture)(GLenum, GLuint); + int(GLAPIENTRY *SwapInterval)(int); + void(GLAPIENTRY *TexImage3D)(GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, + const GLvoid *); - void (GLAPIENTRY * GenVertexArrays) (GLsizei, GLuint *); - void (GLAPIENTRY * BindVertexArray) (GLuint); - GLint(GLAPIENTRY * GetAttribLocation) (GLuint, const GLchar *); - void (GLAPIENTRY * EnableVertexAttribArray) (GLuint); - void (GLAPIENTRY * DisableVertexAttribArray) (GLuint); - void (GLAPIENTRY * VertexAttribPointer) (GLuint, GLint, GLenum, GLboolean, GLsizei, const GLvoid *); - void (GLAPIENTRY * DeleteVertexArrays) (GLsizei, const GLuint *); - void (GLAPIENTRY * UseProgram) (GLuint); - GLint(GLAPIENTRY * GetUniformLocation) (GLuint, const GLchar *); - void (GLAPIENTRY * CompileShader) (GLuint); - GLuint(GLAPIENTRY * CreateProgram) (void); - GLuint(GLAPIENTRY * CreateShader) (GLenum); - void (GLAPIENTRY * ShaderSource) (GLuint, GLsizei, const GLchar **, const GLint *); - void (GLAPIENTRY * LinkProgram) (GLuint); - void (GLAPIENTRY * AttachShader) (GLuint, GLuint); - void (GLAPIENTRY * DeleteShader) (GLuint); - void (GLAPIENTRY * DeleteProgram) (GLuint); - void (GLAPIENTRY * GetShaderInfoLog) (GLuint, GLsizei, GLsizei *, GLchar *); - void (GLAPIENTRY * GetShaderiv) (GLuint, GLenum, GLint *); - void (GLAPIENTRY * GetProgramInfoLog) (GLuint, GLsizei, GLsizei *, GLchar *); - void (GLAPIENTRY * GetProgramiv) (GLenum, GLenum, GLint *); - const GLubyte *(GLAPIENTRY * GetStringi) (GLenum, GLuint); - void (GLAPIENTRY * BindAttribLocation) (GLuint, GLuint, const GLchar *); - void (GLAPIENTRY * BindFramebuffer) (GLenum, GLuint); - void (GLAPIENTRY * GenFramebuffers) (GLsizei, GLuint *); - void (GLAPIENTRY * DeleteFramebuffers) (GLsizei, const GLuint *); - GLenum(GLAPIENTRY * CheckFramebufferStatus) (GLenum); - void (GLAPIENTRY * FramebufferTexture2D) (GLenum, GLenum, GLenum, GLuint, GLint); - void (GLAPIENTRY * BlitFramebuffer) (GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum); - void (GLAPIENTRY * GetFramebufferAttachmentParameteriv) (GLenum, GLenum, GLenum, GLint *); + void(GLAPIENTRY *GenVertexArrays)(GLsizei, GLuint *); + void(GLAPIENTRY *BindVertexArray)(GLuint); + GLint(GLAPIENTRY *GetAttribLocation)(GLuint, const GLchar *); + void(GLAPIENTRY *EnableVertexAttribArray)(GLuint); + void(GLAPIENTRY 
*DisableVertexAttribArray)(GLuint); + void(GLAPIENTRY *VertexAttribPointer)(GLuint, GLint, GLenum, GLboolean, GLsizei, const GLvoid *); + void(GLAPIENTRY *DeleteVertexArrays)(GLsizei, const GLuint *); + void(GLAPIENTRY *UseProgram)(GLuint); + GLint(GLAPIENTRY *GetUniformLocation)(GLuint, const GLchar *); + void(GLAPIENTRY *CompileShader)(GLuint); + GLuint(GLAPIENTRY *CreateProgram)(void); + GLuint(GLAPIENTRY *CreateShader)(GLenum); + void(GLAPIENTRY *ShaderSource)(GLuint, GLsizei, const GLchar **, const GLint *); + void(GLAPIENTRY *LinkProgram)(GLuint); + void(GLAPIENTRY *AttachShader)(GLuint, GLuint); + void(GLAPIENTRY *DeleteShader)(GLuint); + void(GLAPIENTRY *DeleteProgram)(GLuint); + void(GLAPIENTRY *GetShaderInfoLog)(GLuint, GLsizei, GLsizei *, GLchar *); + void(GLAPIENTRY *GetShaderiv)(GLuint, GLenum, GLint *); + void(GLAPIENTRY *GetProgramInfoLog)(GLuint, GLsizei, GLsizei *, GLchar *); + void(GLAPIENTRY *GetProgramiv)(GLenum, GLenum, GLint *); + const GLubyte *(GLAPIENTRY *GetStringi)(GLenum, GLuint); + void(GLAPIENTRY *BindAttribLocation)(GLuint, GLuint, const GLchar *); + void(GLAPIENTRY *BindFramebuffer)(GLenum, GLuint); + void(GLAPIENTRY *GenFramebuffers)(GLsizei, GLuint *); + void(GLAPIENTRY *DeleteFramebuffers)(GLsizei, const GLuint *); + GLenum(GLAPIENTRY *CheckFramebufferStatus)(GLenum); + void(GLAPIENTRY *FramebufferTexture2D)(GLenum, GLenum, GLenum, GLuint, GLint); + void(GLAPIENTRY *BlitFramebuffer)(GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum); + void(GLAPIENTRY *GetFramebufferAttachmentParameteriv)(GLenum, GLenum, GLenum, GLint *); - void (GLAPIENTRY * Uniform1f) (GLint, GLfloat); - void (GLAPIENTRY * Uniform2f) (GLint, GLfloat, GLfloat); - void (GLAPIENTRY * Uniform3f) (GLint, GLfloat, GLfloat, GLfloat); - void (GLAPIENTRY * Uniform4f) (GLint, GLfloat, GLfloat, GLfloat, GLfloat); - void (GLAPIENTRY * Uniform1i) (GLint, GLint); - void (GLAPIENTRY * UniformMatrix2fv) (GLint, GLsizei, GLboolean, const GLfloat *); - void (GLAPIENTRY * UniformMatrix3fv) (GLint, GLsizei, GLboolean, const GLfloat *); + void(GLAPIENTRY *Uniform1f)(GLint, GLfloat); + void(GLAPIENTRY *Uniform2f)(GLint, GLfloat, GLfloat); + void(GLAPIENTRY *Uniform3f)(GLint, GLfloat, GLfloat, GLfloat); + void(GLAPIENTRY *Uniform4f)(GLint, GLfloat, GLfloat, GLfloat, GLfloat); + void(GLAPIENTRY *Uniform1i)(GLint, GLint); + void(GLAPIENTRY *UniformMatrix2fv)(GLint, GLsizei, GLboolean, const GLfloat *); + void(GLAPIENTRY *UniformMatrix3fv)(GLint, GLsizei, GLboolean, const GLfloat *); - void (GLAPIENTRY * InvalidateFramebuffer) (GLenum, GLsizei, const GLenum *); + void(GLAPIENTRY *InvalidateFramebuffer)(GLenum, GLsizei, const GLenum *); - GLsync(GLAPIENTRY * FenceSync) (GLenum, GLbitfield); - GLenum(GLAPIENTRY * ClientWaitSync) (GLsync, GLbitfield, GLuint64); - void (GLAPIENTRY * DeleteSync) (GLsync sync); + GLsync(GLAPIENTRY *FenceSync)(GLenum, GLbitfield); + GLenum(GLAPIENTRY *ClientWaitSync)(GLsync, GLbitfield, GLuint64); + void(GLAPIENTRY *DeleteSync)(GLsync sync); - void (GLAPIENTRY * GenQueries) (GLsizei, GLuint *); - void (GLAPIENTRY * DeleteQueries) (GLsizei, const GLuint *); - void (GLAPIENTRY * BeginQuery) (GLenum, GLuint); - void (GLAPIENTRY * EndQuery) (GLenum); - void (GLAPIENTRY * QueryCounter) (GLuint, GLenum); - GLboolean(GLAPIENTRY * IsQuery) (GLuint); - void (GLAPIENTRY * GetQueryObjectiv) (GLuint, GLenum, GLint *); - void (GLAPIENTRY * GetQueryObjecti64v) (GLuint, GLenum, GLint64 *); - void (GLAPIENTRY * GetQueryObjectuiv) (GLuint, GLenum, GLuint *); - void (GLAPIENTRY 
* GetQueryObjectui64v) (GLuint, GLenum, GLuint64 *); + void(GLAPIENTRY *GenQueries)(GLsizei, GLuint *); + void(GLAPIENTRY *DeleteQueries)(GLsizei, const GLuint *); + void(GLAPIENTRY *BeginQuery)(GLenum, GLuint); + void(GLAPIENTRY *EndQuery)(GLenum); + void(GLAPIENTRY *QueryCounter)(GLuint, GLenum); + GLboolean(GLAPIENTRY *IsQuery)(GLuint); + void(GLAPIENTRY *GetQueryObjectiv)(GLuint, GLenum, GLint *); + void(GLAPIENTRY *GetQueryObjecti64v)(GLuint, GLenum, GLint64 *); + void(GLAPIENTRY *GetQueryObjectuiv)(GLuint, GLenum, GLuint *); + void(GLAPIENTRY *GetQueryObjectui64v)(GLuint, GLenum, GLuint64 *); - void (GLAPIENTRY * VDPAUInitNV) (const GLvoid *, const GLvoid *); - void (GLAPIENTRY * VDPAUFiniNV) (void); - GLvdpauSurfaceNV(GLAPIENTRY * VDPAURegisterOutputSurfaceNV) - (GLvoid *, GLenum, GLsizei, const GLuint *); - GLvdpauSurfaceNV(GLAPIENTRY * VDPAURegisterVideoSurfaceNV) - (GLvoid *, GLenum, GLsizei, const GLuint *); - void (GLAPIENTRY * VDPAUUnregisterSurfaceNV) (GLvdpauSurfaceNV); - void (GLAPIENTRY * VDPAUSurfaceAccessNV) (GLvdpauSurfaceNV, GLenum); - void (GLAPIENTRY * VDPAUMapSurfacesNV) (GLsizei, const GLvdpauSurfaceNV *); - void (GLAPIENTRY * VDPAUUnmapSurfacesNV) (GLsizei, const GLvdpauSurfaceNV *); + void(GLAPIENTRY *VDPAUInitNV)(const GLvoid *, const GLvoid *); + void(GLAPIENTRY *VDPAUFiniNV)(void); + GLvdpauSurfaceNV(GLAPIENTRY *VDPAURegisterOutputSurfaceNV)(GLvoid *, GLenum, GLsizei, const GLuint *); + GLvdpauSurfaceNV(GLAPIENTRY *VDPAURegisterVideoSurfaceNV)(GLvoid *, GLenum, GLsizei, const GLuint *); + void(GLAPIENTRY *VDPAUUnregisterSurfaceNV)(GLvdpauSurfaceNV); + void(GLAPIENTRY *VDPAUSurfaceAccessNV)(GLvdpauSurfaceNV, GLenum); + void(GLAPIENTRY *VDPAUMapSurfacesNV)(GLsizei, const GLvdpauSurfaceNV *); + void(GLAPIENTRY *VDPAUUnmapSurfacesNV)(GLsizei, const GLvdpauSurfaceNV *); #if HAVE_GL_WIN32 // The HANDLE type might not be present on non-Win32 - BOOL(GLAPIENTRY * DXSetResourceShareHandleNV) (void *dxObject, HANDLE shareHandle); - HANDLE(GLAPIENTRY * DXOpenDeviceNV) (void *dxDevice); - BOOL(GLAPIENTRY * DXCloseDeviceNV) (HANDLE hDevice); - HANDLE(GLAPIENTRY * DXRegisterObjectNV) (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access); - BOOL(GLAPIENTRY * DXUnregisterObjectNV) (HANDLE hDevice, HANDLE hObject); - BOOL(GLAPIENTRY * DXLockObjectsNV) (HANDLE hDevice, GLint count, HANDLE * hObjects); - BOOL(GLAPIENTRY * DXUnlockObjectsNV) (HANDLE hDevice, GLint count, HANDLE * hObjects); + BOOL(GLAPIENTRY *DXSetResourceShareHandleNV) + (void *dxObject, HANDLE shareHandle); + HANDLE(GLAPIENTRY *DXOpenDeviceNV)(void *dxDevice); + BOOL(GLAPIENTRY *DXCloseDeviceNV)(HANDLE hDevice); + HANDLE(GLAPIENTRY *DXRegisterObjectNV) + (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access); + BOOL(GLAPIENTRY *DXUnregisterObjectNV)(HANDLE hDevice, HANDLE hObject); + BOOL(GLAPIENTRY *DXLockObjectsNV) + (HANDLE hDevice, GLint count, HANDLE *hObjects); + BOOL(GLAPIENTRY *DXUnlockObjectsNV) + (HANDLE hDevice, GLint count, HANDLE *hObjects); #endif - GLint(GLAPIENTRY * GetVideoSync) (GLuint *); - GLint(GLAPIENTRY * WaitVideoSync) (GLint, GLint, unsigned int *); + GLint(GLAPIENTRY *GetVideoSync)(GLuint *); + GLint(GLAPIENTRY *WaitVideoSync)(GLint, GLint, unsigned int *); - void (GLAPIENTRY * GetTranslatedShaderSourceANGLE) (GLuint, GLsizei, GLsizei *, GLchar * source); + void(GLAPIENTRY *GetTranslatedShaderSourceANGLE)(GLuint, GLsizei, GLsizei *, GLchar *source); - void (GLAPIENTRY * DebugMessageCallback) (MP_GLDEBUGPROC callback, const void *userParam); + 
void(GLAPIENTRY *DebugMessageCallback)(MP_GLDEBUGPROC callback, const void *userParam); - void *(GLAPIENTRY * MPGetNativeDisplay) (const char *name); + void *(GLAPIENTRY *MPGetNativeDisplay)(const char *name); }; #endif /* MPLAYER_GL_COMMON_H */ diff --git a/drm.c b/drm.c index 9948604..dafc55d 100644 --- a/drm.c +++ b/drm.c @@ -1,30 +1,26 @@ -#include +#include #include #include -#include -#include -#include +#include #include #include -#define DRM_DEBUG +#define DRM_DEBUG //---------------------------------------------------------------------------- // DRM //---------------------------------------------------------------------------- -struct _Drm_Render_ -{ +struct _Drm_Render_ { int fd_drm; drmModeModeInfo mode; drmModeCrtc *saved_crtc; -// drmEventContext ev; + // drmEventContext ev; int bpp; uint32_t connector_id, crtc_id, video_plane; uint32_t hdr_metadata; - uint32_t mmWidth,mmHeight; // Size in mm + uint32_t mmWidth, mmHeight; // Size in mm uint32_t hdr_blob_id; - }; typedef struct _Drm_Render_ VideoRender; @@ -46,10 +42,7 @@ struct type_name { const char *name; }; -static const char *util_lookup_type_name(unsigned int type, - const struct type_name *table, - unsigned int count) -{ +static const char *util_lookup_type_name(unsigned int type, const struct type_name *table, unsigned int count) { unsigned int i; for (i = 0; i < count; i++) @@ -60,41 +53,36 @@ static const char *util_lookup_type_name(unsigned int type, } static const struct type_name connector_type_names[] = { - { DRM_MODE_CONNECTOR_Unknown, "unknown" }, - { DRM_MODE_CONNECTOR_VGA, "VGA" }, - { DRM_MODE_CONNECTOR_DVII, "DVI-I" }, - { DRM_MODE_CONNECTOR_DVID, "DVI-D" }, - { DRM_MODE_CONNECTOR_DVIA, "DVI-A" }, - { DRM_MODE_CONNECTOR_Composite, "composite" }, - { DRM_MODE_CONNECTOR_SVIDEO, "s-video" }, - { DRM_MODE_CONNECTOR_LVDS, "LVDS" }, - { DRM_MODE_CONNECTOR_Component, "component" }, - { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN" }, - { DRM_MODE_CONNECTOR_DisplayPort, "DP" }, - { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" }, - { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" }, - { DRM_MODE_CONNECTOR_TV, "TV" }, - { DRM_MODE_CONNECTOR_eDP, "eDP" }, - { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, - { DRM_MODE_CONNECTOR_DSI, "DSI" }, - { DRM_MODE_CONNECTOR_DPI, "DPI" }, + {DRM_MODE_CONNECTOR_Unknown, "unknown"}, + {DRM_MODE_CONNECTOR_VGA, "VGA"}, + {DRM_MODE_CONNECTOR_DVII, "DVI-I"}, + {DRM_MODE_CONNECTOR_DVID, "DVI-D"}, + {DRM_MODE_CONNECTOR_DVIA, "DVI-A"}, + {DRM_MODE_CONNECTOR_Composite, "composite"}, + {DRM_MODE_CONNECTOR_SVIDEO, "s-video"}, + {DRM_MODE_CONNECTOR_LVDS, "LVDS"}, + {DRM_MODE_CONNECTOR_Component, "component"}, + {DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN"}, + {DRM_MODE_CONNECTOR_DisplayPort, "DP"}, + {DRM_MODE_CONNECTOR_HDMIA, "HDMI-A"}, + {DRM_MODE_CONNECTOR_HDMIB, "HDMI-B"}, + {DRM_MODE_CONNECTOR_TV, "TV"}, + {DRM_MODE_CONNECTOR_eDP, "eDP"}, + {DRM_MODE_CONNECTOR_VIRTUAL, "Virtual"}, + {DRM_MODE_CONNECTOR_DSI, "DSI"}, + {DRM_MODE_CONNECTOR_DPI, "DPI"}, }; -const char *util_lookup_connector_type_name(unsigned int type) -{ - return util_lookup_type_name(type, connector_type_names, - ARRAY_SIZE(connector_type_names)); +const char *util_lookup_connector_type_name(unsigned int type) { + return util_lookup_type_name(type, connector_type_names, ARRAY_SIZE(connector_type_names)); } -static uint64_t GetPropertyValue(int fd_drm, uint32_t objectID, - uint32_t objectType, const char *propName) -{ +static uint64_t GetPropertyValue(int fd_drm, uint32_t objectID, uint32_t objectType, const char *propName) { uint32_t i; int found = 0; uint64_t 
value = 0; drmModePropertyPtr Prop; - drmModeObjectPropertiesPtr objectProps = - drmModeObjectGetProperties(fd_drm, objectID, objectType); + drmModeObjectPropertiesPtr objectProps = drmModeObjectGetProperties(fd_drm, objectID, objectType); for (i = 0; i < objectProps->count_props; i++) { if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL) @@ -115,20 +103,16 @@ static uint64_t GetPropertyValue(int fd_drm, uint32_t objectID, #ifdef DRM_DEBUG if (!found) - fprintf(stderr, "GetPropertyValue: Unable to find value for property \'%s\'.\n", - propName); + fprintf(stderr, "GetPropertyValue: Unable to find value for property \'%s\'.\n", propName); #endif return value; } -static uint32_t GetPropertyID(int fd_drm, uint32_t objectID, - uint32_t objectType, const char *propName) -{ +static uint32_t GetPropertyID(int fd_drm, uint32_t objectID, uint32_t objectType, const char *propName) { uint32_t i; int found = 0; uint32_t value = -1; drmModePropertyPtr Prop; - drmModeObjectPropertiesPtr objectProps = - drmModeObjectGetProperties(fd_drm, objectID, objectType); + drmModeObjectPropertiesPtr objectProps = drmModeObjectGetProperties(fd_drm, objectID, objectType); for (i = 0; i < objectProps->count_props; i++) { if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL) @@ -146,24 +130,21 @@ static uint32_t GetPropertyID(int fd_drm, uint32_t objectID, #ifdef DRM_DEBUG if (!found) - Debug(3,"GetPropertyValue: Unable to find ID for property \'%s\'.\n",propName); + Debug(3, "GetPropertyValue: Unable to find ID for property \'%s\'.\n", propName); #endif return value; } -static int SetPropertyRequest(drmModeAtomicReqPtr ModeReq, int fd_drm, - uint32_t objectID, uint32_t objectType, - const char *propName, uint64_t value) -{ +static int SetPropertyRequest(drmModeAtomicReqPtr ModeReq, int fd_drm, uint32_t objectID, uint32_t objectType, + const char *propName, uint64_t value) { uint32_t i; uint64_t id = 0; drmModePropertyPtr Prop; - drmModeObjectPropertiesPtr objectProps = - drmModeObjectGetProperties(fd_drm, objectID, objectType); + drmModeObjectPropertiesPtr objectProps = drmModeObjectGetProperties(fd_drm, objectID, objectType); for (i = 0; i < objectProps->count_props; i++) { if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL) - printf( "SetPropertyRequest: Unable to query property.\n"); + printf("SetPropertyRequest: Unable to query property.\n"); if (strcmp(propName, Prop->name) == 0) { id = Prop->prop_id; @@ -177,15 +158,13 @@ static int SetPropertyRequest(drmModeAtomicReqPtr ModeReq, int fd_drm, drmModeFreeObjectProperties(objectProps); if (id == 0) - printf( "SetPropertyRequest: Unable to find value for property \'%s\'.\n", - propName); + printf("SetPropertyRequest: Unable to find value for property \'%s\'.\n", propName); return drmModeAtomicAddProperty(ModeReq, objectID, id, value); } static void CuvidSetVideoMode(void); -void set_video_mode(int width, int height) -{ +void set_video_mode(int width, int height) { drmModeConnector *connector; drmModeModeInfo *mode; int ii; @@ -194,29 +173,25 @@ void set_video_mode(int width, int height) connector = drmModeGetConnector(render->fd_drm, render->connector_id); for (ii = 0; ii < connector->count_modes; ii++) { mode = &connector->modes[ii]; - printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh); - if (width == mode->hdisplay && - height == mode->vdisplay && - mode->vrefresh == DRMRefresh && - render->mode.hdisplay != width && - render->mode.vdisplay != height && - !(mode->flags & 
DRM_MODE_FLAG_INTERLACE)) { + printf("Mode %d %dx%d Rate %d\n", ii, mode->hdisplay, mode->vdisplay, mode->vrefresh); + if (width == mode->hdisplay && height == mode->vdisplay && mode->vrefresh == DRMRefresh && + render->mode.hdisplay != width && render->mode.vdisplay != height && + !(mode->flags & DRM_MODE_FLAG_INTERLACE)) { memcpy(&render->mode, mode, sizeof(drmModeModeInfo)); VideoWindowWidth = mode->hdisplay; VideoWindowHeight = mode->vdisplay; - eglDestroySurface (eglDisplay, eglSurface); + eglDestroySurface(eglDisplay, eglSurface); EglCheck(); - gbm_surface_destroy (gbm.surface); + gbm_surface_destroy(gbm.surface); InitBo(render->bpp); CuvidSetVideoMode(); - Debug(3,"Set new mode %d:%d\n",mode->hdisplay,mode->vdisplay); + Debug(3, "Set new mode %d:%d\n", mode->hdisplay, mode->vdisplay); break; } } } -static int FindDevice(VideoRender * render) -{ +static int FindDevice(VideoRender *render) { drmVersion *version; drmModeRes *resources; drmModeConnector *connector; @@ -228,7 +203,7 @@ static int FindDevice(VideoRender * render) uint32_t j, k; uint64_t has_dumb; uint64_t has_prime; - int i,ii=0; + int i, ii = 0; char connectorstr[10]; int found = 0; #ifdef RASPI @@ -240,30 +215,27 @@ static int FindDevice(VideoRender * render) fprintf(stderr, "FindDevice: cannot open /dev/dri/card0: %m\n"); return -errno; } - + int ret = drmSetMaster(render->fd_drm); - - if (ret < 0) - { + + if (ret < 0) { drm_magic_t magic; ret = drmGetMagic(render->fd_drm, &magic); - if (ret < 0) - { - Debug(3, "drm:%s - failed to get drm magic: %s\n", __FUNCTION__, strerror(errno)); - return -1; + if (ret < 0) { + Debug(3, "drm:%s - failed to get drm magic: %s\n", __FUNCTION__, strerror(errno)); + return -1; } ret = drmAuthMagic(render->fd_drm, magic); - if (ret < 0) - { - Debug(3, "drm:%s - failed to authorize drm magic: %s\n", __FUNCTION__, strerror(errno)); - return -1; + if (ret < 0) { + Debug(3, "drm:%s - failed to authorize drm magic: %s\n", __FUNCTION__, strerror(errno)); + return -1; } } - + version = drmGetVersion(render->fd_drm); - fprintf(stderr, "FindDevice: open /dev/dri/card0: %s\n", version->name); + fprintf(stderr, "FindDevice: open /dev/dri/card0: %s\n", version->name); // check capability if (drmGetCap(render->fd_drm, DRM_CAP_DUMB_BUFFER, &has_dumb) < 0 || has_dumb == 0) @@ -284,15 +256,14 @@ static int FindDevice(VideoRender * render) if (drmGetCap(render->fd_drm, DRM_PRIME_CAP_IMPORT, &has_prime) < 0) fprintf(stderr, "FindDevice: DRM_PRIME_CAP_IMPORT not available.\n"); - if ((resources = drmModeGetResources(render->fd_drm)) == NULL){ + if ((resources = drmModeGetResources(render->fd_drm)) == NULL) { fprintf(stderr, "FindDevice: cannot retrieve DRM resources (%d): %m\n", errno); return -errno; } #ifdef DEBUG - Debug(3,"[FindDevice] DRM have %i connectors, %i crtcs, %i encoders\n", - resources->count_connectors, resources->count_crtcs, - resources->count_encoders); + Debug(3, "[FindDevice] DRM have %i connectors, %i crtcs, %i encoders\n", resources->count_connectors, + resources->count_crtcs, resources->count_encoders); #endif // find all available connectors @@ -303,49 +274,49 @@ static int FindDevice(VideoRender * render) return -errno; } - sprintf(connectorstr,"%s-%u",util_lookup_connector_type_name(connector->connector_type),connector->connector_type_id); - printf("Connector >%s< is %sconnected\n",connectorstr,connector->connection == DRM_MODE_CONNECTED?"":"not "); - if (DRMConnector && strcmp(DRMConnector,connectorstr)) + sprintf(connectorstr, "%s-%u", 
util_lookup_connector_type_name(connector->connector_type), + connector->connector_type_id); + printf("Connector >%s< is %sconnected\n", connectorstr, + connector->connection == DRM_MODE_CONNECTED ? "" : "not "); + if (DRMConnector && strcmp(DRMConnector, connectorstr)) continue; if (connector->connection == DRM_MODE_CONNECTED && connector->count_modes > 0) { float aspect = (float)connector->mmWidth / (float)connector->mmHeight; if ((aspect > 1.70) && (aspect < 1.85)) { - render->mmHeight = 90; - render->mmWidth = 160; + render->mmHeight = 90; + render->mmWidth = 160; } else { - render->mmHeight = connector->mmHeight; - render->mmWidth = connector->mmWidth; + render->mmHeight = connector->mmHeight; + render->mmWidth = connector->mmWidth; } render->connector_id = connector->connector_id; // FIXME: use default encoder/crtc pair - if ((encoder = drmModeGetEncoder(render->fd_drm, connector->encoder_id)) == NULL){ + if ((encoder = drmModeGetEncoder(render->fd_drm, connector->encoder_id)) == NULL) { fprintf(stderr, "FindDevice: cannot retrieve encoder (%d): %m\n", errno); return -errno; } render->crtc_id = encoder->crtc_id; - render->hdr_metadata = GetPropertyID(render->fd_drm, connector->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "HDR_OUTPUT_METADATA"); - printf("ID %d of METADATA in Connector %d connected %d\n",render->hdr_metadata,connector->connector_id,connector->connection); + render->hdr_metadata = GetPropertyID(render->fd_drm, connector->connector_id, DRM_MODE_OBJECT_CONNECTOR, + "HDR_OUTPUT_METADATA"); + printf("ID %d of METADATA in Connector %d connected %d\n", render->hdr_metadata, connector->connector_id, + connector->connection); - memcpy(&render->mode, &connector->modes[0], sizeof(drmModeModeInfo)); // set fallback + memcpy(&render->mode, &connector->modes[0], sizeof(drmModeModeInfo)); // set fallback // search Modes for Connector for (ii = 0; ii < connector->count_modes; ii++) { mode = &connector->modes[ii]; - printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh); + printf("Mode %d %dx%d Rate %d\n", ii, mode->hdisplay, mode->vdisplay, mode->vrefresh); if (VideoWindowWidth && VideoWindowHeight) { // preset by command line - if (VideoWindowWidth == mode->hdisplay && - VideoWindowHeight == mode->vdisplay && - mode->vrefresh == DRMRefresh && - !(mode->flags & DRM_MODE_FLAG_INTERLACE)) { + if (VideoWindowWidth == mode->hdisplay && VideoWindowHeight == mode->vdisplay && + mode->vrefresh == DRMRefresh && !(mode->flags & DRM_MODE_FLAG_INTERLACE)) { memcpy(&render->mode, mode, sizeof(drmModeModeInfo)); break; } - } - else { + } else { if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) { memcpy(&render->mode, mode, sizeof(drmModeModeInfo)); VideoWindowWidth = mode->hdisplay; @@ -355,16 +326,17 @@ static int FindDevice(VideoRender * render) } } found = 1; - i = resources->count_connectors; // uuuuhh + i = resources->count_connectors; // uuuuhh } VideoWindowWidth = render->mode.hdisplay; VideoWindowHeight = render->mode.vdisplay; if (found) - printf("Use Mode %d %dx%d Rate %d\n",ii,render->mode.hdisplay,render->mode.vdisplay,render->mode.vrefresh); + printf("Use Mode %d %dx%d Rate %d\n", ii, render->mode.hdisplay, render->mode.vdisplay, + render->mode.vrefresh); drmModeFreeConnector(connector); } if (!found) { - Debug(3,"Requested Connector not found or not connected\n"); + Debug(3, "Requested Connector not found or not connected\n"); printf("Requested Connector not found or not connected\n"); return -1; } @@ -384,16 +356,16 @@ static int FindDevice(VideoRender * 
render) break; } - uint64_t type = GetPropertyValue(render->fd_drm, plane_res->planes[j], - DRM_MODE_OBJECT_PLANE, "type"); + uint64_t type = GetPropertyValue(render->fd_drm, plane_res->planes[j], DRM_MODE_OBJECT_PLANE, "type"); uint64_t zpos = 0; #ifdef DRM_DEBUG // If more then 2 crtcs this must rewriten!!! - printf("[FindDevice] Plane id %i crtc_id %i possible_crtcs %i possible CRTC %i type %s\n", - plane->plane_id, plane->crtc_id, plane->possible_crtcs, resources->crtcs[i], - (type == DRM_PLANE_TYPE_PRIMARY) ? "primary plane" : - (type == DRM_PLANE_TYPE_OVERLAY) ? "overlay plane" : - (type == DRM_PLANE_TYPE_CURSOR) ? "cursor plane" : "No plane type"); + printf("[FindDevice] Plane id %i crtc_id %i possible_crtcs %i possible CRTC %i type %s\n", plane->plane_id, + plane->crtc_id, plane->possible_crtcs, resources->crtcs[i], + (type == DRM_PLANE_TYPE_PRIMARY) ? "primary plane" + : (type == DRM_PLANE_TYPE_OVERLAY) ? "overlay plane" + : (type == DRM_PLANE_TYPE_CURSOR) ? "cursor plane" + : "No plane type"); #endif // test pixel format and plane caps @@ -423,8 +395,7 @@ static int FindDevice(VideoRender * render) drmModeFreeResources(resources); #ifdef DRM_DEBUG - printf("[FindDevice] DRM setup CRTC: %i video_plane: %i \n", - render->crtc_id, render->video_plane); + printf("[FindDevice] DRM setup CRTC: %i video_plane: %i \n", render->crtc_id, render->video_plane); #endif // save actual modesetting @@ -436,8 +407,7 @@ static int FindDevice(VideoRender * render) /// /// Initialize video output module. /// -void VideoInitDrm() -{ +void VideoInitDrm() { int i; if (!(render = calloc(1, sizeof(*render)))) { @@ -445,25 +415,24 @@ void VideoInitDrm() return; } - if (FindDevice(render)){ - Fatal(_( "VideoInit: FindDevice() failed\n")); + if (FindDevice(render)) { + Fatal(_("VideoInit: FindDevice() failed\n")); } - gbm.dev = gbm_create_device (render->fd_drm); - assert (gbm.dev != NULL); + gbm.dev = gbm_create_device(render->fd_drm); + assert(gbm.dev != NULL); PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display = NULL; - get_platform_display = - (void *) eglGetProcAddress("eglGetPlatformDisplayEXT"); + get_platform_display = (void *)eglGetProcAddress("eglGetPlatformDisplayEXT"); assert(get_platform_display != NULL); eglDisplay = get_platform_display(EGL_PLATFORM_GBM_KHR, gbm.dev, NULL); EglCheck(); - - assert (eglDisplay != NULL); -// return; + + assert(eglDisplay != NULL); + // return; drmModeAtomicReqPtr ModeReq; const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET; @@ -477,13 +446,11 @@ void VideoInitDrm() fprintf(stderr, "cannot allocate atomic request (%d): %m\n", errno); return; } - printf("set CRTC %d of Connector %d aktiv\n",render->crtc_id,render->connector_id); - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID); - SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id); - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "ACTIVE", 1); + printf("set CRTC %d of Connector %d aktiv\n", render->crtc_id, render->connector_id); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID); + SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", + render->crtc_id); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "ACTIVE", 1); if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0) fprintf(stderr, "cannot set 
atomic mode (%d): %m\n", errno); @@ -492,54 +459,51 @@ void VideoInitDrm() fprintf(stderr, "cannot destroy property blob (%d): %m\n", errno); drmModeAtomicFree(ModeReq); - } -void get_drm_aspect(int *num,int *den) -{ +void get_drm_aspect(int *num, int *den) { *num = VideoWindowWidth; *den = VideoWindowHeight; } -struct gbm_bo *bo = NULL, *next_bo=NULL; +struct gbm_bo *bo = NULL, *next_bo = NULL; struct drm_fb *fb; static int m_need_modeset = 0; -static int old_color=-1,old_trc=-1; +static int old_color = -1, old_trc = -1; void InitBo(int bpp) { // create the GBM and EGL surface - - render->bpp = bpp; - gbm.surface = gbm_surface_create (gbm.dev, VideoWindowWidth,VideoWindowHeight, - bpp==10?GBM_FORMAT_XRGB2101010:GBM_FORMAT_ARGB8888, - GBM_BO_USE_SCANOUT|GBM_BO_USE_RENDERING); - assert(gbm.surface != NULL); - eglSurface = eglCreateWindowSurface (eglDisplay, eglConfig, gbm.surface, NULL); - assert(eglSurface != NULL); + render->bpp = bpp; + gbm.surface = gbm_surface_create(gbm.dev, VideoWindowWidth, VideoWindowHeight, + bpp == 10 ? GBM_FORMAT_XRGB2101010 : GBM_FORMAT_ARGB8888, + GBM_BO_USE_SCANOUT | GBM_BO_USE_RENDERING); + assert(gbm.surface != NULL); + eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, gbm.surface, NULL); + assert(eglSurface != NULL); } static struct gbm_bo *previous_bo = NULL; static uint32_t previous_fb; static int has_modeset = 0; -static void drm_swap_buffers () { +static void drm_swap_buffers() { uint32_t fb; - eglSwapBuffers (eglDisplay, eglSurface); - struct gbm_bo *bo = gbm_surface_lock_front_buffer (gbm.surface); + eglSwapBuffers(eglDisplay, eglSurface); + struct gbm_bo *bo = gbm_surface_lock_front_buffer(gbm.surface); #if 1 if (bo == NULL) - bo = gbm_surface_lock_front_buffer (gbm.surface); + bo = gbm_surface_lock_front_buffer(gbm.surface); #endif - assert (bo != NULL); - uint32_t handle = gbm_bo_get_handle (bo).u32; - uint32_t pitch = gbm_bo_get_stride (bo); + assert(bo != NULL); + uint32_t handle = gbm_bo_get_handle(bo).u32; + uint32_t pitch = gbm_bo_get_stride(bo); - - drmModeAddFB (render->fd_drm, VideoWindowWidth,VideoWindowHeight,render->bpp==10? 30:24, 32, pitch, handle, &fb); -// drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode); + drmModeAddFB(render->fd_drm, VideoWindowWidth, VideoWindowHeight, render->bpp == 10 ? 30 : 24, 32, pitch, handle, + &fb); + // drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode); if (m_need_modeset) { drmModeAtomicReqPtr ModeReq; @@ -556,23 +520,19 @@ static void drm_swap_buffers () { } // Need to disable the CRTC in order to submit the HDR data.... 
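/*
 * Illustrative sketch (editorial, not part of this patch): the
 * disable -> reprogram -> re-enable atomic-commit pattern that the
 * comment above refers to, written directly against libdrm instead of
 * the SetPropertyRequest() helper used in this file. The property IDs
 * prop_active and prop_mode_id are assumed to have been looked up
 * beforehand, e.g. via drmModeObjectGetProperties().
 */
#include <xf86drm.h>
#include <xf86drmMode.h>

static int AtomicModesetSketch(int fd, uint32_t crtc_id, uint32_t prop_active, uint32_t prop_mode_id,
                               const drmModeModeInfo *mode) {
    uint32_t blob_id;
    drmModeAtomicReqPtr req;
    int ret;

    if (drmModeCreatePropertyBlob(fd, mode, sizeof(*mode), &blob_id)) // wrap the mode in a property blob
        return -1;
    if (!(req = drmModeAtomicAlloc()))
        return -1;
    drmModeAtomicAddProperty(req, crtc_id, prop_active, 0);           // switch the CRTC off first
    drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
    drmModeAtomicAddProperty(req, crtc_id, prop_mode_id, blob_id);    // then queue the new mode
    drmModeAtomicAddProperty(req, crtc_id, prop_active, 1);           // and switch it back on
    ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
    drmModeAtomicFree(req);
    return ret;
}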
- SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "ACTIVE", 0); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "ACTIVE", 0); if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0) fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno); - - SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "Colorspace",old_color==AVCOL_PRI_BT2020?9:2 ); - SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, - DRM_MODE_OBJECT_PLANE, "COLOR_ENCODING",old_color==AVCOL_PRI_BT2020?2:1 ); - SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, - DRM_MODE_OBJECT_PLANE, "COLOR_RANGE",0 ); - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID); - SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id); - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "ACTIVE", 1); + + SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, DRM_MODE_OBJECT_CONNECTOR, "Colorspace", + old_color == AVCOL_PRI_BT2020 ? 9 : 2); + SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, DRM_MODE_OBJECT_PLANE, "COLOR_ENCODING", + old_color == AVCOL_PRI_BT2020 ? 2 : 1); + SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, DRM_MODE_OBJECT_PLANE, "COLOR_RANGE", 0); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID); + SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", + render->crtc_id); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "ACTIVE", 1); if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0) fprintf(stderr, "cannot set atomic mode modeset 2 (%d): %m\n", errno); @@ -584,33 +544,32 @@ static void drm_swap_buffers () { m_need_modeset = 0; has_modeset = 1; } - drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode); + drmModeSetCrtc(render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode); if (previous_bo) { - drmModeRmFB (render->fd_drm, previous_fb); - gbm_surface_release_buffer (gbm.surface, previous_bo); + drmModeRmFB(render->fd_drm, previous_fb); + gbm_surface_release_buffer(gbm.surface, previous_bo); } previous_bo = bo; previous_fb = fb; - } -static void drm_clean_up () { +static void drm_clean_up() { // set the previous crtc if (!render) return; - Debug(3,"drm clean up\n"); + Debug(3, "drm clean up\n"); if (previous_bo) { - drmModeRmFB (render->fd_drm, previous_fb); - gbm_surface_release_buffer (gbm.surface, previous_bo); + drmModeRmFB(render->fd_drm, previous_fb); + gbm_surface_release_buffer(gbm.surface, previous_bo); } - drmModeSetCrtc (render->fd_drm, render->saved_crtc->crtc_id, render->saved_crtc->buffer_id, - render->saved_crtc->x, render->saved_crtc->y, &render->connector_id, 1, &render->saved_crtc->mode); - drmModeFreeCrtc (render->saved_crtc); - + drmModeSetCrtc(render->fd_drm, render->saved_crtc->crtc_id, render->saved_crtc->buffer_id, render->saved_crtc->x, + render->saved_crtc->y, &render->connector_id, 1, &render->saved_crtc->mode); + drmModeFreeCrtc(render->saved_crtc); + if (has_modeset) { drmModeAtomicReqPtr ModeReq; const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET; @@ -626,25 +585,19 @@ static void drm_clean_up () { } // Need to disable the 
CRTC in order to submit the HDR data.... - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "ACTIVE", 0); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "ACTIVE", 0); if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0) fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno); - - SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "HDR_OUTPUT_METADATA", 0); - SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "Colorspace",2 ); - SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, - DRM_MODE_OBJECT_PLANE, "COLOR_ENCODING",1 ); - SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, - DRM_MODE_OBJECT_PLANE, "COLOR_RANGE",1 ); - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID); - SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, - DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id); - SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, - DRM_MODE_OBJECT_CRTC, "ACTIVE", 1); + + SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, DRM_MODE_OBJECT_CONNECTOR, + "HDR_OUTPUT_METADATA", 0); + SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, DRM_MODE_OBJECT_CONNECTOR, "Colorspace", 2); + SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, DRM_MODE_OBJECT_PLANE, "COLOR_ENCODING", 1); + SetPropertyRequest(ModeReq, render->fd_drm, render->video_plane, DRM_MODE_OBJECT_PLANE, "COLOR_RANGE", 1); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID); + SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id, DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", + render->crtc_id); + SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id, DRM_MODE_OBJECT_CRTC, "ACTIVE", 1); if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0) fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno); @@ -655,27 +608,26 @@ static void drm_clean_up () { drmModeAtomicFree(ModeReq); has_modeset = 0; } - + if (render->hdr_blob_id) drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id); render->hdr_blob_id = 0; - eglDestroySurface (eglDisplay, eglSurface); + eglDestroySurface(eglDisplay, eglSurface); EglCheck(); - gbm_surface_destroy (gbm.surface); - eglDestroyContext (eglDisplay, eglContext); + gbm_surface_destroy(gbm.surface); + eglDestroyContext(eglDisplay, eglContext); EglCheck(); - eglDestroyContext (eglDisplay, eglSharedContext); + eglDestroyContext(eglDisplay, eglSharedContext); EglCheck(); eglSharedContext = NULL; - eglTerminate (eglDisplay); + eglTerminate(eglDisplay); EglCheck(); - gbm_device_destroy (gbm.dev); + gbm_device_destroy(gbm.dev); drmDropMaster(render->fd_drm); - close (render->fd_drm); + close(render->fd_drm); eglDisplay = NULL; free(render); - } diff --git a/drvapi_error_string.h b/drvapi_error_string.h index ccf42fb..866a157 100644 --- a/drvapi_error_string.h +++ b/drvapi_error_string.h @@ -14,12 +14,11 @@ #define _DRVAPI_ERROR_STRING_H_ #include -#include #include +#include // Error Code string definitions here -typedef struct -{ +typedef struct { char const *error_string; unsigned int error_id; } s_CudaErrorStr; @@ -62,30 +61,30 @@ s_CudaErrorStr sCudaDrvErrorString[] = { /** * This indicates profiling APIs are called while application is running * in visual profiler mode. 
- */ + */ {"CUDA_ERROR_PROFILER_DISABLED", 5}, /** * This indicates profiling has not been initialized for this context. * Call cuProfilerInitialize() to resolve this. - */ + */ {"CUDA_ERROR_PROFILER_NOT_INITIALIZED", 6}, /** * This indicates profiler has already been started and probably * cuProfilerStart() is incorrectly called. - */ + */ {"CUDA_ERROR_PROFILER_ALREADY_STARTED", 7}, /** * This indicates profiler has already been stopped and probably * cuProfilerStop() is incorrectly called. - */ + */ {"CUDA_ERROR_PROFILER_ALREADY_STOPPED", 8}, /** - * This indicates that no CUDA-capable devices were detected by the installed - * CUDA driver. + * This indicates that no CUDA-capable devices were detected by the + * installed CUDA driver. */ {"CUDA_ERROR_NO_DEVICE (no CUDA-capable devices were detected)", 100}, @@ -207,9 +206,9 @@ s_CudaErrorStr sCudaDrvErrorString[] = { {"CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", 219}, /** - * This indicates that an uncorrectable NVLink error was detected during the - * execution. - */ + * This indicates that an uncorrectable NVLink error was detected during the + * execution. + */ {"CUDA_ERROR_NVLINK_UNCORRECTABLE", 220}, /** @@ -256,18 +255,19 @@ s_CudaErrorStr sCudaDrvErrorString[] = { /** * This indicates that asynchronous operations issued previously have not - * completed yet. This result is not actually an error, but must be indicated - * differently than ::CUDA_SUCCESS (which indicates completion). Calls that - * may return this value include ::cuEventQuery() and ::cuStreamQuery(). + * completed yet. This result is not actually an error, but must be + * indicated differently than ::CUDA_SUCCESS (which indicates completion). + * Calls that may return this value include ::cuEventQuery() and + * ::cuStreamQuery(). */ {"CUDA_ERROR_NOT_READY", 600}, /** * While executing a kernel, the device encountered a * load or store instruction on an invalid memory address. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. + * This leaves the process in an inconsistent state and any further CUDA + * work will return the same error. To continue using CUDA, the process must + * be terminated and relaunched. */ {"CUDA_ERROR_ILLEGAL_ADDRESS", 700}, @@ -342,8 +342,8 @@ s_CudaErrorStr sCudaDrvErrorString[] = { {"CUDA_ERROR_TOO_MANY_PEERS", 711}, /** - * This error indicates that the memory range passed to ::cuMemHostRegister() - * has already been registered. + * This error indicates that the memory range passed to + * ::cuMemHostRegister() has already been registered. */ {"CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", 712}, @@ -356,25 +356,25 @@ s_CudaErrorStr sCudaDrvErrorString[] = { /** * While executing a kernel, the device encountered a stack error. * This can be due to stack corruption or exceeding the stack size limit. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. + * This leaves the process in an inconsistent state and any further CUDA + * work will return the same error. To continue using CUDA, the process must + * be terminated and relaunched. */ {"CUDA_ERROR_HARDWARE_STACK_ERROR", 714}, /** * While executing a kernel, the device encountered an illegal instruction. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. 
To continue using CUDA, the process must be terminated - * and relaunched. + * This leaves the process in an inconsistent state and any further CUDA + * work will return the same error. To continue using CUDA, the process must + * be terminated and relaunched. */ {"CUDA_ERROR_ILLEGAL_INSTRUCTION", 715}, /** - * While executing a kernel, the device encountered a load or store instruction - * on a memory address which is not aligned. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated + * While executing a kernel, the device encountered a load or store + * instruction on a memory address which is not aligned. This leaves the + * process in an inconsistent state and any further CUDA work will return + * the same error. To continue using CUDA, the process must be terminated * and relaunched. */ {"CUDA_ERROR_MISALIGNED_ADDRESS", 716}, @@ -384,17 +384,17 @@ s_CudaErrorStr sCudaDrvErrorString[] = { * which can only operate on memory locations in certain address spaces * (global, shared, or local), but was supplied a memory address not * belonging to an allowed address space. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. + * This leaves the process in an inconsistent state and any further CUDA + * work will return the same error. To continue using CUDA, the process must + * be terminated and relaunched. */ {"CUDA_ERROR_INVALID_ADDRESS_SPACE", 717}, /** - * While executing a kernel, the device program counter wrapped its address space. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. + * While executing a kernel, the device program counter wrapped its address + * space. This leaves the process in an inconsistent state and any further + * CUDA work will return the same error. To continue using CUDA, the process + * must be terminated and relaunched. */ {"CUDA_ERROR_INVALID_PC", 718}, @@ -409,11 +409,13 @@ s_CudaErrorStr sCudaDrvErrorString[] = { {"CUDA_ERROR_LAUNCH_FAILED", 719}, /** - * This error indicates that the number of blocks launched per grid for a kernel that was - * launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice - * exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor - * or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors - * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. + * This error indicates that the number of blocks launched per grid for a + * kernel that was launched via either ::cuLaunchCooperativeKernel or + * ::cuLaunchCooperativeKernelMultiDevice exceeds the maximum number of + * blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor or + * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number + * of multiprocessors as specified by the device attribute + * ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. */ {"CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE", 720}, @@ -432,13 +434,11 @@ s_CudaErrorStr sCudaDrvErrorString[] = { * This indicates that an unknown internal error has occurred. 
*/ {"CUDA_ERROR_UNKNOWN", 999}, - {NULL, -1} -}; + {NULL, -1}}; // This is just a linear search through the array, since the error_id's are not // always ocurring consecutively -static inline const char *getCudaDrvErrorString(CUresult error_id) -{ +static inline const char *getCudaDrvErrorString(CUresult error_id) { int index = 0; while (sCudaDrvErrorString[index].error_id != error_id && (int)sCudaDrvErrorString[index].error_id != -1) { diff --git a/hdr.c b/hdr.c index 91614c9..96231fd 100644 --- a/hdr.c +++ b/hdr.c @@ -29,7 +29,7 @@ struct hdr_metadata_infoframe { */ struct { __u16 x, y; - } display_primaries[3]; + } display_primaries[3]; /** * @white_point: White Point of Colorspace Data. * These are coded as unsigned 16-bit values in units of @@ -40,7 +40,7 @@ struct hdr_metadata_infoframe { */ struct { __u16 x, y; - } white_point; + } white_point; /** * @max_display_mastering_luminance: Max Mastering Display Luminance. * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, @@ -87,7 +87,6 @@ struct hdr_output_metadata { }; #endif - enum hdr_metadata_eotf { EOTF_TRADITIONAL_GAMMA_SDR, EOTF_TRADITIONAL_GAMMA_HDR, @@ -95,27 +94,15 @@ enum hdr_metadata_eotf { EOTF_HLG, }; - enum metadata_id { METADATA_TYPE1, }; -void -weston_hdr_metadata(void *data, - uint16_t display_primary_r_x, - uint16_t display_primary_r_y, - uint16_t display_primary_g_x, - uint16_t display_primary_g_y, - uint16_t display_primary_b_x, - uint16_t display_primary_b_y, - uint16_t white_point_x, - uint16_t white_point_y, - uint16_t min_luminance, - uint16_t max_luminance, - uint16_t max_cll, - uint16_t max_fall, - enum hdr_metadata_eotf eotf) -{ +void weston_hdr_metadata(void *data, uint16_t display_primary_r_x, uint16_t display_primary_r_y, + uint16_t display_primary_g_x, uint16_t display_primary_g_y, uint16_t display_primary_b_x, + uint16_t display_primary_b_y, uint16_t white_point_x, uint16_t white_point_y, + uint16_t min_luminance, uint16_t max_luminance, uint16_t max_cll, uint16_t max_fall, + enum hdr_metadata_eotf eotf) { uint8_t *data8; uint16_t *data16; @@ -124,7 +111,7 @@ weston_hdr_metadata(void *data, *data8++ = eotf; *data8++ = METADATA_TYPE1; - data16 = (void*)data8; + data16 = (void *)data8; *data16++ = display_primary_r_x; *data16++ = display_primary_r_y; @@ -155,151 +142,302 @@ struct weston_colorspace { struct weston_colorspace hdr10; static const struct weston_colorspace bt470m = { - .r = {{ 0.670f, 0.330f, }}, - .g = {{ 0.210f, 0.710f, }}, - .b = {{ 0.140f, 0.080f, }}, - .whitepoint = {{ 0.3101f, 0.3162f, }}, + .r = {{ + 0.670f, + 0.330f, + }}, + .g = {{ + 0.210f, + 0.710f, + }}, + .b = {{ + 0.140f, + 0.080f, + }}, + .whitepoint = {{ + 0.3101f, + 0.3162f, + }}, .name = "BT.470 M", .whitepoint_name = "C", }; static const struct weston_colorspace bt470bg = { - .r = {{ 0.640f, 0.330f, }}, - .g = {{ 0.290f, 0.600f, }}, - .b = {{ 0.150f, 0.060f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.640f, + 0.330f, + }}, + .g = {{ + 0.290f, + 0.600f, + }}, + .b = {{ + 0.150f, + 0.060f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "BT.470 B/G", .whitepoint_name = "D65", }; static const struct weston_colorspace smpte170m = { - .r = {{ 0.630f, 0.340f, }}, - .g = {{ 0.310f, 0.595f, }}, - .b = {{ 0.155f, 0.070f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.630f, + 0.340f, + }}, + .g = {{ + 0.310f, + 0.595f, + }}, + .b = {{ + 0.155f, + 0.070f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "SMPTE 170M", .whitepoint_name = "D65", }; static const struct 
weston_colorspace smpte240m = { - .r = {{ 0.630f, 0.340f, }}, - .g = {{ 0.310f, 0.595f, }}, - .b = {{ 0.155f, 0.070f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.630f, + 0.340f, + }}, + .g = {{ + 0.310f, + 0.595f, + }}, + .b = {{ + 0.155f, + 0.070f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "SMPTE 240M", .whitepoint_name = "D65", }; static const struct weston_colorspace bt709 = { - .r = {{ 0.640f, 0.330f, }}, - .g = {{ 0.300f, 0.600f, }}, - .b = {{ 0.150f, 0.060f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.640f, + 0.330f, + }}, + .g = {{ + 0.300f, + 0.600f, + }}, + .b = {{ + 0.150f, + 0.060f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "BT.709", .whitepoint_name = "D65", }; static const struct weston_colorspace bt2020 = { - .r = {{ 0.708f, 0.292f, }}, - .g = {{ 0.170f, 0.797f, }}, - .b = {{ 0.131f, 0.046f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.708f, + 0.292f, + }}, + .g = {{ + 0.170f, + 0.797f, + }}, + .b = {{ + 0.131f, + 0.046f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "BT.2020", .whitepoint_name = "D65", }; static const struct weston_colorspace srgb = { - .r = {{ 0.640f, 0.330f, }}, - .g = {{ 0.300f, 0.600f, }}, - .b = {{ 0.150f, 0.060f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.640f, + 0.330f, + }}, + .g = {{ + 0.300f, + 0.600f, + }}, + .b = {{ + 0.150f, + 0.060f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "sRGB", .whitepoint_name = "D65", }; static const struct weston_colorspace adobergb = { - .r = {{ 0.640f, 0.330f, }}, - .g = {{ 0.210f, 0.710f, }}, - .b = {{ 0.150f, 0.060f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.640f, + 0.330f, + }}, + .g = {{ + 0.210f, + 0.710f, + }}, + .b = {{ + 0.150f, + 0.060f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "AdobeRGB", .whitepoint_name = "D65", }; static const struct weston_colorspace dci_p3 = { - .r = {{ 0.680f, 0.320f, }}, - .g = {{ 0.265f, 0.690f, }}, - .b = {{ 0.150f, 0.060f, }}, - .whitepoint = {{ 0.3127f, 0.3290f, }}, + .r = {{ + 0.680f, + 0.320f, + }}, + .g = {{ + 0.265f, + 0.690f, + }}, + .b = {{ + 0.150f, + 0.060f, + }}, + .whitepoint = {{ + 0.3127f, + 0.3290f, + }}, .name = "DCI-P3 D65", .whitepoint_name = "D65", }; static const struct weston_colorspace prophotorgb = { - .r = {{ 0.7347f, 0.2653f, }}, - .g = {{ 0.1596f, 0.8404f, }}, - .b = {{ 0.0366f, 0.0001f, }}, - .whitepoint = {{ .3457, .3585 }}, + .r = {{ + 0.7347f, + 0.2653f, + }}, + .g = {{ + 0.1596f, + 0.8404f, + }}, + .b = {{ + 0.0366f, + 0.0001f, + }}, + .whitepoint = {{.3457, .3585}}, .name = "ProPhoto RGB", .whitepoint_name = "D50", }; static const struct weston_colorspace ciergb = { - .r = {{ 0.7347f, 0.2653f, }}, - .g = {{ 0.2738f, 0.7174f, }}, - .b = {{ 0.1666f, 0.0089f, }}, - .whitepoint = {{ 1.0f / 3.0f, 1.0f / 3.0f, }}, + .r = {{ + 0.7347f, + 0.2653f, + }}, + .g = {{ + 0.2738f, + 0.7174f, + }}, + .b = {{ + 0.1666f, + 0.0089f, + }}, + .whitepoint = {{ + 1.0f / 3.0f, + 1.0f / 3.0f, + }}, .name = "CIE RGB", .whitepoint_name = "E", }; static const struct weston_colorspace ciexyz = { - .r = {{ 1.0f, 0.0f, }}, - .g = {{ 0.0f, 1.0f, }}, - .b = {{ 0.0f, 0.0f, }}, - .whitepoint = {{ 1.0f / 3.0f, 1.0f / 3.0f, }}, + .r = {{ + 1.0f, + 0.0f, + }}, + .g = {{ + 0.0f, + 1.0f, + }}, + .b = {{ + 0.0f, + 0.0f, + }}, + .whitepoint = {{ + 1.0f / 3.0f, + 1.0f / 3.0f, + }}, .name = "CIE XYZ", .whitepoint_name = "E", }; const struct weston_colorspace ap0 = { - .r = {{ 0.7347f, 0.2653f, }}, - .g = {{ 0.0000f, 1.0000f, }}, 
- .b = {{ 0.0001f, -0.0770f, }}, - .whitepoint = {{ .32168f, .33767f, }}, + .r = {{ + 0.7347f, + 0.2653f, + }}, + .g = {{ + 0.0000f, + 1.0000f, + }}, + .b = {{ + 0.0001f, + -0.0770f, + }}, + .whitepoint = {{ + .32168f, + .33767f, + }}, .name = "ACES primaries #0", .whitepoint_name = "D60", }; const struct weston_colorspace ap1 = { - .r = {{ 0.713f, 0.393f, }}, - .g = {{ 0.165f, 0.830f, }}, - .b = {{ 0.128f, 0.044f, }}, - .whitepoint = {{ 0.32168f, 0.33767f, }}, + .r = {{ + 0.713f, + 0.393f, + }}, + .g = {{ + 0.165f, + 0.830f, + }}, + .b = {{ + 0.128f, + 0.044f, + }}, + .whitepoint = {{ + 0.32168f, + 0.33767f, + }}, .name = "ACES primaries #1", .whitepoint_name = "D60", }; -static const struct weston_colorspace * const colorspaces[] = { - &bt470m, - &bt470bg, - &smpte170m, - &smpte240m, - &bt709, - &bt2020, - &srgb, - &adobergb, - &dci_p3, - &prophotorgb, - &ciergb, - &ciexyz, - &ap0, - &ap1, +static const struct weston_colorspace *const colorspaces[] = { + &bt470m, &bt470bg, &smpte170m, &smpte240m, &bt709, &bt2020, &srgb, + &adobergb, &dci_p3, &prophotorgb, &ciergb, &ciexyz, &ap0, &ap1, }; #define ARRAY_LENGTH(a) (sizeof(a) / sizeof(a)[0]) -const struct weston_colorspace * -weston_colorspace_lookup(const char *name) -{ +const struct weston_colorspace *weston_colorspace_lookup(const char *name) { unsigned i; if (!name) @@ -315,35 +453,27 @@ weston_colorspace_lookup(const char *name) return NULL; } -static int cleanup=0; +static int cleanup = 0; - -static uint16_t encode_xyy(float xyy) -{ - return xyy * 50000; -} +static uint16_t encode_xyy(float xyy) { return xyy * 50000; } static AVMasteringDisplayMetadata md_save = {0}; static AVContentLightMetadata ld_save = {0}; -static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSideData *sd2) -{ - drmModeAtomicReqPtr ModeReq; +static void set_hdr_metadata(int color, int trc, AVFrameSideData *sd1, AVFrameSideData *sd2) { + drmModeAtomicReqPtr ModeReq; struct weston_colorspace *cs; enum hdr_metadata_eotf eotf; struct hdr_output_metadata data; - int ret,MaxCLL=1500,MaxFALL=400; - int max_lum=4000,min_lum=0050; + int ret, MaxCLL = 1500, MaxFALL = 400; + int max_lum = 4000, min_lum = 0050; struct AVMasteringDisplayMetadata *md = NULL; struct AVContentLightMetadata *ld = NULL; - - // clean up FFMEPG stuff if (trc == AVCOL_TRC_BT2020_10) trc = AVCOL_TRC_ARIB_STD_B67; - if ((old_color == color && old_trc == trc && !sd1 && !sd2) || !render->hdr_metadata) - return; // nothing to do + return; // nothing to do if (sd1) md = sd1->data; @@ -351,81 +481,76 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid if (sd2) ld = sd2->data; - if (md && !memcmp(md,&md_save,sizeof(md_save))) - if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) { + if (md && !memcmp(md, &md_save, sizeof(md_save))) + if (ld && !memcmp(ld, &ld_save, sizeof(ld_save))) { + return; + } else if (ld && !memcmp(ld, &ld_save, sizeof(ld_save))) { return; } - else if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) { - return; - } if (ld) - memcpy(&ld_save,ld,sizeof(ld_save)); + memcpy(&ld_save, ld, sizeof(ld_save)); if (md) - memcpy(&md_save,md,sizeof(md_save)); + memcpy(&md_save, md, sizeof(md_save)); - Debug(3,"Update HDR to TRC %d color %d\n",trc,color); + Debug(3, "Update HDR to TRC %d color %d\n", trc, color); old_color = color; old_trc = trc; - if (VulkanTargetColorSpace != 3) { // no HDR TV - m_need_modeset = 1; // change in colorsettings - return; - } - + m_need_modeset = 1; // change in colorsettings + return; + } + if (render->hdr_blob_id) 
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id); - switch(trc) { - case AVCOL_TRC_BT709: // 1 - case AVCOL_TRC_UNSPECIFIED: // 2 + switch (trc) { + case AVCOL_TRC_BT709: // 1 + case AVCOL_TRC_UNSPECIFIED: // 2 eotf = EOTF_TRADITIONAL_GAMMA_SDR; break; - case AVCOL_TRC_BT2020_10: // 14 + case AVCOL_TRC_BT2020_10: // 14 case AVCOL_TRC_BT2020_12: - case AVCOL_TRC_ARIB_STD_B67: // 18 HLG + case AVCOL_TRC_ARIB_STD_B67: // 18 HLG eotf = EOTF_HLG; break; - case AVCOL_TRC_SMPTE2084: // 16 + case AVCOL_TRC_SMPTE2084: // 16 eotf = EOTF_ST2084; - break; + break; default: eotf = EOTF_TRADITIONAL_GAMMA_SDR; break; } switch (color) { - case AVCOL_PRI_BT709: // 1 - case AVCOL_PRI_UNSPECIFIED: // 2 + case AVCOL_PRI_BT709: // 1 + case AVCOL_PRI_UNSPECIFIED: // 2 cs = weston_colorspace_lookup("BT.709"); break; - case AVCOL_PRI_BT2020: // 9 + case AVCOL_PRI_BT2020: // 9 cs = weston_colorspace_lookup("BT.2020"); break; - case AVCOL_PRI_BT470BG: // 5 - cs = weston_colorspace_lookup("BT.470 B/G"); // BT.601 + case AVCOL_PRI_BT470BG: // 5 + cs = weston_colorspace_lookup("BT.470 B/G"); // BT.601 break; default: cs = weston_colorspace_lookup("BT.709"); break; } - if (md) { // we got Metadata + if (md) { // we got Metadata if (md->has_primaries) { - Debug(3,"Mastering Display Metadata,\n has_primaries:%d has_luminance:%d \n" - "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f) \n" - "min_luminance=%f, max_luminance=%f\n", - md->has_primaries, md->has_luminance, - av_q2d(md->display_primaries[0][0]), - av_q2d(md->display_primaries[0][1]), - av_q2d(md->display_primaries[1][0]), - av_q2d(md->display_primaries[1][1]), - av_q2d(md->display_primaries[2][0]), - av_q2d(md->display_primaries[2][1]), - av_q2d(md->white_point[0]), av_q2d(md->white_point[1]), - av_q2d(md->min_luminance), av_q2d(md->max_luminance)); + Debug(3, + "Mastering Display Metadata,\n has_primaries:%d has_luminance:%d \n" + "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f) \n" + "min_luminance=%f, max_luminance=%f\n", + md->has_primaries, md->has_luminance, av_q2d(md->display_primaries[0][0]), + av_q2d(md->display_primaries[0][1]), av_q2d(md->display_primaries[1][0]), + av_q2d(md->display_primaries[1][1]), av_q2d(md->display_primaries[2][0]), + av_q2d(md->display_primaries[2][1]), av_q2d(md->white_point[0]), av_q2d(md->white_point[1]), + av_q2d(md->min_luminance), av_q2d(md->max_luminance)); cs = &hdr10; cs->r.f[0] = (float)md->display_primaries[0][0].num / (float)md->display_primaries[0][0].den; @@ -439,53 +564,43 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid } if (md->has_luminance) { max_lum = av_q2d(md->max_luminance); - min_lum = av_q2d(md->min_luminance) * 10000 ; - printf("max_lum %d min_lum %d\n",max_lum,min_lum); + min_lum = av_q2d(md->min_luminance) * 10000; + printf("max_lum %d min_lum %d\n", max_lum, min_lum); } } if (ld) { - Debug(3,"Has MaxCLL %d MaxFALL %d\n",ld->MaxCLL,ld->MaxFALL); + Debug(3, "Has MaxCLL %d MaxFALL %d\n", ld->MaxCLL, ld->MaxFALL); MaxCLL = ld->MaxCLL; MaxFALL = ld->MaxFALL; } - data.metadata_type = 7; // ???????????????????????? 
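/*
 * Editorial sketch of the CTA-861-G unit encoding used when this
 * metadata block is filled in: chromaticity coordinates are coded as
 * unsigned 16-bit values in units of 0.00002 (hence the factor 50000
 * in encode_xyy() above), the minimum mastering luminance in units of
 * 0.0001 cd/m2, and the maximum mastering luminance in units of
 * 1 cd/m2. The helper names below are illustrative only.
 */
#include <stdint.h>

static uint16_t EncodeChromaticity(float xy) { return (uint16_t)(xy * 50000.0f); }       // 0.00002 units
static uint16_t EncodeMinLuminance(float cd_m2) { return (uint16_t)(cd_m2 * 10000.0f); } // 0.0001 cd/m2 units
static uint16_t EncodeMaxLuminance(float cd_m2) { return (uint16_t)cd_m2; }              // 1 cd/m2 units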
- weston_hdr_metadata(&data.hdmi_metadata_type1, - encode_xyy(cs->r.f[0]), - encode_xyy(cs->r.f[1]), - encode_xyy(cs->g.f[0]), - encode_xyy(cs->g.f[1]), - encode_xyy(cs->b.f[0]), - encode_xyy(cs->b.f[1]), - encode_xyy(cs->whitepoint.f[0]), - encode_xyy(cs->whitepoint.f[1]), - max_lum, // max_display_mastering_luminance - min_lum, // min_display_mastering_luminance - MaxCLL, // Maximum Content Light Level (MaxCLL) - MaxFALL, // Maximum Frame-Average Light Level (MaxFALL) - eotf); + data.metadata_type = 7; // ???????????????????????? + weston_hdr_metadata(&data.hdmi_metadata_type1, encode_xyy(cs->r.f[0]), encode_xyy(cs->r.f[1]), + encode_xyy(cs->g.f[0]), encode_xyy(cs->g.f[1]), encode_xyy(cs->b.f[0]), encode_xyy(cs->b.f[1]), + encode_xyy(cs->whitepoint.f[0]), encode_xyy(cs->whitepoint.f[1]), + max_lum, // max_display_mastering_luminance + min_lum, // min_display_mastering_luminance + MaxCLL, // Maximum Content Light Level (MaxCLL) + MaxFALL, // Maximum Frame-Average Light Level (MaxFALL) + eotf); + ret = drmModeCreatePropertyBlob(render->fd_drm, &data, sizeof(data), &render->hdr_blob_id); + if (ret) { + printf("DRM: HDR metadata: failed blob create \n"); + render->hdr_blob_id = 0; + return; + } - - ret = drmModeCreatePropertyBlob(render->fd_drm, &data, sizeof(data), &render->hdr_blob_id); - if (ret) { - printf("DRM: HDR metadata: failed blob create \n"); - render->hdr_blob_id = 0; - return; - } + ret = drmModeConnectorSetProperty(render->fd_drm, render->connector_id, render->hdr_metadata, render->hdr_blob_id); + if (ret) { + printf("DRM: HDR metadata: failed property set %d\n", ret); - ret = drmModeConnectorSetProperty(render->fd_drm, render->connector_id, - render->hdr_metadata, render->hdr_blob_id); - if (ret) { - printf("DRM: HDR metadata: failed property set %d\n",ret); - - if (render->hdr_blob_id) - drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id); - render->hdr_blob_id = 0; - return; - } + if (render->hdr_blob_id) + drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id); + render->hdr_blob_id = 0; + return; + } m_need_modeset = 1; - Debug(3,"DRM: HDR metadata: prop set\n"); - + Debug(3, "DRM: HDR metadata: prop set\n"); } diff --git a/iatomic.h b/iatomic.h index aa62922..4588947 100644 --- a/iatomic.h +++ b/iatomic.h @@ -23,9 +23,7 @@ /// @addtogroup iatomic /// @{ -#define GCC_VERSION (__GNUC__ * 10000 \ - + __GNUC_MINOR__ * 100 \ - + __GNUC_PATCHLEVEL__) +#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) // gcc before 4.7 didn't support atomic builtins, // use alsa atomic functions. @@ -59,38 +57,32 @@ typedef volatile int atomic_t; /// /// Set atomic value. /// -#define atomic_set(ptr, val) \ - __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST) +#define atomic_set(ptr, val) __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST) /// /// Read atomic value. /// -#define atomic_read(ptr) \ - __atomic_load_n(ptr, __ATOMIC_SEQ_CST) +#define atomic_read(ptr) __atomic_load_n(ptr, __ATOMIC_SEQ_CST) /// /// Increment atomic value. /// -#define atomic_inc(ptr) \ - __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_inc(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) /// /// Decrement atomic value. /// -#define atomic_dec(ptr) \ - __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_dec(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) /// /// Add to atomic value. 
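/*
 * Usage sketch (editorial): how the atomic_* helpers documented in
 * iatomic.h are typically used for a counter shared between a producer
 * and a consumer thread. Assumes iatomic.h (above) is included; the
 * variable and function names are illustrative and do not appear in
 * the plugin.
 */
static atomic_t FramesFilled;

static void ProducerPut(void) {
    atomic_inc(&FramesFilled);       // one more frame queued
}

static int ConsumerGet(void) {
    if (!atomic_read(&FramesFilled)) // nothing buffered yet
        return -1;
    atomic_dec(&FramesFilled);
    return 0;
}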
/// -#define atomic_add(val, ptr) \ - __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST) +#define atomic_add(val, ptr) __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST) /// /// Subtract from atomic value. /// -#define atomic_sub(val, ptr) \ - __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST) +#define atomic_sub(val, ptr) __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST) #endif diff --git a/misc.h b/misc.h index 1bc8426..aec62e8 100644 --- a/misc.h +++ b/misc.h @@ -24,9 +24,9 @@ /// @addtogroup misc /// @{ -#include #include -#include // clock_gettime +#include +#include // clock_gettime ////////////////////////////////////////////////////////////////////////////// // Defines @@ -40,23 +40,22 @@ // Variables ////////////////////////////////////////////////////////////////////////////// -extern int SysLogLevel; ///< how much information wanted +extern int SysLogLevel; ///< how much information wanted ////////////////////////////////////////////////////////////////////////////// // Prototypes ////////////////////////////////////////////////////////////////////////////// -static inline void Syslog(const int, const char *format, ...) - __attribute__((format(printf, 2, 3))); +static inline void Syslog(const int, const char *format, ...) __attribute__((format(printf, 2, 3))); ////////////////////////////////////////////////////////////////////////////// // Inlines ////////////////////////////////////////////////////////////////////////////// #ifdef DEBUG -#define DebugLevel 4 /// private debug level +#define DebugLevel 4 /// private debug level #else -#define DebugLevel 0 /// private debug level +#define DebugLevel 0 /// private debug level #endif /** @@ -67,8 +66,7 @@ static inline void Syslog(const int, const char *format, ...) ** - 2 info ** - 3 important debug and fixme's */ -static inline void Syslog(const int level, const char *format, ...) -{ +static inline void Syslog(const int level, const char *format, ...) { if (SysLogLevel > level || DebugLevel > level) { va_list ap; @@ -81,30 +79,34 @@ static inline void Syslog(const int level, const char *format, ...) /** ** Show error. */ -#define Error(fmt...) Syslog(LOG_ERR, fmt) +#define Error(fmt...) Syslog(LOG_ERR, fmt) /** ** Show fatal error. */ -#define Fatal(fmt...) do { Error(fmt); abort(); } while (0) +#define Fatal(fmt...) \ + do { \ + Error(fmt); \ + abort(); \ + } while (0) /** ** Show warning. */ -#define Warning(fmt...) Syslog(LOG_WARNING, fmt) +#define Warning(fmt...) Syslog(LOG_WARNING, fmt) /** ** Show info. */ -#define Info(fmt...) Syslog(LOG_INFO, fmt) +#define Info(fmt...) Syslog(LOG_INFO, fmt) /** ** Show debug. */ #ifdef DEBUG -#define Debug(level, fmt...) Syslog(level, fmt) +#define Debug(level, fmt...) Syslog(level, fmt) #else -#define Debug(level, fmt...) /* disabled */ +#define Debug(level, fmt...) /* disabled */ #endif #ifndef AV_NOPTS_VALUE @@ -116,17 +118,16 @@ static inline void Syslog(const int level, const char *format, ...) 
** ** @param ts dvb time stamp */ -static inline const char *Timestamp2String(int64_t ts) -{ +static inline const char *Timestamp2String(int64_t ts) { static char buf[4][16]; static int idx; - if (ts == (int64_t) AV_NOPTS_VALUE) { + if (ts == (int64_t)AV_NOPTS_VALUE) { return "--:--:--.---"; } idx = (idx + 1) % 3; snprintf(buf[idx], sizeof(buf[idx]), "%2d:%02d:%02d.%03d", (int)(ts / (90 * 3600000)), - (int)((ts / (90 * 60000)) % 60), (int)((ts / (90 * 1000)) % 60), (int)((ts / 90) % 1000)); + (int)((ts / (90 * 60000)) % 60), (int)((ts / (90 * 1000)) % 60), (int)((ts / 90) % 1000)); return buf[idx]; } @@ -136,8 +137,7 @@ static inline const char *Timestamp2String(int64_t ts) ** ** @returns ticks in ms, */ -static inline uint32_t GetMsTicks(void) -{ +static inline uint32_t GetMsTicks(void) { #ifdef CLOCK_MONOTONIC struct timespec tspec; @@ -153,14 +153,13 @@ static inline uint32_t GetMsTicks(void) #endif } -static inline uint64_t GetusTicks(void) -{ +static inline uint64_t GetusTicks(void) { #ifdef CLOCK_MONOTONIC struct timespec tspec; clock_gettime(CLOCK_MONOTONIC, &tspec); - return (uint64_t) (tspec.tv_sec * 1000000) + (tspec.tv_nsec); + return (uint64_t)(tspec.tv_sec * 1000000) + (tspec.tv_nsec); #else struct timeval tval; diff --git a/openglosd.cpp b/openglosd.cpp index ab0905c..4193f1d 100644 --- a/openglosd.cpp +++ b/openglosd.cpp @@ -1,13 +1,12 @@ #define __STL_CONFIG_H -#include #include "openglosd.h" +#include /**************************************************************************************** -* Helpers -****************************************************************************************/ + * Helpers + ****************************************************************************************/ -void ConvertColor(const GLint & colARGB, glm::vec4 & col) -{ +void ConvertColor(const GLint &colARGB, glm::vec4 &col) { col.a = ((colARGB & 0xFF000000) >> 24) / 255.0; col.r = ((colARGB & 0x00FF0000) >> 16) / 255.0; col.g = ((colARGB & 0x0000FF00) >> 8) / 255.0; @@ -15,8 +14,8 @@ void ConvertColor(const GLint & colARGB, glm::vec4 & col) } /**************************************************************************************** -* cShader -****************************************************************************************/ + * cShader + ****************************************************************************************/ #ifdef CUVID const char *glversion = "#version 330 core "; @@ -87,7 +86,6 @@ void main() \ } \ "; - const char *textVertexShader = "%s\n \ \ layout (location = 0) in vec2 position; \ @@ -126,24 +124,20 @@ void main() \ /// /// GLX check error. 
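/*
 * Editorial sketch: glGetError() returns (and clears) one error flag
 * per call and several flags can be pending, so a checker may drain
 * the queue in a loop instead of reporting only the first error; the
 * GlxCheck macro below condenses this into a single test.
 */
#include <stdio.h>
#include <GL/gl.h>

static void DrainGlErrors(const char *where) {
    GLenum err;

    while ((err = glGetError()) != GL_NO_ERROR)
        fprintf(stderr, "%s: GL error 0x%x\n", where, err);
}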
/// -#define GlxCheck(void)\ -{\ - GLenum err;\ -\ - if ((err = glGetError()) != GL_NO_ERROR) {\ - esyslog( "video/glx: error %s:%d %d '%s'\n",__FILE__,__LINE__, err, gluErrorString(err));\ - }\ -} +#define GlxCheck(void) \ + { \ + GLenum err; \ + \ + if ((err = glGetError()) != GL_NO_ERROR) { \ + esyslog("video/glx: error %s:%d %d '%s'\n", __FILE__, __LINE__, err, gluErrorString(err)); \ + } \ + } static cShader *Shaders[stCount]; -void cShader::Use(void) -{ - glUseProgram(id); -} +void cShader::Use(void) { glUseProgram(id); } -bool cShader::Load(eShaderType type) -{ +bool cShader::Load(eShaderType type) { this->type = type; const char *vertexCode = NULL; @@ -179,38 +173,27 @@ bool cShader::Load(eShaderType type) return true; } -void cShader::SetFloat(const GLchar * name, GLfloat value) -{ - glUniform1f(glGetUniformLocation(id, name), value); -} +void cShader::SetFloat(const GLchar *name, GLfloat value) { glUniform1f(glGetUniformLocation(id, name), value); } -void cShader::SetInteger(const GLchar * name, GLint value) -{ - glUniform1i(glGetUniformLocation(id, name), value); -} +void cShader::SetInteger(const GLchar *name, GLint value) { glUniform1i(glGetUniformLocation(id, name), value); } -void cShader::SetVector2f(const GLchar * name, GLfloat x, GLfloat y) -{ +void cShader::SetVector2f(const GLchar *name, GLfloat x, GLfloat y) { glUniform2f(glGetUniformLocation(id, name), x, y); } -void cShader::SetVector3f(const GLchar * name, GLfloat x, GLfloat y, GLfloat z) -{ +void cShader::SetVector3f(const GLchar *name, GLfloat x, GLfloat y, GLfloat z) { glUniform3f(glGetUniformLocation(id, name), x, y, z); } -void cShader::SetVector4f(const GLchar * name, GLfloat x, GLfloat y, GLfloat z, GLfloat w) -{ +void cShader::SetVector4f(const GLchar *name, GLfloat x, GLfloat y, GLfloat z, GLfloat w) { glUniform4f(glGetUniformLocation(id, name), x, y, z, w); } -void cShader::SetMatrix4(const GLchar * name, const glm::mat4 & matrix) -{ +void cShader::SetMatrix4(const GLchar *name, const glm::mat4 &matrix) { glUniformMatrix4fv(glGetUniformLocation(id, name), 1, GL_FALSE, glm::value_ptr(matrix)); } -bool cShader::Compile(const char *vertexCode, const char *fragmentCode) -{ +bool cShader::Compile(const char *vertexCode, const char *fragmentCode) { GLuint sVertex, sFragment; char *buffer = (char *)malloc(1000); @@ -241,15 +224,15 @@ bool cShader::Compile(const char *vertexCode, const char *fragmentCode) glLinkProgram(id); if (!CheckCompileErrors(id, true)) return false; - // Delete the shaders as they're linked into our program now and no longer necessery + // Delete the shaders as they're linked into our program now and no longer + // necessery glDeleteShader(sVertex); glDeleteShader(sFragment); free(buffer); return true; } -bool cShader::CheckCompileErrors(GLuint object, bool program) -{ +bool cShader::CheckCompileErrors(GLuint object, bool program) { GLint success; GLchar infoLog[1024]; @@ -271,29 +254,24 @@ bool cShader::CheckCompileErrors(GLuint object, bool program) return true; } -#define KERNING_UNKNOWN (-10000) +#define KERNING_UNKNOWN (-10000) /**************************************************************************************** -* cOglGlyph -****************************************************************************************/ -cOglGlyph::cOglGlyph(FT_ULong charCode, FT_BitmapGlyph ftGlyph) -{ + * cOglGlyph + ****************************************************************************************/ +cOglGlyph::cOglGlyph(FT_ULong charCode, FT_BitmapGlyph ftGlyph) { this->charCode = charCode; 
bearingLeft = ftGlyph->left; bearingTop = ftGlyph->top; width = ftGlyph->bitmap.width; height = ftGlyph->bitmap.rows; - advanceX = ftGlyph->root.advance.x >> 16; //value in 1/2^16 pixel + advanceX = ftGlyph->root.advance.x >> 16; // value in 1/2^16 pixel LoadTexture(ftGlyph); } -cOglGlyph::~cOglGlyph(void) -{ +cOglGlyph::~cOglGlyph(void) {} -} - -int cOglGlyph::GetKerningCache(FT_ULong prevSym) -{ +int cOglGlyph::GetKerningCache(FT_ULong prevSym) { for (int i = kerningCache.Size(); --i > 0;) { if (kerningCache[i].prevSym == prevSym) return kerningCache[i].kerning; @@ -301,25 +279,18 @@ int cOglGlyph::GetKerningCache(FT_ULong prevSym) return KERNING_UNKNOWN; } -void cOglGlyph::SetKerningCache(FT_ULong prevSym, int kerning) -{ - kerningCache.Append(tKerning(prevSym, kerning)); -} +void cOglGlyph::SetKerningCache(FT_ULong prevSym, int kerning) { kerningCache.Append(tKerning(prevSym, kerning)); } -void cOglGlyph::BindTexture(void) -{ - glBindTexture(GL_TEXTURE_2D, texture); -} +void cOglGlyph::BindTexture(void) { glBindTexture(GL_TEXTURE_2D, texture); } -void cOglGlyph::LoadTexture(FT_BitmapGlyph ftGlyph) -{ +void cOglGlyph::LoadTexture(FT_BitmapGlyph ftGlyph) { // Disable byte-alignment restriction glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, ftGlyph->bitmap.width, ftGlyph->bitmap.rows, 0, GL_RED, GL_UNSIGNED_BYTE, - ftGlyph->bitmap.buffer); + ftGlyph->bitmap.buffer); // Set texture options glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); @@ -327,24 +298,22 @@ void cOglGlyph::LoadTexture(FT_BitmapGlyph ftGlyph) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glBindTexture(GL_TEXTURE_2D, 0); glPixelStorei(GL_UNPACK_ALIGNMENT, 4); - } extern "C" void GlxInitopengl(); extern "C" void GlxDrawopengl(); extern "C" void GlxDestroy(); -extern "C" void makejpg(uint8_t * data, int width, int height); +extern "C" void makejpg(uint8_t *data, int width, int height); /**************************************************************************************** -* cOglFont -****************************************************************************************/ + * cOglFont + ****************************************************************************************/ FT_Library cOglFont::ftLib = 0; -cList < cOglFont > *cOglFont::fonts = 0; +cList *cOglFont::fonts = 0; bool cOglFont::initiated = false; -cOglFont::cOglFont(const char *fontName, int charHeight):name(fontName) -{ +cOglFont::cOglFont(const char *fontName, int charHeight) : name(fontName) { size = charHeight; height = 0; bottom = 0; @@ -359,13 +328,9 @@ cOglFont::cOglFont(const char *fontName, int charHeight):name(fontName) bottom = abs((face->size->metrics.descender - 63) / 64); } -cOglFont::~cOglFont(void) -{ - FT_Done_Face(face); -} +cOglFont::~cOglFont(void) { FT_Done_Face(face); } -cOglFont *cOglFont::Get(const char *name, int charHeight) -{ +cOglFont *cOglFont::Get(const char *name, int charHeight) { if (!fonts) Init(); @@ -380,39 +345,36 @@ cOglFont *cOglFont::Get(const char *name, int charHeight) return font; } -void cOglFont::Init(void) -{ +void cOglFont::Init(void) { if (FT_Init_FreeType(&ftLib)) { esyslog("[softhddev]failed to initialize FreeType library!"); return; } - fonts = new cList < cOglFont >; + fonts = new cList; initiated = true; } -void cOglFont::Cleanup(void) -{ +void cOglFont::Cleanup(void) { if (!initiated) return; delete 
fonts; fonts = 0; - if (ftLib) { - if (FT_Done_FreeType(ftLib)) - esyslog("failed to deinitialize FreeType library!"); - } - ftLib = 0; + if (ftLib) { + if (FT_Done_FreeType(ftLib)) + esyslog("failed to deinitialize FreeType library!"); + } + ftLib = 0; } -cOglGlyph *cOglFont::Glyph(FT_ULong charCode) const -{ +cOglGlyph *cOglFont::Glyph(FT_ULong charCode) const { // Non-breaking space: if (charCode == 0xA0) charCode = 0x20; // Lookup in cache: - for (cOglGlyph * g = glyphCache.First(); g; g = glyphCache.Next(g)) { + for (cOglGlyph *g = glyphCache.First(); g; g = glyphCache.Next(g)) { if (g->CharCode() == charCode) { return g; } @@ -451,7 +413,7 @@ cOglGlyph *cOglFont::Glyph(FT_ULong charCode) const error = FT_Glyph_StrokeBorder(&ftGlyph, stroker, 0, 1); if (error) { esyslog("[softhddev]FT_Glyph_StrokeBorder FT_Error (0x%02x) : %s\n", FT_Errors[error].code, - FT_Errors[error].message); + FT_Errors[error].message); return NULL; } FT_Stroker_Done(stroker); @@ -459,11 +421,11 @@ cOglGlyph *cOglFont::Glyph(FT_ULong charCode) const error = FT_Glyph_To_Bitmap(&ftGlyph, FT_RENDER_MODE_NORMAL, 0, 1); if (error) { esyslog("[softhddev]FT_Glyph_To_Bitmap FT_Error (0x%02x) : %s\n", FT_Errors[error].code, - FT_Errors[error].message); + FT_Errors[error].message); return NULL; } - cOglGlyph *Glyph = new cOglGlyph(charCode, (FT_BitmapGlyph) ftGlyph); + cOglGlyph *Glyph = new cOglGlyph(charCode, (FT_BitmapGlyph)ftGlyph); glyphCache.Add(Glyph); FT_Done_Glyph(ftGlyph); @@ -471,8 +433,7 @@ cOglGlyph *cOglFont::Glyph(FT_ULong charCode) const return Glyph; } -int cOglFont::Kerning(cOglGlyph * glyph, FT_ULong prevSym) const -{ +int cOglFont::Kerning(cOglGlyph *glyph, FT_ULong prevSym) const { int kerning = 0; if (glyph && prevSym) { @@ -491,10 +452,9 @@ int cOglFont::Kerning(cOglGlyph * glyph, FT_ULong prevSym) const } /**************************************************************************************** -* cOglFb -****************************************************************************************/ -cOglFb::cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight) -{ + * cOglFb + ****************************************************************************************/ +cOglFb::cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight) { initiated = false; fb = 0; texture = 0; @@ -509,16 +469,14 @@ cOglFb::cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHei scrollable = false; } -cOglFb::~cOglFb(void) -{ +cOglFb::~cOglFb(void) { if (texture) glDeleteTextures(1, &texture); if (fb) glDeleteFramebuffers(1, &fb); } -bool cOglFb::Init(void) -{ +bool cOglFb::Init(void) { initiated = true; glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); @@ -542,64 +500,51 @@ bool cOglFb::Init(void) return true; } -void cOglFb::Bind(void) -{ +void cOglFb::Bind(void) { if (!initiated) Init(); glViewport(0, 0, width, height); glBindFramebuffer(GL_FRAMEBUFFER, fb); } -void cOglFb::BindRead(void) -{ - glBindFramebuffer(GL_READ_FRAMEBUFFER, fb); -} +void cOglFb::BindRead(void) { glBindFramebuffer(GL_READ_FRAMEBUFFER, fb); } -void cOglFb::BindWrite(void) -{ - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fb); -} +void cOglFb::BindWrite(void) { glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fb); } -void cOglFb::Unbind(void) -{ +void cOglFb::Unbind(void) { glBindFramebuffer(GL_FRAMEBUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); } -bool cOglFb::BindTexture(void) -{ +bool cOglFb::BindTexture(void) { if (!initiated) return false; glBindTexture(GL_TEXTURE_2D, texture); return true; 
} -void cOglFb::Blit(GLint destX1, GLint destY1, GLint destX2, GLint destY2) -{ +void cOglFb::Blit(GLint destX1, GLint destY1, GLint destX2, GLint destY2) { glBlitFramebuffer(0, 0, width, height, destX1, destY1, destX2, destY2, GL_COLOR_BUFFER_BIT, GL_NEAREST); glFlush(); } /**************************************************************************************** -* cOglOutputFb -****************************************************************************************/ -cOglOutputFb::cOglOutputFb(GLint width, GLint height):cOglFb(width, height, width, height) -{ + * cOglOutputFb + ****************************************************************************************/ +cOglOutputFb::cOglOutputFb(GLint width, GLint height) : cOglFb(width, height, width, height) { // surface = 0; initiated = false; fb = 0; texture = 0; } -cOglOutputFb::~cOglOutputFb(void) -{ +cOglOutputFb::~cOglOutputFb(void) { // glVDPAUUnregisterSurfaceNV(surface); glDeleteTextures(1, &texture); glDeleteFramebuffers(1, &fb); } -bool cOglOutputFb::Init(void) -{ +bool cOglOutputFb::Init(void) { initiated = true; glGenTextures(1, &texture); @@ -620,26 +565,21 @@ bool cOglOutputFb::Init(void) return true; } -void cOglOutputFb::BindWrite(void) -{ +void cOglOutputFb::BindWrite(void) { if (!initiated) Init(); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fb); } -void cOglOutputFb::Unbind(void) -{ - glBindFramebuffer(GL_FRAMEBUFFER, 0); -} +void cOglOutputFb::Unbind(void) { glBindFramebuffer(GL_FRAMEBUFFER, 0); } /**************************************************************************************** -* cOglVb -****************************************************************************************/ + * cOglVb + ****************************************************************************************/ static cOglVb *VertexBuffers[vbCount]; -cOglVb::cOglVb(int type) -{ - this->type = (eVertexBufferType) type; +cOglVb::cOglVb(int type) { + this->type = (eVertexBufferType)type; vao = 0; vbo = 0; sizeVertex1 = 0; @@ -648,12 +588,9 @@ cOglVb::cOglVb(int type) drawMode = 0; } -cOglVb::~cOglVb(void) -{ -} +cOglVb::~cOglVb(void) {} -bool cOglVb::Init(void) -{ +bool cOglVb::Init(void) { if (type == vbTexture) { // Texture VBO definition @@ -700,11 +637,11 @@ bool cOglVb::Init(void) glEnableVertexAttribArray(0); glVertexAttribPointer(0, sizeVertex1, GL_FLOAT, GL_FALSE, (sizeVertex1 + sizeVertex2) * sizeof(GLfloat), - (GLvoid *) 0); + (GLvoid *)0); if (sizeVertex2 > 0) { glEnableVertexAttribArray(1); glVertexAttribPointer(1, sizeVertex2, GL_FLOAT, GL_FALSE, (sizeVertex1 + sizeVertex2) * sizeof(GLfloat), - (GLvoid *) (sizeVertex1 * sizeof(GLfloat))); + (GLvoid *)(sizeVertex1 * sizeof(GLfloat))); } glBindBuffer(GL_ARRAY_BUFFER, 0); @@ -713,52 +650,35 @@ bool cOglVb::Init(void) return true; } -void cOglVb::Bind(void) -{ - glBindVertexArray(vao); -} +void cOglVb::Bind(void) { glBindVertexArray(vao); } -void cOglVb::Unbind(void) -{ - glBindVertexArray(0); -} +void cOglVb::Unbind(void) { glBindVertexArray(0); } -void cOglVb::ActivateShader(void) -{ - Shaders[shader]->Use(); -} +void cOglVb::ActivateShader(void) { Shaders[shader]->Use(); } -void cOglVb::EnableBlending(void) -{ +void cOglVb::EnableBlending(void) { glEnable(GL_BLEND); glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA); } -void cOglVb::DisableBlending(void) -{ - glDisable(GL_BLEND); -} +void cOglVb::DisableBlending(void) { glDisable(GL_BLEND); } -void cOglVb::SetShaderColor(GLint color) -{ +void cOglVb::SetShaderColor(GLint color) { glm::vec4 col; 
ConvertColor(color, col); Shaders[shader]->SetVector4f("inColor", col.r, col.g, col.b, col.a); } -void cOglVb::SetShaderAlpha(GLint alpha) -{ - Shaders[shader]->SetVector4f("alpha", 1.0f, 1.0f, 1.0f, (GLfloat) (alpha) / 255.0f); +void cOglVb::SetShaderAlpha(GLint alpha) { + Shaders[shader]->SetVector4f("alpha", 1.0f, 1.0f, 1.0f, (GLfloat)(alpha) / 255.0f); } -void cOglVb::SetShaderProjectionMatrix(GLint width, GLint height) -{ - glm::mat4 projection = glm::ortho(0.0f, (GLfloat) width, (GLfloat) height, 0.0f, -1.0f, 1.0f); +void cOglVb::SetShaderProjectionMatrix(GLint width, GLint height) { + glm::mat4 projection = glm::ortho(0.0f, (GLfloat)width, (GLfloat)height, 0.0f, -1.0f, 1.0f); Shaders[shader]->SetMatrix4("projection", projection); } -void cOglVb::SetVertexData(GLfloat * vertices, int count) -{ +void cOglVb::SetVertexData(GLfloat *vertices, int count) { if (count == 0) count = numVertices; glBindBuffer(GL_ARRAY_BUFFER, vbo); @@ -766,8 +686,7 @@ void cOglVb::SetVertexData(GLfloat * vertices, int count) glBindBuffer(GL_ARRAY_BUFFER, 0); } -void cOglVb::DrawArrays(int count) -{ +void cOglVb::DrawArrays(int count) { if (count == 0) count = numVertices; glDrawArrays(drawMode, 0, count); @@ -775,16 +694,12 @@ void cOglVb::DrawArrays(int count) } /**************************************************************************************** -* cOpenGLCmd -****************************************************************************************/ + * cOpenGLCmd + ****************************************************************************************/ //------------------ cOglCmdInitOutputFb -------------------- -cOglCmdInitOutputFb::cOglCmdInitOutputFb(cOglOutputFb * oFb):cOglCmd(NULL) -{ - this->oFb = oFb; -} +cOglCmdInitOutputFb::cOglCmdInitOutputFb(cOglOutputFb *oFb) : cOglCmd(NULL) { this->oFb = oFb; } -bool cOglCmdInitOutputFb::Execute(void) -{ +bool cOglCmdInitOutputFb::Execute(void) { bool ok = oFb->Init(); oFb->Unbind(); @@ -792,13 +707,9 @@ bool cOglCmdInitOutputFb::Execute(void) } //------------------ cOglCmdInitFb -------------------- -cOglCmdInitFb::cOglCmdInitFb(cOglFb * fb, cCondWait * wait):cOglCmd(fb) -{ - this->wait = wait; -} +cOglCmdInitFb::cOglCmdInitFb(cOglFb *fb, cCondWait *wait) : cOglCmd(fb) { this->wait = wait; } -bool cOglCmdInitFb::Execute(void) -{ +bool cOglCmdInitFb::Execute(void) { bool ok = fb->Init(); fb->Unbind(); @@ -808,12 +719,9 @@ bool cOglCmdInitFb::Execute(void) } //------------------ cOglCmdDeleteFb -------------------- -cOglCmdDeleteFb::cOglCmdDeleteFb(cOglFb * fb):cOglCmd(fb) -{ -} +cOglCmdDeleteFb::cOglCmdDeleteFb(cOglFb *fb) : cOglCmd(fb) {} -bool cOglCmdDeleteFb::Execute(void) -{ +bool cOglCmdDeleteFb::Execute(void) { if (fb) delete fb; @@ -821,46 +729,44 @@ bool cOglCmdDeleteFb::Execute(void) } //------------------ cOglCmdRenderFbToBufferFb -------------------- -cOglCmdRenderFbToBufferFb::cOglCmdRenderFbToBufferFb(cOglFb * fb, cOglFb * buffer, GLint x, GLint y, GLint transparency, GLint drawPortX, GLint drawPortY):cOglCmd - (fb) -{ +cOglCmdRenderFbToBufferFb::cOglCmdRenderFbToBufferFb(cOglFb *fb, cOglFb *buffer, GLint x, GLint y, GLint transparency, + GLint drawPortX, GLint drawPortY) + : cOglCmd(fb) { this->buffer = buffer; - this->x = (GLfloat) x; - this->y = (GLfloat) y; - this->drawPortX = (GLfloat) drawPortX; - this->drawPortY = (GLfloat) drawPortY; + this->x = (GLfloat)x; + this->y = (GLfloat)y; + this->drawPortX = (GLfloat)drawPortX; + this->drawPortY = (GLfloat)drawPortY; this->transparency = transparency; - } -bool 
cOglCmdRenderFbToBufferFb::Execute(void) -{ - GLfloat x2 = x + fb->ViewportWidth(); //right - GLfloat y2 = y + fb->ViewportHeight(); //bottom +bool cOglCmdRenderFbToBufferFb::Execute(void) { + GLfloat x2 = x + fb->ViewportWidth(); // right + GLfloat y2 = y + fb->ViewportHeight(); // bottom - GLfloat texX1 = drawPortX / (GLfloat) fb->Width(); - GLfloat texY1 = drawPortY / (GLfloat) fb->Height(); + GLfloat texX1 = drawPortX / (GLfloat)fb->Width(); + GLfloat texY1 = drawPortY / (GLfloat)fb->Height(); GLfloat texX2 = texX1 + 1.0f; GLfloat texY2 = texY1 + 1.0f; if (fb->Scrollable()) { - GLfloat pageHeight = (GLfloat) fb->ViewportHeight() / (GLfloat) fb->Height(); + GLfloat pageHeight = (GLfloat)fb->ViewportHeight() / (GLfloat)fb->Height(); - texX1 = abs(drawPortX) / (GLfloat) fb->Width(); - texY1 = 1.0f - pageHeight - abs(drawPortY) / (GLfloat) fb->Height(); - texX2 = texX1 + (GLfloat) fb->ViewportWidth() / (GLfloat) fb->Width(); -// x2 = x + fb->Width(); + texX1 = abs(drawPortX) / (GLfloat)fb->Width(); + texY1 = 1.0f - pageHeight - abs(drawPortY) / (GLfloat)fb->Height(); + texX2 = texX1 + (GLfloat)fb->ViewportWidth() / (GLfloat)fb->Width(); + // x2 = x + fb->Width(); texY2 = texY1 + pageHeight; } GLfloat quadVertices[] = { - // Pos // TexCoords - x, y, texX1, texY2, //left top - x, y2, texX1, texY1, //left bottom - x2, y2, texX2, texY1, //right bottom + // Pos // TexCoords + x, y, texX1, texY2, // left top + x, y2, texX1, texY1, // left bottom + x2, y2, texX2, texY1, // right bottom - x, y, texX1, texY2, //left top - x2, y2, texX2, texY1, //right bottom - x2, y, texX2, texY2 //right top + x, y, texX1, texY2, // left top + x2, y2, texX2, texY1, // right bottom + x2, y, texX2, texY2 // right top }; VertexBuffers[vbTexture]->ActivateShader(); @@ -882,8 +788,8 @@ bool cOglCmdRenderFbToBufferFb::Execute(void) } //------------------ cOglCmdCopyBufferToOutputFb -------------------- -cOglCmdCopyBufferToOutputFb::cOglCmdCopyBufferToOutputFb(cOglFb * fb, cOglOutputFb * oFb, GLint x, GLint y):cOglCmd(fb) -{ +cOglCmdCopyBufferToOutputFb::cOglCmdCopyBufferToOutputFb(cOglFb *fb, cOglOutputFb *oFb, GLint x, GLint y) + : cOglCmd(fb) { this->oFb = oFb; this->x = x; this->y = y; @@ -891,8 +797,7 @@ cOglCmdCopyBufferToOutputFb::cOglCmdCopyBufferToOutputFb(cOglFb * fb, cOglOutput extern unsigned char *posd; -bool cOglCmdCopyBufferToOutputFb::Execute(void) -{ +bool cOglCmdCopyBufferToOutputFb::Execute(void) { pthread_mutex_lock(&OSDMutex); fb->BindRead(); @@ -909,13 +814,9 @@ bool cOglCmdCopyBufferToOutputFb::Execute(void) } //------------------ cOglCmdFill -------------------- -cOglCmdFill::cOglCmdFill(cOglFb * fb, GLint color):cOglCmd(fb) -{ - this->color = color; -} +cOglCmdFill::cOglCmdFill(cOglFb *fb, GLint color) : cOglCmd(fb) { this->color = color; } -bool cOglCmdFill::Execute(void) -{ +bool cOglCmdFill::Execute(void) { glm::vec4 col; ConvertColor(color, col); fb->Bind(); @@ -926,9 +827,8 @@ bool cOglCmdFill::Execute(void) } //------------------ cOglCmdDrawRectangle -------------------- -cOglCmdDrawRectangle::cOglCmdDrawRectangle(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color):cOglCmd - (fb) -{ +cOglCmdDrawRectangle::cOglCmdDrawRectangle(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color) + : cOglCmd(fb) { this->x = x; this->y = y; this->width = width; @@ -936,18 +836,17 @@ cOglCmdDrawRectangle::cOglCmdDrawRectangle(cOglFb * fb, GLint x, GLint y, GLint this->color = color; } -bool cOglCmdDrawRectangle::Execute(void) -{ +bool cOglCmdDrawRectangle::Execute(void) { 
GLfloat x1 = x; GLfloat y1 = y; GLfloat x2 = x + width; GLfloat y2 = y + height; GLfloat vertices[] = { - x1, y1, //left top - x2, y1, //right top - x2, y2, //right bottom - x1, y2 //left bottom + x1, y1, // left top + x2, y1, // right top + x2, y2, // right bottom + x1, y2 // left bottom }; VertexBuffers[vbRect]->ActivateShader(); @@ -967,14 +866,14 @@ bool cOglCmdDrawRectangle::Execute(void) } //------------------ cOglCmdDrawEllipse -------------------- -///quadrants: -///< 0 draws the entire ellipse -///< 1..4 draws only the first, second, third or fourth quadrant, respectively -///< 5..8 draws the right, top, left or bottom half, respectively -///< -1..-4 draws the inverted part of the given quadrant -cOglCmdDrawEllipse::cOglCmdDrawEllipse(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint quadrants):cOglCmd - (fb) -{ +/// quadrants: +///< 0 draws the entire ellipse +///< 1..4 draws only the first, second, third or fourth quadrant, +///< respectively 5..8 draws the right, top, left or bottom half, +///< respectively -1..-4 draws the inverted part of the given quadrant +cOglCmdDrawEllipse::cOglCmdDrawEllipse(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color, + GLint quadrants) + : cOglCmd(fb) { this->x = x; this->y = y; this->width = width; @@ -983,8 +882,7 @@ cOglCmdDrawEllipse::cOglCmdDrawEllipse(cOglFb * fb, GLint x, GLint y, GLint widt this->quadrants = quadrants; } -bool cOglCmdDrawEllipse::Execute(void) -{ +bool cOglCmdDrawEllipse::Execute(void) { int numVertices = 0; GLfloat *vertices = NULL; @@ -1026,35 +924,33 @@ bool cOglCmdDrawEllipse::Execute(void) VertexBuffers[vbEllipse]->EnableBlending(); fb->Unbind(); - delete[]vertices; + delete[] vertices; return true; } -GLfloat *cOglCmdDrawEllipse::CreateVerticesFull(int &numVertices) -{ +GLfloat *cOglCmdDrawEllipse::CreateVerticesFull(int &numVertices) { int size = 364; numVertices = size / 2; - GLfloat radiusX = (GLfloat) width / 2; - GLfloat radiusY = (GLfloat) height / 2; + GLfloat radiusX = (GLfloat)width / 2; + GLfloat radiusY = (GLfloat)height / 2; GLfloat *vertices = new GLfloat[size]; vertices[0] = x + radiusX; vertices[1] = y + radiusY; for (int i = 0; i <= 180; i++) { - vertices[2 * i + 2] = x + radiusX + (GLfloat) cos(2 * i * M_PI / 180.0f) * radiusX; - vertices[2 * i + 3] = y + radiusY - (GLfloat) sin(2 * i * M_PI / 180.0f) * radiusY; + vertices[2 * i + 2] = x + radiusX + (GLfloat)cos(2 * i * M_PI / 180.0f) * radiusX; + vertices[2 * i + 3] = y + radiusY - (GLfloat)sin(2 * i * M_PI / 180.0f) * radiusY; } return vertices; } -GLfloat *cOglCmdDrawEllipse::CreateVerticesQuadrant(int &numVertices) -{ +GLfloat *cOglCmdDrawEllipse::CreateVerticesQuadrant(int &numVertices) { int size = 94; numVertices = size / 2; - GLfloat radiusX = (GLfloat) width; - GLfloat radiusY = (GLfloat) height; + GLfloat radiusX = (GLfloat)width; + GLfloat radiusY = (GLfloat)height; GLint transX = 0; GLint transY = 0; GLint startAngle = 0; @@ -1111,14 +1007,13 @@ GLfloat *cOglCmdDrawEllipse::CreateVerticesQuadrant(int &numVertices) break; } for (int i = 0; i <= 45; i++) { - vertices[2 * i + 2] = x + transX + (GLfloat) cos((2 * i + startAngle) * M_PI / 180.0f) * radiusX; - vertices[2 * i + 3] = y + transY - (GLfloat) sin((2 * i + startAngle) * M_PI / 180.0f) * radiusY; + vertices[2 * i + 2] = x + transX + (GLfloat)cos((2 * i + startAngle) * M_PI / 180.0f) * radiusX; + vertices[2 * i + 3] = y + transY - (GLfloat)sin((2 * i + startAngle) * M_PI / 180.0f) * radiusY; } return vertices; } -GLfloat 
*cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) -{ +GLfloat *cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) { int size = 184; numVertices = size / 2; @@ -1131,16 +1026,16 @@ GLfloat *cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) switch (quadrants) { case 5: - radiusX = (GLfloat) width; - radiusY = (GLfloat) height / 2; + radiusX = (GLfloat)width; + radiusY = (GLfloat)height / 2; vertices[0] = x; vertices[1] = y + radiusY; startAngle = 270; transY = radiusY; break; case 6: - radiusX = (GLfloat) width / 2; - radiusY = (GLfloat) height; + radiusX = (GLfloat)width / 2; + radiusY = (GLfloat)height; vertices[0] = x + radiusX; vertices[1] = y + radiusY; startAngle = 0; @@ -1148,8 +1043,8 @@ GLfloat *cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) transY = radiusY; break; case 7: - radiusX = (GLfloat) width; - radiusY = (GLfloat) height / 2; + radiusX = (GLfloat)width; + radiusY = (GLfloat)height / 2; vertices[0] = x + radiusX; vertices[1] = y + radiusY; startAngle = 90; @@ -1157,8 +1052,8 @@ GLfloat *cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) transY = radiusY; break; case 8: - radiusX = (GLfloat) width / 2; - radiusY = (GLfloat) height; + radiusX = (GLfloat)width / 2; + radiusY = (GLfloat)height; vertices[0] = x + radiusX; vertices[1] = y; startAngle = 180; @@ -1168,14 +1063,14 @@ GLfloat *cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) break; } for (int i = 0; i <= 90; i++) { - vertices[2 * i + 2] = x + transX + (GLfloat) cos((2 * i + startAngle) * M_PI / 180.0f) * radiusX; - vertices[2 * i + 3] = y + transY - (GLfloat) sin((2 * i + startAngle) * M_PI / 180.0f) * radiusY; + vertices[2 * i + 2] = x + transX + (GLfloat)cos((2 * i + startAngle) * M_PI / 180.0f) * radiusX; + vertices[2 * i + 3] = y + transY - (GLfloat)sin((2 * i + startAngle) * M_PI / 180.0f) * radiusY; } return vertices; } //------------------ cOglCmdDrawSlope -------------------- -///type: +/// type: ///< 0: horizontal, rising, lower ///< 1: horizontal, rising, upper ///< 2: horizontal, falling, lower @@ -1184,9 +1079,8 @@ GLfloat *cOglCmdDrawEllipse::CreateVerticesHalf(int &numVertices) ///< 5: vertical, rising, upper ///< 6: vertical, falling, lower ///< 7: vertical, falling, upper -cOglCmdDrawSlope::cOglCmdDrawSlope(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type):cOglCmd - (fb) -{ +cOglCmdDrawSlope::cOglCmdDrawSlope(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type) + : cOglCmd(fb) { this->x = x; this->y = y; this->width = width; @@ -1195,8 +1089,7 @@ cOglCmdDrawSlope::cOglCmdDrawSlope(cOglFb * fb, GLint x, GLint y, GLint width, G this->type = type; } -bool cOglCmdDrawSlope::Execute(void) -{ +bool cOglCmdDrawSlope::Execute(void) { bool falling = type & 0x02; bool vertical = type & 0x04; @@ -1210,27 +1103,27 @@ bool cOglCmdDrawSlope::Execute(void) switch (type) { case 0: case 4: - vertices[0] = (GLfloat) (x + width); - vertices[1] = (GLfloat) (y + height); + vertices[0] = (GLfloat)(x + width); + vertices[1] = (GLfloat)(y + height); break; case 1: case 5: - vertices[0] = (GLfloat) x; - vertices[1] = (GLfloat) y; + vertices[0] = (GLfloat)x; + vertices[1] = (GLfloat)y; break; case 2: case 6: - vertices[0] = (GLfloat) x; - vertices[1] = (GLfloat) (y + height); + vertices[0] = (GLfloat)x; + vertices[1] = (GLfloat)(y + height); break; case 3: case 7: - vertices[0] = (GLfloat) (x + width); - vertices[1] = (GLfloat) y; + vertices[0] = (GLfloat)(x + width); + vertices[1] = (GLfloat)y; break; default: - 
vertices[0] = (GLfloat) (x); - vertices[1] = (GLfloat) (y); + vertices[0] = (GLfloat)(x); + vertices[1] = (GLfloat)(y); break; } @@ -1240,11 +1133,11 @@ bool cOglCmdDrawSlope::Execute(void) if (falling) c = -c; if (vertical) { - vertices[2 * i + 2] = (GLfloat) x + (GLfloat) width / 2.0f + (GLfloat) width *c / 2.0f; - vertices[2 * i + 3] = (GLfloat) y + (GLfloat) i *((GLfloat) height) / steps; + vertices[2 * i + 2] = (GLfloat)x + (GLfloat)width / 2.0f + (GLfloat)width * c / 2.0f; + vertices[2 * i + 3] = (GLfloat)y + (GLfloat)i * ((GLfloat)height) / steps; } else { - vertices[2 * i + 2] = (GLfloat) x + (GLfloat) i *((GLfloat) width) / steps; - vertices[2 * i + 3] = (GLfloat) y + (GLfloat) height / 2.0f + (GLfloat) height *c / 2.0f; + vertices[2 * i + 2] = (GLfloat)x + (GLfloat)i * ((GLfloat)width) / steps; + vertices[2 * i + 3] = (GLfloat)y + (GLfloat)height / 2.0f + (GLfloat)height * c / 2.0f; } } @@ -1262,14 +1155,14 @@ bool cOglCmdDrawSlope::Execute(void) VertexBuffers[vbSlope]->EnableBlending(); fb->Unbind(); - delete[]vertices; + delete[] vertices; return true; } //------------------ cOglCmdDrawText -------------------- -cOglCmdDrawText::cOglCmdDrawText(cOglFb * fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, - int fontSize, tColor colorText):cOglCmd(fb), fontName(name) -{ +cOglCmdDrawText::cOglCmdDrawText(cOglFb *fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, + int fontSize, tColor colorText) + : cOglCmd(fb), fontName(name) { this->x = x; this->y = y; this->limitX = limitX; @@ -1279,13 +1172,9 @@ cOglCmdDrawText::cOglCmdDrawText(cOglFb * fb, GLint x, GLint y, unsigned int *sy this->fontName = name; } -cOglCmdDrawText::~cOglCmdDrawText(void) -{ - free(symbols); -} +cOglCmdDrawText::~cOglCmdDrawText(void) { free(symbols); } -bool cOglCmdDrawText::Execute(void) -{ +bool cOglCmdDrawText::Execute(void) { cOglFont *f = cOglFont::Get(*fontName, fontSize); if (!f) @@ -1320,19 +1209,19 @@ bool cOglCmdDrawText::Execute(void) kerning = f->Kerning(g, prevSym); prevSym = sym; - GLfloat x1 = xGlyph + kerning + g->BearingLeft(); //left - GLfloat y1 = y + (fontHeight - bottom - g->BearingTop()); //top - GLfloat x2 = x1 + g->Width(); //right - GLfloat y2 = y1 + g->Height(); //bottom + GLfloat x1 = xGlyph + kerning + g->BearingLeft(); // left + GLfloat y1 = y + (fontHeight - bottom - g->BearingTop()); // top + GLfloat x2 = x1 + g->Width(); // right + GLfloat y2 = y1 + g->Height(); // bottom GLfloat vertices[] = { - x1, y2, 0.0, 1.0, // left bottom - x1, y1, 0.0, 0.0, // left top - x2, y1, 1.0, 0.0, // right top + x1, y2, 0.0, 1.0, // left bottom + x1, y1, 0.0, 0.0, // left top + x2, y1, 1.0, 0.0, // right top - x1, y2, 0.0, 1.0, // left bottom - x2, y1, 1.0, 0.0, // right top - x2, y2, 1.0, 1.0 // right bottom + x1, y2, 0.0, 1.0, // left bottom + x2, y1, 1.0, 0.0, // right top + x2, y2, 1.0, 1.0 // right bottom }; g->BindTexture(); @@ -1351,9 +1240,9 @@ bool cOglCmdDrawText::Execute(void) } //------------------ cOglCmdDrawImage -------------------- -cOglCmdDrawImage::cOglCmdDrawImage(cOglFb * fb, tColor * argb, GLint width, GLint height, GLint x, GLint y, - bool overlay, double scaleX, double scaleY):cOglCmd(fb) -{ +cOglCmdDrawImage::cOglCmdDrawImage(cOglFb *fb, tColor *argb, GLint width, GLint height, GLint x, GLint y, bool overlay, + double scaleX, double scaleY) + : cOglCmd(fb) { this->argb = argb; this->x = x; this->y = y; @@ -1362,21 +1251,16 @@ cOglCmdDrawImage::cOglCmdDrawImage(cOglFb * fb, tColor * argb, GLint width, GLin this->overlay = 
overlay; this->scaleX = scaleX; this->scaleY = scaleY; - } -cOglCmdDrawImage::~cOglCmdDrawImage(void) -{ - free(argb); -} +cOglCmdDrawImage::~cOglCmdDrawImage(void) { free(argb); } -bool cOglCmdDrawImage::Execute(void) -{ +bool cOglCmdDrawImage::Execute(void) { GLuint texture; #ifdef USE_DRM -// pthread_mutex_lock(&OSDMutex); - GlxDrawopengl(); // here we need the Shared Context for upload + // pthread_mutex_lock(&OSDMutex); + GlxDrawopengl(); // here we need the Shared Context for upload GlxCheck(); #endif glGenTextures(1, &texture); @@ -1389,24 +1273,24 @@ bool cOglCmdDrawImage::Execute(void) glBindTexture(GL_TEXTURE_2D, 0); glFlush(); #ifdef USE_DRM - GlxInitopengl(); // Reset Context + GlxInitopengl(); // Reset Context GlxCheck(); // pthread_mutex_unlock(&OSDMutex); #endif - GLfloat x1 = x; //left - GLfloat y1 = y; //top - GLfloat x2 = x + width; //right - GLfloat y2 = y + height; //bottom + GLfloat x1 = x; // left + GLfloat y1 = y; // top + GLfloat x2 = x + width; // right + GLfloat y2 = y + height; // bottom GLfloat quadVertices[] = { - x1, y2, 0.0, 1.0, // left bottom - x1, y1, 0.0, 0.0, // left top - x2, y1, 1.0, 0.0, // right top + x1, y2, 0.0, 1.0, // left bottom + x1, y1, 0.0, 0.0, // left top + x2, y1, 1.0, 0.0, // right top - x1, y2, 0.0, 1.0, // left bottom - x2, y1, 1.0, 0.0, // right top - x2, y2, 1.0, 1.0 // right bottom + x1, y2, 0.0, 1.0, // left bottom + x2, y1, 1.0, 0.0, // right top + x2, y2, 1.0, 1.0 // right bottom }; VertexBuffers[vbTexture]->ActivateShader(); @@ -1431,29 +1315,27 @@ bool cOglCmdDrawImage::Execute(void) } //------------------ cOglCmdDrawTexture -------------------- -cOglCmdDrawTexture::cOglCmdDrawTexture(cOglFb * fb, sOglImage * imageRef, GLint x, GLint y):cOglCmd(fb) -{ +cOglCmdDrawTexture::cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y) : cOglCmd(fb) { this->imageRef = imageRef; this->x = x; this->y = y; } -bool cOglCmdDrawTexture::Execute(void) -{ - GLfloat x1 = x; //top - GLfloat y1 = y; //left - GLfloat x2 = x + imageRef->width; //right - GLfloat y2 = y + imageRef->height; //bottom +bool cOglCmdDrawTexture::Execute(void) { + GLfloat x1 = x; // top + GLfloat y1 = y; // left + GLfloat x2 = x + imageRef->width; // right + GLfloat y2 = y + imageRef->height; // bottom GLfloat quadVertices[] = { - // Pos // TexCoords - x1, y1, 0.0f, 0.0f, //left bottom - x1, y2, 0.0f, 1.0f, //left top - x2, y2, 1.0f, 1.0f, //right top + // Pos // TexCoords + x1, y1, 0.0f, 0.0f, // left bottom + x1, y2, 0.0f, 1.0f, // left top + x2, y2, 1.0f, 1.0f, // right top - x1, y1, 0.0f, 0.0f, //left bottom - x2, y2, 1.0f, 1.0f, //right top - x2, y1, 1.0f, 0.0f //right bottom + x1, y1, 0.0f, 0.0f, // left bottom + x2, y2, 1.0f, 1.0f, // right top + x2, y1, 1.0f, 0.0f // right bottom }; VertexBuffers[vbTexture]->ActivateShader(); @@ -1472,28 +1354,23 @@ bool cOglCmdDrawTexture::Execute(void) } //------------------ cOglCmdStoreImage -------------------- -cOglCmdStoreImage::cOglCmdStoreImage(sOglImage * imageRef, tColor * argb):cOglCmd(NULL) -{ +cOglCmdStoreImage::cOglCmdStoreImage(sOglImage *imageRef, tColor *argb) : cOglCmd(NULL) { this->imageRef = imageRef; data = argb; } -cOglCmdStoreImage::~cOglCmdStoreImage(void) -{ - free(data); -} +cOglCmdStoreImage::~cOglCmdStoreImage(void) { free(data); } -bool cOglCmdStoreImage::Execute(void) -{ +bool cOglCmdStoreImage::Execute(void) { #ifdef USE_DRM -// pthread_mutex_lock(&OSDMutex); - GlxDrawopengl(); // here we need the Shared Context for upload + // pthread_mutex_lock(&OSDMutex); + GlxDrawopengl(); // here we 
need the Shared Context for upload GlxCheck(); #endif glGenTextures(1, &imageRef->texture); glBindTexture(GL_TEXTURE_2D, imageRef->texture); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, imageRef->width, imageRef->height, 0, GL_BGRA, - GL_UNSIGNED_INT_8_8_8_8_REV, data); + GL_UNSIGNED_INT_8_8_8_8_REV, data); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); @@ -1501,7 +1378,7 @@ bool cOglCmdStoreImage::Execute(void) glBindTexture(GL_TEXTURE_2D, 0); glFlush(); #ifdef USE_DRM - GlxInitopengl(); // Reset Context + GlxInitopengl(); // Reset Context GlxCheck(); // pthread_mutex_lock(&OSDMutex); #endif @@ -1509,14 +1386,12 @@ bool cOglCmdStoreImage::Execute(void) } //------------------ cOglCmdDropImage -------------------- -cOglCmdDropImage::cOglCmdDropImage(sOglImage * imageRef, cCondWait * wait):cOglCmd(NULL) -{ +cOglCmdDropImage::cOglCmdDropImage(sOglImage *imageRef, cCondWait *wait) : cOglCmd(NULL) { this->imageRef = imageRef; this->wait = wait; } -bool cOglCmdDropImage::Execute(void) -{ +bool cOglCmdDropImage::Execute(void) { if (imageRef->texture != GL_NONE) glDeleteTextures(1, &imageRef->texture); wait->Signal(); @@ -1524,10 +1399,9 @@ bool cOglCmdDropImage::Execute(void) } /****************************************************************************** -* cOglThread -******************************************************************************/ -cOglThread::cOglThread(cCondWait * startWait, int maxCacheSize):cThread("oglThread") -{ + * cOglThread + ******************************************************************************/ +cOglThread::cOglThread(cCondWait *startWait, int maxCacheSize) : cThread("oglThread") { stalled = false; memCached = 0; @@ -1543,18 +1417,15 @@ cOglThread::cOglThread(cCondWait * startWait, int maxCacheSize):cThread("oglThre } Start(); - } -cOglThread::~cOglThread() -{ +cOglThread::~cOglThread() { delete wait; wait = NULL; } -void cOglThread::Stop(void) -{ +void cOglThread::Stop(void) { for (int i = 0; i < OGL_MAX_OSDIMAGES; i++) { if (imageCache[i].used) { DropImageData(i); @@ -1564,8 +1435,7 @@ void cOglThread::Stop(void) stalled = false; } -void cOglThread::DoCmd(cOglCmd * cmd) -{ +void cOglThread::DoCmd(cOglCmd *cmd) { while (stalled) cCondWait::SleepMs(10); @@ -1585,16 +1455,17 @@ void cOglThread::DoCmd(cOglCmd * cmd) wait->Signal(); } -int cOglThread::StoreImage(const cImage & image) -{ +int cOglThread::StoreImage(const cImage &image) { if (!maxCacheSize) { return 0; } if (image.Width() > maxTextureSize || image.Height() > maxTextureSize) { - esyslog("[softhddev] cannot store image of %dpx x %dpx " "(maximum size is %dpx x %dpx) - falling back to " - "cOsdProvider::StoreImageData()", image.Width(), image.Height(), maxTextureSize, maxTextureSize); + esyslog("[softhddev] cannot store image of %dpx x %dpx " + "(maximum size is %dpx x %dpx) - falling back to " + "cOsdProvider::StoreImageData()", + image.Width(), image.Height(), maxTextureSize, maxTextureSize); return 0; } @@ -1605,7 +1476,9 @@ int cOglThread::StoreImage(const cImage & image) float cachedMB = memCached / 1024.0f / 1024.0f; float maxMB = maxCacheSize / 1024.0f / 1024.0f; - esyslog("[softhddev]Maximum size for GPU cache reached. Used: %.2fMB Max: %.2fMB", cachedMB, maxMB); + esyslog("[softhddev]Maximum size for GPU cache reached. 
Used: %.2fMB Max: " + "%.2fMB", + cachedMB, maxMB); return 0; } @@ -1638,7 +1511,7 @@ int cOglThread::StoreImage(const cImage & image) if (imageRef->texture == GL_NONE) { esyslog("[softhddev]failed to store OSD image texture! (%s)", - timer.TimedOut()? "timed out" : "allocation failed"); + timer.TimedOut() ? "timed out" : "allocation failed"); DropImageData(slot); slot = 0; } @@ -1647,8 +1520,7 @@ int cOglThread::StoreImage(const cImage & image) return slot; } -int cOglThread::GetFreeSlot(void) -{ +int cOglThread::GetFreeSlot(void) { Lock(); int slot = 0; @@ -1662,8 +1534,7 @@ int cOglThread::GetFreeSlot(void) return slot; } -void cOglThread::ClearSlot(int slot) -{ +void cOglThread::ClearSlot(int slot) { int i = -slot - 1; if (i >= 0 && i < OGL_MAX_OSDIMAGES) { @@ -1676,8 +1547,7 @@ void cOglThread::ClearSlot(int slot) } } -sOglImage *cOglThread::GetImageRef(int slot) -{ +sOglImage *cOglThread::GetImageRef(int slot) { int i = -slot - 1; if (0 <= i && i < OGL_MAX_OSDIMAGES) @@ -1685,8 +1555,7 @@ sOglImage *cOglThread::GetImageRef(int slot) return 0; } -void cOglThread::DropImageData(int imageHandle) -{ +void cOglThread::DropImageData(int imageHandle) { sOglImage *imageRef = GetImageRef(imageHandle); if (!imageRef) @@ -1701,8 +1570,7 @@ void cOglThread::DropImageData(int imageHandle) ClearSlot(imageHandle); } -void cOglThread::Action(void) -{ +void cOglThread::Action(void) { if (!InitOpenGL()) { esyslog("[softhddev]Could not initiate OpenGL Context"); Cleanup(); @@ -1738,7 +1606,7 @@ void cOglThread::Action(void) glGetIntegerv(GL_MAX_TEXTURE_SIZE, &maxTextureSize); dsyslog("[softhddev]Maximum Pixmap size: %dx%dpx", maxTextureSize, maxTextureSize); - //now Thread is ready to do his job + // now Thread is ready to do his job startWait->Signal(); stalled = false; @@ -1756,7 +1624,9 @@ void cOglThread::Action(void) Unlock(); // uint64_t start = cTimeMs::Now(); cmd->Execute(); - // esyslog("[softhddev]\"%s\", %dms, %d commands left, time %" PRIu64 "", cmd->Description(), (int)(cTimeMs::Now() - start), commands.size(), cTimeMs::Now()); + // esyslog("[softhddev]\"%s\", %dms, %d commands left, time %" PRIu64 "", + // cmd->Description(), (int)(cTimeMs::Now() - start), commands.size(), + // cTimeMs::Now()); delete cmd; if (stalled && commands.size() < OGL_CMDQUEUE_SIZE / 2) @@ -1768,8 +1638,7 @@ void cOglThread::Action(void) dsyslog("[softhddev]OpenGL Worker Thread Ended"); } -bool cOglThread::InitOpenGL(void) -{ +bool cOglThread::InitOpenGL(void) { #ifdef USE_DRM GlxInitopengl(); #else @@ -1806,38 +1675,34 @@ bool cOglThread::InitOpenGL(void) GLenum err = glewInit(); if (err != GLEW_OK) { - esyslog("[softhddev]glewInit failed: %s Using Version: %s\n",glewGetErrorString(err),glewGetString(GLEW_VERSION)); -// return false; + esyslog("[softhddev]glewInit failed: %s Using Version: %s\n", glewGetErrorString(err), + glewGetString(GLEW_VERSION)); + // return false; } - #endif VertexBuffers[vbText]->EnableBlending(); glDisable(GL_DEPTH_TEST); return true; - } -bool cOglThread::InitShaders(void) -{ +bool cOglThread::InitShaders(void) { for (int i = 0; i < stCount; i++) { cShader *shader = new cShader(); - if (!shader->Load((eShaderType) i)) + if (!shader->Load((eShaderType)i)) return false; Shaders[i] = shader; } return true; } -void cOglThread::DeleteShaders(void) -{ +void cOglThread::DeleteShaders(void) { for (int i = 0; i < stCount; i++) delete Shaders[i]; } -bool cOglThread::InitVdpauInterop(void) -{ +bool cOglThread::InitVdpauInterop(void) { #if 0 void *vdpDevice = GetVDPAUDevice(); void *procAdress = 
GetVDPAUProcAdress(); @@ -1845,13 +1710,12 @@ bool cOglThread::InitVdpauInterop(void) while (glGetError() != GL_NO_ERROR) ; glVDPAUInitNV(vdpDevice, procAdress); if (glGetError() != GL_NO_ERROR) - return false; + return false; #endif return true; } -bool cOglThread::InitVertexBuffers(void) -{ +bool cOglThread::InitVertexBuffers(void) { for (int i = 0; i < vbCount; i++) { cOglVb *vb = new cOglVb(i); @@ -1862,15 +1726,13 @@ bool cOglThread::InitVertexBuffers(void) return true; } -void cOglThread::DeleteVertexBuffers(void) -{ +void cOglThread::DeleteVertexBuffers(void) { for (int i = 0; i < vbCount; i++) { delete VertexBuffers[i]; } } -void cOglThread::Cleanup(void) -{ +void cOglThread::Cleanup(void) { esyslog("[softhddev]OglThread cleanup\n"); pthread_mutex_lock(&OSDMutex); OsdClose(); @@ -1891,29 +1753,26 @@ void cOglThread::Cleanup(void) } /**************************************************************************************** -* cOglPixmap -****************************************************************************************/ + * cOglPixmap + ****************************************************************************************/ -cOglPixmap::cOglPixmap(std::shared_ptr < cOglThread > oglThread, int Layer, const cRect & ViewPort, - const cRect & DrawPort):cPixmap(Layer, ViewPort, DrawPort) -{ +cOglPixmap::cOglPixmap(std::shared_ptr oglThread, int Layer, const cRect &ViewPort, const cRect &DrawPort) + : cPixmap(Layer, ViewPort, DrawPort) { this->oglThread = oglThread; - int width = DrawPort.IsEmpty()? ViewPort.Width() : DrawPort.Width(); - int height = DrawPort.IsEmpty()? ViewPort.Height() : DrawPort.Height(); + int width = DrawPort.IsEmpty() ? ViewPort.Width() : DrawPort.Width(); + int height = DrawPort.IsEmpty() ? ViewPort.Height() : DrawPort.Height(); fb = new cOglFb(width, height, ViewPort.Width(), ViewPort.Height()); dirty = true; } -cOglPixmap::~cOglPixmap(void) -{ +cOglPixmap::~cOglPixmap(void) { if (!oglThread->Active()) return; oglThread->DoCmd(new cOglCmdDeleteFb(fb)); } -void cOglPixmap::SetAlpha(int Alpha) -{ +void cOglPixmap::SetAlpha(int Alpha) { Alpha = constrain(Alpha, ALPHA_TRANSPARENT, ALPHA_OPAQUE); if (Alpha != cPixmap::Alpha()) { cPixmap::SetAlpha(Alpha); @@ -1921,27 +1780,23 @@ void cOglPixmap::SetAlpha(int Alpha) } } -void cOglPixmap::SetTile(bool Tile) -{ +void cOglPixmap::SetTile(bool Tile) { cPixmap::SetTile(Tile); SetDirty(); } -void cOglPixmap::SetViewPort(const cRect & Rect) -{ +void cOglPixmap::SetViewPort(const cRect &Rect) { cPixmap::SetViewPort(Rect); SetDirty(); } -void cOglPixmap::SetDrawPortPoint(const cPoint & Point, bool Dirty) -{ +void cOglPixmap::SetDrawPortPoint(const cPoint &Point, bool Dirty) { cPixmap::SetDrawPortPoint(Point, Dirty); if (Dirty) SetDirty(); } -void cOglPixmap::Clear(void) -{ +void cOglPixmap::Clear(void) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -1950,8 +1805,7 @@ void cOglPixmap::Clear(void) MarkDrawPortDirty(DrawPort()); } -void cOglPixmap::Fill(tColor Color) -{ +void cOglPixmap::Fill(tColor Color) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -1960,8 +1814,7 @@ void cOglPixmap::Fill(tColor Color) MarkDrawPortDirty(DrawPort()); } -void cOglPixmap::DrawImage(const cPoint & Point, const cImage & Image) -{ +void cOglPixmap::DrawImage(const cPoint &Point, const cImage &Image) { if (!oglThread->Active()) return; tColor *argb = MALLOC(tColor, Image.Width() * Image.Height()); @@ -1975,8 +1828,7 @@ void cOglPixmap::DrawImage(const cPoint & Point, const cImage & Image) MarkDrawPortDirty(cRect(Point, 
cSize(Image.Width(), Image.Height())).Intersected(DrawPort().Size())); } -void cOglPixmap::DrawImage(const cPoint & Point, int ImageHandle) -{ +void cOglPixmap::DrawImage(const cPoint &Point, int ImageHandle) { if (!oglThread->Active()) return; @@ -1986,9 +1838,8 @@ void cOglPixmap::DrawImage(const cPoint & Point, int ImageHandle) oglThread->DoCmd(new cOglCmdDrawTexture(fb, img, Point.X(), Point.Y())); } /* - Fallback to VDR implementation, needs to separate cSoftOsdProvider from softhddevice.cpp - else { - if (cSoftOsdProvider::GetImageData(ImageHandle)) + Fallback to VDR implementation, needs to separate cSoftOsdProvider from + softhddevice.cpp else { if (cSoftOsdProvider::GetImageData(ImageHandle)) DrawImage(Point, *cSoftOsdProvider::GetImageData(ImageHandle)); } */ @@ -1996,8 +1847,7 @@ void cOglPixmap::DrawImage(const cPoint & Point, int ImageHandle) MarkDrawPortDirty(DrawPort()); } -void cOglPixmap::DrawPixel(const cPoint & Point, tColor Color) -{ +void cOglPixmap::DrawPixel(const cPoint &Point, tColor Color) { cRect r(Point.X(), Point.Y(), 1, 1); oglThread->DoCmd(new cOglCmdDrawRectangle(fb, r.X(), r.Y(), r.Width(), r.Height(), Color)); @@ -2006,8 +1856,7 @@ void cOglPixmap::DrawPixel(const cPoint & Point, tColor Color) MarkDrawPortDirty(r); } -void cOglPixmap::DrawBitmap(const cPoint & Point, const cBitmap & Bitmap, tColor ColorFg, tColor ColorBg, bool Overlay) -{ +void cOglPixmap::DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg, tColor ColorBg, bool Overlay) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -2023,9 +1872,11 @@ void cOglPixmap::DrawBitmap(const cPoint & Point, const cBitmap & Bitmap, tColor for (int px = 0; px < Bitmap.Width(); px++) { tIndex index = *Bitmap.Data(px, py); - *p++ = (!index - && Overlay) ? clrTransparent : (specialColors ? (index == 0 ? ColorBg : index == - 1 ? ColorFg : Bitmap.Color(index)) : Bitmap.Color(index)); + *p++ = (!index && Overlay) ? clrTransparent + : (specialColors ? (index == 0 ? ColorBg + : index == 1 ? 
ColorFg + : Bitmap.Color(index)) + : Bitmap.Color(index)); } oglThread->DoCmd(new cOglCmdDrawImage(fb, argb, Bitmap.Width(), Bitmap.Height(), Point.X(), Point.Y(), true)); @@ -2033,9 +1884,8 @@ void cOglPixmap::DrawBitmap(const cPoint & Point, const cBitmap & Bitmap, tColor MarkDrawPortDirty(cRect(Point, cSize(Bitmap.Width(), Bitmap.Height())).Intersected(DrawPort().Size())); } -void cOglPixmap::DrawText(const cPoint & Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont * Font, - int Width, int Height, int Alignment) -{ +void cOglPixmap::DrawText(const cPoint &Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font, + int Width, int Height, int Alignment) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -2072,18 +1922,19 @@ void cOglPixmap::DrawText(const cPoint & Point, const char *s, tColor ColorFg, t x += Width - w; if ((Alignment & taBorder) != 0) x -= std::max(h / TEXT_ALIGN_BORDER, 1); - } else { // taCentered + } else { // taCentered if (w < Width) x += (Width - w) / 2; } } if (Height) { - if ((Alignment & taTop) != 0) ; + if ((Alignment & taTop) != 0) + ; else if ((Alignment & taBottom) != 0) { if (h < Height) y += Height - h; - } else { // taCentered + } else { // taCentered if (h < Height) y += (Height - h) / 2; } @@ -2095,8 +1946,7 @@ void cOglPixmap::DrawText(const cPoint & Point, const char *s, tColor ColorFg, t MarkDrawPortDirty(r); } -void cOglPixmap::DrawRectangle(const cRect & Rect, tColor Color) -{ +void cOglPixmap::DrawRectangle(const cRect &Rect, tColor Color) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -2105,8 +1955,7 @@ void cOglPixmap::DrawRectangle(const cRect & Rect, tColor Color) MarkDrawPortDirty(Rect); } -void cOglPixmap::DrawEllipse(const cRect & Rect, tColor Color, int Quadrants) -{ +void cOglPixmap::DrawEllipse(const cRect &Rect, tColor Color, int Quadrants) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -2115,8 +1964,7 @@ void cOglPixmap::DrawEllipse(const cRect & Rect, tColor Color, int Quadrants) MarkDrawPortDirty(Rect); } -void cOglPixmap::DrawSlope(const cRect & Rect, tColor Color, int Type) -{ +void cOglPixmap::DrawSlope(const cRect &Rect, tColor Color, int Type) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -2125,40 +1973,35 @@ void cOglPixmap::DrawSlope(const cRect & Rect, tColor Color, int Type) MarkDrawPortDirty(Rect); } -void cOglPixmap::Render(const cPixmap * Pixmap, const cRect & Source, const cPoint & Dest) -{ +void cOglPixmap::Render(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest) { esyslog("[softhddev] Render %d %d %d not implemented in OpenGl OSD", Pixmap->ViewPort().X(), Source.X(), Dest.X()); } -void cOglPixmap::Copy(const cPixmap * Pixmap, const cRect & Source, const cPoint & Dest) -{ +void cOglPixmap::Copy(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest) { esyslog("[softhddev] Copy %d %d %d not implemented in OpenGl OSD", Pixmap->ViewPort().X(), Source.X(), Dest.X()); } -void cOglPixmap::Scroll(const cPoint & Dest, const cRect & Source) -{ +void cOglPixmap::Scroll(const cPoint &Dest, const cRect &Source) { esyslog("[softhddev] Scroll %d %d not implemented in OpenGl OSD", Source.X(), Dest.X()); } -void cOglPixmap::Pan(const cPoint & Dest, const cRect & Source) -{ +void cOglPixmap::Pan(const cPoint &Dest, const cRect &Source) { esyslog("[softhddev] Pan %d %d not implemented in OpenGl OSD", Source.X(), Dest.X()); } /****************************************************************************** -* cOglOsd 
-******************************************************************************/ + * cOglOsd + ******************************************************************************/ cOglOutputFb *cOglOsd::oFb = NULL; -cOglOsd::cOglOsd(int Left, int Top, uint Level, std::shared_ptr < cOglThread > oglThread):cOsd(Left, Top, Level) -{ +cOglOsd::cOglOsd(int Left, int Top, uint Level, std::shared_ptr oglThread) : cOsd(Left, Top, Level) { this->oglThread = oglThread; bFb = NULL; isSubtitleOsd = false; int osdWidth = 0; int osdHeight = 0; -// pthread_mutex_lock(&OSDMutex); + // pthread_mutex_lock(&OSDMutex); VideoGetOsdSize(&osdWidth, &osdHeight); // osdWidth = 1920; // osdHeight = 1080; @@ -2167,7 +2010,7 @@ cOglOsd::cOglOsd(int Left, int Top, uint Level, std::shared_ptr < cOglThread > o #if 0 if (posd) - free(posd); + free(posd); posd = (unsigned char *)calloc(osdWidth * osdHeight * 4, 1); #endif // create output framebuffer @@ -2176,19 +2019,17 @@ cOglOsd::cOglOsd(int Left, int Top, uint Level, std::shared_ptr < cOglThread > o oFb = new cOglOutputFb(osdWidth, osdHeight); oglThread->DoCmd(new cOglCmdInitOutputFb(oFb)); } -// pthread_mutex_unlock(&OSDMutex); + // pthread_mutex_unlock(&OSDMutex); } -cOglOsd::~cOglOsd() -{ +cOglOsd::~cOglOsd() { OsdClose(); SetActive(false); oglThread->DoCmd(new cOglCmdDeleteFb(bFb)); } -eOsdError cOglOsd::SetAreas(const tArea * Areas, int NumAreas) -{ +eOsdError cOglOsd::SetAreas(const tArea *Areas, int NumAreas) { cRect r; if (NumAreas > 1) @@ -2196,7 +2037,7 @@ eOsdError cOglOsd::SetAreas(const tArea * Areas, int NumAreas) for (int i = 0; i < NumAreas; i++) r.Combine(cRect(Areas[i].x1, Areas[i].y1, Areas[i].Width(), Areas[i].Height())); - tArea area = { r.Left(), r.Top(), r.Right(), r.Bottom(), 32 }; + tArea area = {r.Left(), r.Top(), r.Right(), r.Bottom(), 32}; // now we know the actuaL osd size, create double buffer frame buffer if (bFb) { @@ -2212,17 +2053,18 @@ eOsdError cOglOsd::SetAreas(const tArea * Areas, int NumAreas) return cOsd::SetAreas(&area, 1); } -cPixmap *cOglOsd::CreatePixmap(int Layer, const cRect & ViewPort, const cRect & DrawPort) -{ +cPixmap *cOglOsd::CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort) { if (!oglThread->Active()) return NULL; LOCK_PIXMAPS; - int width = DrawPort.IsEmpty()? ViewPort.Width() : DrawPort.Width(); - int height = DrawPort.IsEmpty()? ViewPort.Height() : DrawPort.Height(); + int width = DrawPort.IsEmpty() ? ViewPort.Width() : DrawPort.Width(); + int height = DrawPort.IsEmpty() ? 
ViewPort.Height() : DrawPort.Height(); if (width > oglThread->MaxTextureSize() || height > oglThread->MaxTextureSize()) { - esyslog("[softhddev] cannot allocate pixmap of %dpx x %dpx, clipped to %dpx x %dpx!", width, height, - std::min(width, oglThread->MaxTextureSize()), std::min(height, oglThread->MaxTextureSize())); + esyslog("[softhddev] cannot allocate pixmap of %dpx x %dpx, clipped to " + "%dpx x %dpx!", + width, height, std::min(width, oglThread->MaxTextureSize()), + std::min(height, oglThread->MaxTextureSize())); width = std::min(width, oglThread->MaxTextureSize()); height = std::min(height, oglThread->MaxTextureSize()); } @@ -2243,8 +2085,7 @@ cPixmap *cOglOsd::CreatePixmap(int Layer, const cRect & ViewPort, const cRect & return NULL; } -void cOglOsd::DestroyPixmap(cPixmap * Pixmap) -{ +void cOglOsd::DestroyPixmap(cPixmap *Pixmap) { if (!oglThread->Active()) return; if (!Pixmap) @@ -2266,8 +2107,7 @@ void cOglOsd::DestroyPixmap(cPixmap * Pixmap) } } -void cOglOsd::Flush(void) -{ +void cOglOsd::Flush(void) { if (!oglThread->Active()) return; LOCK_PIXMAPS; @@ -2290,9 +2130,10 @@ void cOglOsd::Flush(void) for (int i = 0; i < oglPixmaps.Size(); i++) { if (oglPixmaps[i]) { if (oglPixmaps[i]->Layer() == layer) { - oglThread->DoCmd(new cOglCmdRenderFbToBufferFb(oglPixmaps[i]->Fb(), bFb, - oglPixmaps[i]->ViewPort().X(), (!isSubtitleOsd) ? oglPixmaps[i]->ViewPort().Y() : 0, - oglPixmaps[i]->Alpha(), oglPixmaps[i]->DrawPort().X(), oglPixmaps[i]->DrawPort().Y())); + oglThread->DoCmd(new cOglCmdRenderFbToBufferFb( + oglPixmaps[i]->Fb(), bFb, oglPixmaps[i]->ViewPort().X(), + (!isSubtitleOsd) ? oglPixmaps[i]->ViewPort().Y() : 0, oglPixmaps[i]->Alpha(), + oglPixmaps[i]->DrawPort().X(), oglPixmaps[i]->DrawPort().Y())); oglPixmaps[i]->SetDirty(false); } } @@ -2300,11 +2141,11 @@ void cOglOsd::Flush(void) } oglThread->DoCmd(new cOglCmdCopyBufferToOutputFb(bFb, oFb, Left(), Top())); - // dsyslog("[softhddev]End Flush at %" PRIu64 ", duration %d", cTimeMs::Now(), (int)(cTimeMs::Now()-start)); + // dsyslog("[softhddev]End Flush at %" PRIu64 ", duration %d", cTimeMs::Now(), + // (int)(cTimeMs::Now()-start)); } -void cOglOsd::DrawScaledBitmap(int x, int y, const cBitmap & Bitmap, double FactorX, double FactorY, bool AntiAlias) -{ +void cOglOsd::DrawScaledBitmap(int x, int y, const cBitmap &Bitmap, double FactorX, double FactorY, bool AntiAlias) { (void)FactorX; (void)FactorY; (void)AntiAlias; diff --git a/openglosd.h b/openglosd.h index e8c198c..c7cb9cf 100644 --- a/openglosd.h +++ b/openglosd.h @@ -1,6 +1,7 @@ #ifndef __SOFTHDDEVICE_OPENGLOSD_H #define __SOFTHDDEVICE_OPENGLOSD_H +// clang-format off #include #include #include @@ -17,9 +18,9 @@ #include FT_STROKER_H #undef __FTERRORS_H__ -#define FT_ERRORDEF( e, v, s ) { e, s }, -#define FT_ERROR_START_LIST { -#define FT_ERROR_END_LIST { 0, 0 } }; +#define FT_ERRORDEF( e, v, s ) { e, s }, +#define FT_ERROR_START_LIST { +#define FT_ERROR_END_LIST { 0, 0 } }; const struct { int code; @@ -36,17 +37,15 @@ extern "C" { #include #include - #include "audio.h" #include "video.h" #include "codec.h" - } +// clang-format on extern "C" pthread_mutex_t OSDMutex; -struct sOglImage -{ +struct sOglImage { GLuint texture; GLint width; GLint height; @@ -54,54 +53,42 @@ struct sOglImage }; /**************************************************************************************** -* Helpers -****************************************************************************************/ + * Helpers + 
****************************************************************************************/ -void ConvertColor(const GLint & colARGB, glm::vec4 & col); +void ConvertColor(const GLint &colARGB, glm::vec4 &col); /**************************************************************************************** -* cShader -****************************************************************************************/ -enum eShaderType -{ - stRect, - stTexture, - stText, - stCount -}; + * cShader + ****************************************************************************************/ +enum eShaderType { stRect, stTexture, stText, stCount }; -class cShader -{ +class cShader { private: eShaderType type; GLuint id; bool Compile(const char *vertexCode, const char *fragmentCode); bool CheckCompileErrors(GLuint object, bool program = false); + public: - cShader(void) - { - }; - virtual ~ cShader(void) - { - }; + cShader(void){}; + virtual ~cShader(void){}; bool Load(eShaderType type); void Use(void); - void SetFloat(const GLchar * name, GLfloat value); - void SetInteger(const GLchar * name, GLint value); - void SetVector2f(const GLchar * name, GLfloat x, GLfloat y); - void SetVector3f(const GLchar * name, GLfloat x, GLfloat y, GLfloat z); - void SetVector4f(const GLchar * name, GLfloat x, GLfloat y, GLfloat z, GLfloat w); - void SetMatrix4(const GLchar * name, const glm::mat4 & matrix); + void SetFloat(const GLchar *name, GLfloat value); + void SetInteger(const GLchar *name, GLint value); + void SetVector2f(const GLchar *name, GLfloat x, GLfloat y); + void SetVector3f(const GLchar *name, GLfloat x, GLfloat y, GLfloat z); + void SetVector4f(const GLchar *name, GLfloat x, GLfloat y, GLfloat z, GLfloat w); + void SetMatrix4(const GLchar *name, const glm::mat4 &matrix); }; /**************************************************************************************** -* cOglGlyph -****************************************************************************************/ -class cOglGlyph:public cListObject -{ + * cOglGlyph + ****************************************************************************************/ +class cOglGlyph : public cListObject { private: - struct tKerning - { + struct tKerning { public: tKerning(FT_ULong prevSym, GLfloat kerning = 0.0f) { this->prevSym = prevSym; @@ -117,47 +104,28 @@ class cOglGlyph:public cListObject int height; int advanceX; - cVector < tKerning > kerningCache; + cVector kerningCache; GLuint texture; void LoadTexture(FT_BitmapGlyph ftGlyph); public: cOglGlyph(FT_ULong charCode, FT_BitmapGlyph ftGlyph); - virtual ~ cOglGlyph(); - FT_ULong CharCode(void) - { - return charCode; - } - int AdvanceX(void) - { - return advanceX; - } - int BearingLeft(void) const - { - return bearingLeft; - } - int BearingTop(void) const - { - return bearingTop; - } - int Width(void) const - { - return width; - } - int Height(void) const - { - return height; - } + virtual ~cOglGlyph(); + FT_ULong CharCode(void) { return charCode; } + int AdvanceX(void) { return advanceX; } + int BearingLeft(void) const { return bearingLeft; } + int BearingTop(void) const { return bearingTop; } + int Width(void) const { return width; } + int Height(void) const { return height; } int GetKerningCache(FT_ULong prevSym); void SetKerningCache(FT_ULong prevSym, int kerning); void BindTexture(void); }; /**************************************************************************************** -* cOglFont -****************************************************************************************/ -class cOglFont:public cListObject -{ + 
* cOglFont + ****************************************************************************************/ +class cOglFont : public cListObject { private: static bool initiated; cString name; @@ -166,40 +134,28 @@ class cOglFont:public cListObject int bottom; static FT_Library ftLib; FT_Face face; - static cList < cOglFont > *fonts; - mutable cList < cOglGlyph > glyphCache; - cOglFont(const char *fontName, int charHeight); + static cList *fonts; + mutable cList glyphCache; + cOglFont(const char *fontName, int charHeight); static void Init(void); + public: - virtual ~ cOglFont(void); + virtual ~cOglFont(void); static cOglFont *Get(const char *name, int charHeight); static void Cleanup(void); - const char *Name(void) - { - return *name; - }; - int Size(void) - { - return size; - }; - int Bottom(void) - { - return bottom; - }; - int Height(void) - { - return height; - }; + const char *Name(void) { return *name; }; + int Size(void) { return size; }; + int Bottom(void) { return bottom; }; + int Height(void) { return height; }; cOglGlyph *Glyph(FT_ULong charCode) const; - int Kerning(cOglGlyph * glyph, FT_ULong prevSym) const; + int Kerning(cOglGlyph *glyph, FT_ULong prevSym) const; }; /**************************************************************************************** -* cOglFb -* Framebuffer Object - OpenGL part of a Pixmap -****************************************************************************************/ -class cOglFb -{ + * cOglFb + * Framebuffer Object - OpenGL part of a Pixmap + ****************************************************************************************/ +class cOglFb { protected: bool initiated; // GLuint fb; @@ -207,16 +163,14 @@ class cOglFb GLint width, height; GLint viewPortWidth, viewPortHeight; bool scrollable; + public: - GLuint fb; + GLuint fb; GLuint texture; - cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight); - virtual ~ cOglFb(void); - bool Initiated(void) - { - return initiated; - } + cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight); + virtual ~cOglFb(void); + bool Initiated(void) { return initiated; } virtual bool Init(void); void Bind(void); void BindRead(void); @@ -224,64 +178,42 @@ class cOglFb virtual void Unbind(void); bool BindTexture(void); void Blit(GLint destX1, GLint destY1, GLint destX2, GLint destY2); - GLint Width(void) - { - return width; - }; - GLint Height(void) - { - return height; - }; - bool Scrollable(void) - { - return scrollable; - }; - GLint ViewportWidth(void) - { - return viewPortWidth; - }; - GLint ViewportHeight(void) - { - return viewPortHeight; - }; + GLint Width(void) { return width; }; + GLint Height(void) { return height; }; + bool Scrollable(void) { return scrollable; }; + GLint ViewportWidth(void) { return viewPortWidth; }; + GLint ViewportHeight(void) { return viewPortHeight; }; }; /**************************************************************************************** -* cOglOutputFb -* Output Framebuffer Object - holds Vdpau Output Surface which is our "output framebuffer" -****************************************************************************************/ -class cOglOutputFb:public cOglFb -{ + * cOglOutputFb + * Output Framebuffer Object - holds Vdpau Output Surface which is our "output + *framebuffer" + ****************************************************************************************/ +class cOglOutputFb : public cOglFb { protected: bool initiated; + private: - GLvdpauSurfaceNV surface; + GLvdpauSurfaceNV surface; + public: - GLuint fb; + GLuint 
fb; GLuint texture; - cOglOutputFb(GLint width, GLint height); - virtual ~ cOglOutputFb(void); + cOglOutputFb(GLint width, GLint height); + virtual ~cOglOutputFb(void); virtual bool Init(void); virtual void BindWrite(void); virtual void Unbind(void); }; /**************************************************************************************** -* cOglVb -* Vertex Buffer - OpenGl Vertices for the different drawing commands -****************************************************************************************/ -enum eVertexBufferType -{ - vbRect, - vbEllipse, - vbSlope, - vbTexture, - vbText, - vbCount -}; + * cOglVb + * Vertex Buffer - OpenGl Vertices for the different drawing commands + ****************************************************************************************/ +enum eVertexBufferType { vbRect, vbEllipse, vbSlope, vbTexture, vbText, vbCount }; -class cOglVb -{ +class cOglVb { private: eVertexBufferType type; eShaderType shader; @@ -291,9 +223,10 @@ class cOglVb int sizeVertex2; int numVertices; GLuint drawMode; + public: - cOglVb(int type); - virtual ~ cOglVb(void); + cOglVb(int type); + virtual ~cOglVb(void); bool Init(void); void Bind(void); void Unbind(void); @@ -303,148 +236,106 @@ class cOglVb void SetShaderColor(GLint color); void SetShaderAlpha(GLint alpha); void SetShaderProjectionMatrix(GLint width, GLint height); - void SetVertexData(GLfloat * vertices, int count = 0); + void SetVertexData(GLfloat *vertices, int count = 0); void DrawArrays(int count = 0); }; /**************************************************************************************** -* cOpenGLCmd -****************************************************************************************/ -class cOglCmd -{ + * cOpenGLCmd + ****************************************************************************************/ +class cOglCmd { protected: - cOglFb * fb; + cOglFb *fb; + public: - cOglCmd(cOglFb * fb) - { - this->fb = fb; - }; - virtual ~ cOglCmd(void) - { - }; + cOglCmd(cOglFb *fb) { this->fb = fb; }; + virtual ~cOglCmd(void){}; virtual const char *Description(void) = 0; virtual bool Execute(void) = 0; }; -class cOglCmdInitOutputFb:public cOglCmd -{ +class cOglCmdInitOutputFb : public cOglCmd { private: - cOglOutputFb * oFb; + cOglOutputFb *oFb; + public: - cOglCmdInitOutputFb(cOglOutputFb * oFb); - virtual ~ cOglCmdInitOutputFb(void) - { - }; - virtual const char *Description(void) - { - return "InitOutputFramebuffer"; - } + cOglCmdInitOutputFb(cOglOutputFb *oFb); + virtual ~cOglCmdInitOutputFb(void){}; + virtual const char *Description(void) { return "InitOutputFramebuffer"; } virtual bool Execute(void); }; -class cOglCmdInitFb:public cOglCmd -{ +class cOglCmdInitFb : public cOglCmd { private: - cCondWait * wait; + cCondWait *wait; + public: - cOglCmdInitFb(cOglFb * fb, cCondWait * wait = NULL); - virtual ~ cOglCmdInitFb(void) - { - }; - virtual const char *Description(void) - { - return "InitFramebuffer"; - } + cOglCmdInitFb(cOglFb *fb, cCondWait *wait = NULL); + virtual ~cOglCmdInitFb(void){}; + virtual const char *Description(void) { return "InitFramebuffer"; } virtual bool Execute(void); }; -class cOglCmdDeleteFb:public cOglCmd -{ +class cOglCmdDeleteFb : public cOglCmd { public: - cOglCmdDeleteFb(cOglFb * fb); - virtual ~ cOglCmdDeleteFb(void) - { - }; - virtual const char *Description(void) - { - return "DeleteFramebuffer"; - } + cOglCmdDeleteFb(cOglFb *fb); + virtual ~cOglCmdDeleteFb(void){}; + virtual const char *Description(void) { return "DeleteFramebuffer"; } virtual bool Execute(void); }; 
-class cOglCmdRenderFbToBufferFb:public cOglCmd -{ +class cOglCmdRenderFbToBufferFb : public cOglCmd { private: - cOglFb * buffer; + cOglFb *buffer; GLfloat x, y; GLfloat drawPortX, drawPortY; GLint transparency; + public: - cOglCmdRenderFbToBufferFb(cOglFb * fb, cOglFb * buffer, GLint x, GLint y, GLint transparency, GLint drawPortX, - GLint drawPortY); - virtual ~ cOglCmdRenderFbToBufferFb(void) - { - }; - virtual const char *Description(void) - { - return "Render Framebuffer to Buffer"; - } + cOglCmdRenderFbToBufferFb(cOglFb *fb, cOglFb *buffer, GLint x, GLint y, GLint transparency, GLint drawPortX, + GLint drawPortY); + virtual ~cOglCmdRenderFbToBufferFb(void){}; + virtual const char *Description(void) { return "Render Framebuffer to Buffer"; } virtual bool Execute(void); }; -class cOglCmdCopyBufferToOutputFb:public cOglCmd -{ +class cOglCmdCopyBufferToOutputFb : public cOglCmd { private: - cOglOutputFb * oFb; + cOglOutputFb *oFb; GLint x, y; + public: - cOglCmdCopyBufferToOutputFb(cOglFb * fb, cOglOutputFb * oFb, GLint x, GLint y); - virtual ~ cOglCmdCopyBufferToOutputFb(void) - { - }; - virtual const char *Description(void) - { - return "Copy buffer to OutputFramebuffer"; - } + cOglCmdCopyBufferToOutputFb(cOglFb *fb, cOglOutputFb *oFb, GLint x, GLint y); + virtual ~cOglCmdCopyBufferToOutputFb(void){}; + virtual const char *Description(void) { return "Copy buffer to OutputFramebuffer"; } virtual bool Execute(void); }; -class cOglCmdFill:public cOglCmd -{ +class cOglCmdFill : public cOglCmd { private: GLint color; + public: - cOglCmdFill(cOglFb * fb, GLint color); - virtual ~ cOglCmdFill(void) - { - }; - virtual const char *Description(void) - { - return "Fill"; - } + cOglCmdFill(cOglFb *fb, GLint color); + virtual ~cOglCmdFill(void){}; + virtual const char *Description(void) { return "Fill"; } virtual bool Execute(void); }; -class cOglCmdDrawRectangle:public cOglCmd -{ +class cOglCmdDrawRectangle : public cOglCmd { private: GLint x, y; GLint width, height; GLint color; + public: - cOglCmdDrawRectangle(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color); - virtual ~ cOglCmdDrawRectangle(void) - { - }; - virtual const char *Description(void) - { - return "DrawRectangle"; - } + cOglCmdDrawRectangle(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color); + virtual ~cOglCmdDrawRectangle(void){}; + virtual const char *Description(void) { return "DrawRectangle"; } virtual bool Execute(void); }; -class cOglCmdDrawEllipse:public cOglCmd -{ +class cOglCmdDrawEllipse : public cOglCmd { private: GLint x, y; GLint width, height; @@ -453,39 +344,29 @@ class cOglCmdDrawEllipse:public cOglCmd GLfloat *CreateVerticesFull(int &numVertices); GLfloat *CreateVerticesQuadrant(int &numVertices); GLfloat *CreateVerticesHalf(int &numVertices); + public: - cOglCmdDrawEllipse(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint quadrants); - virtual ~ cOglCmdDrawEllipse(void) - { - }; - virtual const char *Description(void) - { - return "DrawEllipse"; - } + cOglCmdDrawEllipse(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint quadrants); + virtual ~cOglCmdDrawEllipse(void){}; + virtual const char *Description(void) { return "DrawEllipse"; } virtual bool Execute(void); }; -class cOglCmdDrawSlope:public cOglCmd -{ +class cOglCmdDrawSlope : public cOglCmd { private: GLint x, y; GLint width, height; GLint color; GLint type; + public: - cOglCmdDrawSlope(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type); 
- virtual ~ cOglCmdDrawSlope(void) - { - }; - virtual const char *Description(void) - { - return "DrawSlope"; - } + cOglCmdDrawSlope(cOglFb *fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type); + virtual ~cOglCmdDrawSlope(void){}; + virtual const char *Description(void) { return "DrawSlope"; } virtual bool Execute(void); }; -class cOglCmdDrawText:public cOglCmd -{ +class cOglCmdDrawText : public cOglCmd { private: GLint x, y; GLint limitX; @@ -493,97 +374,78 @@ class cOglCmdDrawText:public cOglCmd cString fontName; int fontSize; unsigned int *symbols; + public: - cOglCmdDrawText(cOglFb * fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, - int fontSize, tColor colorText); - virtual ~ cOglCmdDrawText(void); - virtual const char *Description(void) - { - return "DrawText"; - } + cOglCmdDrawText(cOglFb *fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, int fontSize, + tColor colorText); + virtual ~cOglCmdDrawText(void); + virtual const char *Description(void) { return "DrawText"; } virtual bool Execute(void); }; -class cOglCmdDrawImage:public cOglCmd -{ +class cOglCmdDrawImage : public cOglCmd { private: - tColor * argb; + tColor *argb; GLint x, y, width, height; bool overlay; GLfloat scaleX, scaleY; + public: - cOglCmdDrawImage(cOglFb * fb, tColor * argb, GLint width, GLint height, GLint x, GLint y, bool overlay = - true, double scaleX = 1.0f, double scaleY = 1.0f); - virtual ~ cOglCmdDrawImage(void); - virtual const char *Description(void) - { - return "Draw Image"; - } + cOglCmdDrawImage(cOglFb *fb, tColor *argb, GLint width, GLint height, GLint x, GLint y, bool overlay = true, + double scaleX = 1.0f, double scaleY = 1.0f); + virtual ~cOglCmdDrawImage(void); + virtual const char *Description(void) { return "Draw Image"; } virtual bool Execute(void); }; -class cOglCmdDrawTexture:public cOglCmd -{ +class cOglCmdDrawTexture : public cOglCmd { private: - sOglImage * imageRef; + sOglImage *imageRef; GLint x, y; + public: - cOglCmdDrawTexture(cOglFb * fb, sOglImage * imageRef, GLint x, GLint y); - virtual ~ cOglCmdDrawTexture(void) - { - }; - virtual const char *Description(void) - { - return "Draw Texture"; - } + cOglCmdDrawTexture(cOglFb *fb, sOglImage *imageRef, GLint x, GLint y); + virtual ~cOglCmdDrawTexture(void){}; + virtual const char *Description(void) { return "Draw Texture"; } virtual bool Execute(void); }; -class cOglCmdStoreImage:public cOglCmd -{ +class cOglCmdStoreImage : public cOglCmd { private: - sOglImage * imageRef; + sOglImage *imageRef; tColor *data; + public: - cOglCmdStoreImage(sOglImage * imageRef, tColor * argb); - virtual ~ cOglCmdStoreImage(void); - virtual const char *Description(void) - { - return "Store Image"; - } + cOglCmdStoreImage(sOglImage *imageRef, tColor *argb); + virtual ~cOglCmdStoreImage(void); + virtual const char *Description(void) { return "Store Image"; } virtual bool Execute(void); }; -class cOglCmdDropImage:public cOglCmd -{ +class cOglCmdDropImage : public cOglCmd { private: - sOglImage * imageRef; + sOglImage *imageRef; cCondWait *wait; + public: - cOglCmdDropImage(sOglImage * imageRef, cCondWait * wait); - virtual ~ cOglCmdDropImage(void) - { - }; - virtual const char *Description(void) - { - return "Drop Image"; - } + cOglCmdDropImage(sOglImage *imageRef, cCondWait *wait); + virtual ~cOglCmdDropImage(void){}; + virtual const char *Description(void) { return "Drop Image"; } virtual bool Execute(void); }; 
 /******************************************************************************
-* cOglThread
-******************************************************************************/
+ * cOglThread
+ ******************************************************************************/

 #define OGL_MAX_OSDIMAGES 256
 #define OGL_CMDQUEUE_SIZE 100

-class cOglThread:public cThread
-{
+class cOglThread : public cThread {
   private:
-    cCondWait * startWait;
+    cCondWait *startWait;
     cCondWait *wait;
     bool stalled;
-    std::queue < cOglCmd * >commands;
+    std::queue<cOglCmd *> commands;
     GLint maxTextureSize;
     sOglImage imageCache[OGL_MAX_OSDIMAGES];
     long memCached;
@@ -597,96 +459,81 @@ class cOglThread:public cThread
     void Cleanup(void);
     int GetFreeSlot(void);
     void ClearSlot(int slot);
+
   protected:
-    virtual void Action(void);
+    virtual void Action(void);
+
   public:
-    cOglThread(cCondWait * startWait, int maxCacheSize);
-    virtual ~ cOglThread();
+    cOglThread(cCondWait *startWait, int maxCacheSize);
+    virtual ~cOglThread();
     void Stop(void);
-    void DoCmd(cOglCmd * cmd);
-    int StoreImage(const cImage & image);
+    void DoCmd(cOglCmd *cmd);
+    int StoreImage(const cImage &image);
     void DropImageData(int imageHandle);
     sOglImage *GetImageRef(int slot);
-    int MaxTextureSize(void)
-    {
-        return maxTextureSize;
-    };
+    int MaxTextureSize(void) { return maxTextureSize; };
 };

 /****************************************************************************************
-* cOglPixmap
-****************************************************************************************/
-class cOglPixmap:public cPixmap
-{
+ * cOglPixmap
+ ****************************************************************************************/
+class cOglPixmap : public cPixmap {
   private:
-    cOglFb * fb;
-    std::shared_ptr < cOglThread > oglThread;
+    cOglFb *fb;
+    std::shared_ptr<cOglThread> oglThread;
     bool dirty;
+
   public:
-    cOglPixmap(std::shared_ptr < cOglThread > oglThread, int Layer, const cRect & ViewPort, const cRect & DrawPort =
-        cRect::Null);
-    virtual ~ cOglPixmap(void);
-    cOglFb *Fb(void)
-    {
-        return fb;
-    };
-    int X(void)
-    {
-        return ViewPort().X();
-    };
-    int Y(void)
-    {
-        return ViewPort().Y();
-    };
-    virtual bool IsDirty(void)
-    {
-        return dirty;
-    }
-    virtual void SetDirty(bool dirty = true) {
-        this->dirty = dirty;
-    }
+    cOglPixmap(std::shared_ptr<cOglThread> oglThread, int Layer, const cRect &ViewPort,
+               const cRect &DrawPort = cRect::Null);
+    virtual ~cOglPixmap(void);
+    cOglFb *Fb(void) { return fb; };
+    int X(void) { return ViewPort().X(); };
+    int Y(void) { return ViewPort().Y(); };
+    virtual bool IsDirty(void) { return dirty; }
+    virtual void SetDirty(bool dirty = true) { this->dirty = dirty; }
     virtual void SetAlpha(int Alpha);
     virtual void SetTile(bool Tile);
-    virtual void SetViewPort(const cRect & Rect);
-    virtual void SetDrawPortPoint(const cPoint & Point, bool Dirty = true);
+    virtual void SetViewPort(const cRect &Rect);
+    virtual void SetDrawPortPoint(const cPoint &Point, bool Dirty = true);
     virtual void Clear(void);
     virtual void Fill(tColor Color);
-    virtual void DrawImage(const cPoint & Point, const cImage & Image);
-    virtual void DrawImage(const cPoint & Point, int ImageHandle);
-    virtual void DrawPixel(const cPoint & Point, tColor Color);
-    virtual void DrawBitmap(const cPoint & Point, const cBitmap & Bitmap, tColor ColorFg = 0, tColor ColorBg =
-        0, bool Overlay = false);
-    virtual void DrawText(const cPoint & Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont * Font,
-        int Width = 0, int Height = 0, int Alignment = taDefault);
-    virtual void DrawRectangle(const cRect & Rect, tColor Color);
-    virtual void DrawEllipse(const cRect & Rect, tColor Color, int Quadrants = 0);
-    virtual void DrawSlope(const cRect & Rect, tColor Color, int Type);
-    virtual void Render(const cPixmap * Pixmap, const cRect & Source, const cPoint & Dest);
-    virtual void Copy(const cPixmap * Pixmap, const cRect & Source, const cPoint & Dest);
-    virtual void Scroll(const cPoint & Dest, const cRect & Source = cRect::Null);
-    virtual void Pan(const cPoint & Dest, const cRect & Source = cRect::Null);
+    virtual void DrawImage(const cPoint &Point, const cImage &Image);
+    virtual void DrawImage(const cPoint &Point, int ImageHandle);
+    virtual void DrawPixel(const cPoint &Point, tColor Color);
+    virtual void DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0,
+                            bool Overlay = false);
+    virtual void DrawText(const cPoint &Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font,
+                          int Width = 0, int Height = 0, int Alignment = taDefault);
+    virtual void DrawRectangle(const cRect &Rect, tColor Color);
+    virtual void DrawEllipse(const cRect &Rect, tColor Color, int Quadrants = 0);
+    virtual void DrawSlope(const cRect &Rect, tColor Color, int Type);
+    virtual void Render(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest);
+    virtual void Copy(const cPixmap *Pixmap, const cRect &Source, const cPoint &Dest);
+    virtual void Scroll(const cPoint &Dest, const cRect &Source = cRect::Null);
+    virtual void Pan(const cPoint &Dest, const cRect &Source = cRect::Null);
 };

 /******************************************************************************
-* cOglOsd
-******************************************************************************/
-class cOglOsd:public cOsd
-{
+ * cOglOsd
+ ******************************************************************************/
+class cOglOsd : public cOsd {
   private:
-    cOglFb * bFb;
-    std::shared_ptr < cOglThread > oglThread;
-    cVector < cOglPixmap * >oglPixmaps;
+    cOglFb *bFb;
+    std::shared_ptr<cOglThread> oglThread;
+    cVector<cOglPixmap *> oglPixmaps;
     bool isSubtitleOsd;
+
   protected:
   public:
-    cOglOsd(int Left, int Top, uint Level, std::shared_ptr < cOglThread > oglThread);
-    virtual ~ cOglOsd();
-    virtual eOsdError SetAreas(const tArea * Areas, int NumAreas);
-    virtual cPixmap *CreatePixmap(int Layer, const cRect & ViewPort, const cRect & DrawPort = cRect::Null);
-    virtual void DestroyPixmap(cPixmap * Pixmap);
+    cOglOsd(int Left, int Top, uint Level, std::shared_ptr<cOglThread> oglThread);
+    virtual ~cOglOsd();
+    virtual eOsdError SetAreas(const tArea *Areas, int NumAreas);
+    virtual cPixmap *CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null);
+    virtual void DestroyPixmap(cPixmap *Pixmap);
     virtual void Flush(void);
-    virtual void DrawScaledBitmap(int x, int y, const cBitmap & Bitmap, double FactorX, double FactorY,
-        bool AntiAlias = false);
+    virtual void DrawScaledBitmap(int x, int y, const cBitmap &Bitmap, double FactorX, double FactorY,
+                                  bool AntiAlias = false);

     static cOglOutputFb *oFb;
 };
diff --git a/po/de_DE.po b/po/de_DE.po
index d61418d..9c62554 100644
--- a/po/de_DE.po
+++ b/po/de_DE.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: VDR \n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-04-15 18:57+0200\n"
+"POT-Creation-Date: 2021-12-30 10:23+0100\n"
 "PO-Revision-Date: blabla\n"
 "Last-Translator: blabla\n"
 "Language-Team: blabla\n"
@@ -140,91 +140,6 @@ msgstr ""
 msgid "snd_pcm_drop(): %s\n"
 msgstr ""

-#, c-format
-msgid "audio/oss: 
ioctl(SNDCTL_DSP_GETOSPACE): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: write error: %s\n" -msgstr "" - -msgid "audio/oss: error not all bytes written\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SNDCTL_DSP_HALT_OUTPUT): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: error poll %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: using %sdevice '%s'\n" -msgstr "" - -#, c-format -msgid "audio/oss: can't open dsp device '%s': %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(MIXER_WRITE): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: can't open mixer device '%s': %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SOUND_MIXER_READ_DEVMASK): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: channel '%s' not supported\n" -msgstr "" - -#, c-format -msgid "audio/oss: channel '%s' not found\n" -msgstr "" - -msgid "audio/oss: should not happen\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SNDCTL_DSP_SETFMT): %s\n" -msgstr "" - -msgid "audio/oss: device doesn't support 16 bit sample format.\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SNDCTL_DSP_CHANNELS): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: device doesn't support %d channels.\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SNDCTL_DSP_SPEED): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: device doesn't support %dHz sample rate.\n" -msgstr "" - -#, c-format -msgid "audio/oss: ioctl(SNDCTL_DSP_POLICY): %s\n" -msgstr "" - -#, c-format -msgid "audio/oss: delay %ums\n" -msgstr "" - #, c-format msgid "audio: can't set channels %d sample-rate %dHz\n" msgstr "" @@ -306,9 +221,6 @@ msgstr "" msgid "codec/audio: can't setup resample\n" msgstr "" -msgid "codec/audio: can't open resample\n" -msgstr "" - msgid "A software and GPU emulated UHD device" msgstr "" @@ -408,7 +320,7 @@ msgstr "" msgid "Hue (-314..314) " msgstr "" -msgid "Monitor Colorspace" +msgid "Temperature 6500K + x * 100K" msgstr "" msgid "Color Blindness" @@ -417,6 +329,9 @@ msgstr "" msgid "Color Correction (-100..100) " msgstr "" +msgid "Monitor Type" +msgstr "" + msgid "Scaling" msgstr "Skalierung" @@ -574,15 +489,15 @@ msgstr "" msgid "Suspend SoftHdDevice" msgstr "Unterbreche SoftHdDevice" +msgid "Toggle LUT on/off" +msgstr "" + msgid "PIP toggle on/off: off" msgstr "PIP deaktivieren" msgid "PIP toggle on/off: on" msgstr "PIP aktivieren" -msgid "PIP zapmode (not working)" -msgstr "" - msgid "PIP channel +" msgstr "PIP Kanal +" @@ -715,6 +630,10 @@ msgstr "" msgid "[softhddev] no codec known for still picture\n" msgstr "" +#, c-format +msgid "Too much shaders definded\n" +msgstr "" + #, c-format msgid "Bad formated geometry please use: [=][{xX}][{+-}{+-}]\n" msgstr "" @@ -884,6 +803,9 @@ msgstr "" msgid "Failed initializing libplacebo\n" msgstr "" +msgid "Failed to create placebo opengl \n" +msgstr "" + msgid "Failed to create XCB Surface\n" msgstr "" @@ -893,6 +815,9 @@ msgstr "" msgid "Failed creating vulkan swapchain!" msgstr "" +msgid "libplacebo: failed initializing swapchain\n" +msgstr "" + msgid "Failed initializing libplacebo renderer\n" msgstr "" diff --git a/ringbuffer.c b/ringbuffer.c index bdf8ce2..60b3eda 100644 --- a/ringbuffer.c +++ b/ringbuffer.c @@ -1,5 +1,5 @@ /// -/// @file ringbuffer.c @brief Ringbuffer module +/// @file ringbuffer.c @brief Ringbuffer module /// /// Copyright (c) 2009, 2011, 2014 by Johns. All Rights Reserved. 
/// @@ -34,17 +34,16 @@ #include "ringbuffer.h" /// ring buffer structure -struct _ring_buffer_ -{ - char *Buffer; ///< ring buffer data - const char *BufferEnd; ///< end of buffer - size_t Size; ///< bytes in buffer (for faster calc) +struct _ring_buffer_ { + char *Buffer; ///< ring buffer data + const char *BufferEnd; ///< end of buffer + size_t Size; ///< bytes in buffer (for faster calc) - const char *ReadPointer; ///< only used by reader - char *WritePointer; ///< only used by writer + const char *ReadPointer; ///< only used by reader + char *WritePointer; ///< only used by writer /// The only thing modified by both - atomic_t Filled; ///< how many of the buffer is used + atomic_t Filled; ///< how many of the buffer is used }; /** @@ -52,8 +51,7 @@ struct _ring_buffer_ ** ** @param rb Ring buffer to reset read/write pointers. */ -void RingBufferReset(RingBuffer * rb) -{ +void RingBufferReset(RingBuffer *rb) { rb->ReadPointer = rb->Buffer; rb->WritePointer = rb->Buffer; atomic_set(&rb->Filled, 0); @@ -67,11 +65,10 @@ void RingBufferReset(RingBuffer * rb) ** @returns Allocated ring buffer, must be freed with ** RingBufferDel(), NULL for out of memory. */ -RingBuffer *RingBufferNew(size_t size) -{ +RingBuffer *RingBufferNew(size_t size) { RingBuffer *rb; - if (!(rb = malloc(sizeof(*rb)))) { // allocate structure + if (!(rb = malloc(sizeof(*rb)))) { // allocate structure return rb; } if (!(rb->Buffer = malloc(size))) { // allocate buffer @@ -89,8 +86,7 @@ RingBuffer *RingBufferNew(size_t size) /** ** Free an allocated ring buffer. */ -void RingBufferDel(RingBuffer * rb) -{ +void RingBufferDel(RingBuffer *rb) { free(rb->Buffer); free(rb); } @@ -103,21 +99,20 @@ void RingBufferDel(RingBuffer * rb) ** ** @returns Number of bytes that could be advanced in ring buffer. */ -size_t RingBufferWriteAdvance(RingBuffer * rb, size_t cnt) -{ +size_t RingBufferWriteAdvance(RingBuffer *rb, size_t cnt) { size_t n; n = rb->Size - atomic_read(&rb->Filled); - if (cnt > n) { // not enough space + if (cnt > n) { // not enough space cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->WritePointer; - if (n > cnt) { // don't cross the end + if (n > cnt) { // don't cross the end rb->WritePointer += cnt; - } else { // reached or cross the end + } else { // reached or cross the end rb->WritePointer = rb->Buffer; if (n < cnt) { n = cnt - n; @@ -126,7 +121,7 @@ size_t RingBufferWriteAdvance(RingBuffer * rb, size_t cnt) } // - // Only atomic modification! + // Only atomic modification! // atomic_add(cnt, &rb->Filled); return cnt; @@ -142,22 +137,21 @@ size_t RingBufferWriteAdvance(RingBuffer * rb, size_t cnt) ** @returns The number of bytes that could be placed in the ring ** buffer. */ -size_t RingBufferWrite(RingBuffer * rb, const void *buf, size_t cnt) -{ +size_t RingBufferWrite(RingBuffer *rb, const void *buf, size_t cnt) { size_t n; n = rb->Size - atomic_read(&rb->Filled); - if (cnt > n) { // not enough space + if (cnt > n) { // not enough space cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? 
// n = rb->BufferEnd - rb->WritePointer; - if (n > cnt) { // don't cross the end + if (n > cnt) { // don't cross the end memcpy(rb->WritePointer, buf, cnt); rb->WritePointer += cnt; - } else { // reached or cross the end + } else { // reached or cross the end memcpy(rb->WritePointer, buf, n); rb->WritePointer = rb->Buffer; if (n < cnt) { @@ -169,7 +163,7 @@ size_t RingBufferWrite(RingBuffer * rb, const void *buf, size_t cnt) } // - // Only atomic modification! + // Only atomic modification! // atomic_add(cnt, &rb->Filled); return cnt; @@ -184,21 +178,20 @@ size_t RingBufferWrite(RingBuffer * rb, const void *buf, size_t cnt) ** @returns The number of bytes that could be placed in the ring ** buffer at the write pointer. */ -size_t RingBufferGetWritePointer(RingBuffer * rb, void **wp) -{ +size_t RingBufferGetWritePointer(RingBuffer *rb, void **wp) { size_t n; size_t cnt; - // Total free bytes available in ring buffer + // Total free bytes available in ring buffer cnt = rb->Size - atomic_read(&rb->Filled); *wp = rb->WritePointer; // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->WritePointer; - if (n <= cnt) { // reached or cross the end + if (n <= cnt) { // reached or cross the end return n; } return cnt; @@ -212,21 +205,20 @@ size_t RingBufferGetWritePointer(RingBuffer * rb, void **wp) ** ** @returns Number of bytes that could be advanced in ring buffer. */ -size_t RingBufferReadAdvance(RingBuffer * rb, size_t cnt) -{ +size_t RingBufferReadAdvance(RingBuffer *rb, size_t cnt) { size_t n; n = atomic_read(&rb->Filled); - if (cnt > n) { // not enough filled + if (cnt > n) { // not enough filled cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->ReadPointer; - if (n > cnt) { // don't cross the end + if (n > cnt) { // don't cross the end rb->ReadPointer += cnt; - } else { // reached or cross the end + } else { // reached or cross the end rb->ReadPointer = rb->Buffer; if (n < cnt) { n = cnt - n; @@ -235,7 +227,7 @@ size_t RingBufferReadAdvance(RingBuffer * rb, size_t cnt) } // - // Only atomic modification! + // Only atomic modification! // atomic_sub(cnt, &rb->Filled); return cnt; @@ -250,22 +242,21 @@ size_t RingBufferReadAdvance(RingBuffer * rb, size_t cnt) ** ** @returns Number of bytes that could be read from ring buffer. */ -size_t RingBufferRead(RingBuffer * rb, void *buf, size_t cnt) -{ +size_t RingBufferRead(RingBuffer *rb, void *buf, size_t cnt) { size_t n; n = atomic_read(&rb->Filled); - if (cnt > n) { // not enough filled + if (cnt > n) { // not enough filled cnt = n; } // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->ReadPointer; - if (n > cnt) { // don't cross the end + if (n > cnt) { // don't cross the end memcpy(buf, rb->ReadPointer, cnt); rb->ReadPointer += cnt; - } else { // reached or cross the end + } else { // reached or cross the end memcpy(buf, rb->ReadPointer, n); rb->ReadPointer = rb->Buffer; if (n < cnt) { @@ -277,7 +268,7 @@ size_t RingBufferRead(RingBuffer * rb, void *buf, size_t cnt) } // - // Only atomic modification! + // Only atomic modification! // atomic_sub(cnt, &rb->Filled); return cnt; @@ -292,21 +283,20 @@ size_t RingBufferRead(RingBuffer * rb, void *buf, size_t cnt) ** @returns The number of bytes that could be read from the ring ** buffer at the read pointer. 
*/ -size_t RingBufferGetReadPointer(RingBuffer * rb, const void **rp) -{ +size_t RingBufferGetReadPointer(RingBuffer *rb, const void **rp) { size_t n; size_t cnt; - // Total used bytes in ring buffer + // Total used bytes in ring buffer cnt = atomic_read(&rb->Filled); *rp = rb->ReadPointer; // - // Hitting end of buffer? + // Hitting end of buffer? // n = rb->BufferEnd - rb->ReadPointer; - if (n <= cnt) { // reached or cross the end + if (n <= cnt) { // reached or cross the end return n; } return cnt; @@ -319,10 +309,7 @@ size_t RingBufferGetReadPointer(RingBuffer * rb, const void **rp) ** ** @returns Number of bytes free in buffer. */ -size_t RingBufferFreeBytes(RingBuffer * rb) -{ - return rb->Size - atomic_read(&rb->Filled); -} +size_t RingBufferFreeBytes(RingBuffer *rb) { return rb->Size - atomic_read(&rb->Filled); } /** ** Get used bytes in ring buffer. @@ -331,7 +318,4 @@ size_t RingBufferFreeBytes(RingBuffer * rb) ** ** @returns Number of bytes used in buffer. */ -size_t RingBufferUsedBytes(RingBuffer * rb) -{ - return atomic_read(&rb->Filled); -} +size_t RingBufferUsedBytes(RingBuffer *rb) { return atomic_read(&rb->Filled); } diff --git a/ringbuffer.h b/ringbuffer.h index eba4dcb..8cd2ff8 100644 --- a/ringbuffer.h +++ b/ringbuffer.h @@ -1,7 +1,7 @@ /// -/// @file ringbuffer.h @brief Ringbuffer module header file +/// @file ringbuffer.h @brief Ringbuffer module header file /// -/// Copyright (c) 2009, 2011 by Johns. All Rights Reserved. +/// Copyright (c) 2009, 2011 by Johns. All Rights Reserved. /// /// Contributor(s): /// diff --git a/shaders.h b/shaders.h index cc140b3..320e96d 100644 --- a/shaders.h +++ b/shaders.h @@ -13,9 +13,9 @@ const char *gl_version = "#version 300 es "; /* Color conversion matrix: RGB = m * YUV + c * m is in row-major matrix, with m[row][col], e.g.: - * [ a11 a12 a13 ] float m[3][3] = { { a11, a12, a13 }, - * [ a21 a22 a23 ] { a21, a22, a23 }, - * [ a31 a32 a33 ] { a31, a32, a33 } }; + * [ a11 a12 a13 ] float m[3][3] = { { a11, a12, a13 }, + * [ a21 a22 a23 ] { a21, a22, a23 }, + * [ a31 a32 a33 ] { a31, a32, a33 } }; * This is accessed as e.g.: m[2-1][1-1] = a21 * In particular, each row contains all the coefficients for one of R, G, B, * while each column contains all the coefficients for one of Y, U, V: @@ -24,88 +24,67 @@ const char *gl_version = "#version 300 es "; * is the Y vector (1, 1, 1), the 2nd is the U vector, the 3rd the V vector. * The matrix might also be used for other conversions and colorspaces. 
*/ -struct mp_cmat -{ - GLfloat m[3][3]; // colormatrix - GLfloat c[3]; //colormatrix_c +struct mp_cmat { + GLfloat m[3][3]; // colormatrix + GLfloat c[3]; // colormatrix_c }; -struct mp_mat -{ +struct mp_mat { GLfloat m[3][3]; }; // YUV input limited range (16-235 for luma, 16-240 for chroma) // ITU-R BT.601 (SD) -struct mp_cmat yuv_bt601 = { {{1.164384, 1.164384, 1.164384}, - {0.00000, -0.391762, 2.017232}, - {1.596027, -0.812968, 0.000000}}, - {-0.874202, 0.531668, -1.085631} -}; +struct mp_cmat yuv_bt601 = { + {{1.164384, 1.164384, 1.164384}, {0.00000, -0.391762, 2.017232}, {1.596027, -0.812968, 0.000000}}, + {-0.874202, 0.531668, -1.085631}}; // ITU-R BT.709 (HD) -struct mp_cmat yuv_bt709 = { {{1.164384, 1.164384, 1.164384}, - {0.00000, -0.213249, 2.112402}, - {1.792741, -0.532909, 0.000000}}, - {-0.972945, 0.301483, -1.133402} -}; +struct mp_cmat yuv_bt709 = { + {{1.164384, 1.164384, 1.164384}, {0.00000, -0.213249, 2.112402}, {1.792741, -0.532909, 0.000000}}, + {-0.972945, 0.301483, -1.133402}}; // ITU-R BT.2020 non-constant luminance system -struct mp_cmat yuv_bt2020ncl = { {{1.164384, 1.164384, 1.164384}, - {0.00000, -0.187326, 2.141772}, - {1.678674, -0.650424, 0.000000}}, - {-0.915688, 0.347459, -1.148145} -}; +struct mp_cmat yuv_bt2020ncl = { + {{1.164384, 1.164384, 1.164384}, {0.00000, -0.187326, 2.141772}, {1.678674, -0.650424, 0.000000}}, + {-0.915688, 0.347459, -1.148145}}; // ITU-R BT.2020 constant luminance system -struct mp_cmat yuv_bt2020cl = { {{0.0000, 1.164384, 0.000000}, - {0.00000, 0.000000, 1.138393}, - {1.138393, 0.000000, 0.000000}}, - {-0.571429, -0.073059, -0.571429} -}; +struct mp_cmat yuv_bt2020cl = { + {{0.0000, 1.164384, 0.000000}, {0.00000, 0.000000, 1.138393}, {1.138393, 0.000000, 0.000000}}, + {-0.571429, -0.073059, -0.571429}}; -float cms_matrix[3][3] = { {1.660497, -0.124547, -0.018154}, - {-0.587657, 1.132895, -0.100597}, - {-0.072840, -0.008348, 1.118751} -}; +float cms_matrix[3][3] = { + {1.660497, -0.124547, -0.018154}, {-0.587657, 1.132895, -0.100597}, {-0.072840, -0.008348, 1.118751}}; // Common constants for SMPTE ST.2084 (PQ) -static const float PQ_M1 = 2610. / 4096 * 1. / 4, - PQ_M2 = 2523. / 4096 * 128, - PQ_C1 = 3424. / 4096, - PQ_C2 = 2413. / 4096 * 32, - PQ_C3 = 2392. / 4096 * 32; +static const float PQ_M1 = 2610. / 4096 * 1. / 4, PQ_M2 = 2523. / 4096 * 128, PQ_C1 = 3424. / 4096, + PQ_C2 = 2413. / 4096 * 32, PQ_C3 = 2392. / 4096 * 32; // Common constants for ARIB STD-B67 (HLG) -static const float HLG_A = 0.17883277, - HLG_B = 0.28466892, - HLG_C = 0.55991073; +static const float HLG_A = 0.17883277, HLG_B = 0.28466892, HLG_C = 0.55991073; -struct gl_vao_entry -{ +struct gl_vao_entry { // used for shader / glBindAttribLocation const char *name; // glVertexAttribPointer() arguments - int num_elems; // size (number of elements) + int num_elems; // size (number of elements) GLenum type; bool normalized; int offset; }; -struct vertex_pt -{ +struct vertex_pt { float x, y; }; -struct vertex_pi -{ +struct vertex_pi { GLint x, y; }; #define TEXUNIT_VIDEO_NUM 6 -struct vertex -{ +struct vertex { struct vertex_pt position; struct vertex_pt texcoord[TEXUNIT_VIDEO_NUM]; }; @@ -114,27 +93,19 @@ static const struct gl_vao_entry vertex_vao[] = { {"position", 2, GL_FLOAT, false, offsetof(struct vertex, position)}, {"texcoord0", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[0])}, {"texcoord1", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[1])}, - {0} -}; + {0}}; -#define GLSL(...) pl_shader_append(__VA_ARGS__) -#define GLSLV(...) 
pl_shader_append_v(__VA_ARGS__) +#define GLSL(...) pl_shader_append(__VA_ARGS__) +#define GLSLV(...) pl_shader_append_v(__VA_ARGS__) char sh[SHADER_LENGTH]; char shv[SHADER_LENGTH]; -void GL_init() -{ - sh[0] = 0; -} +void GL_init() { sh[0] = 0; } -void GLV_init() -{ - shv[0] = 0; -} +void GLV_init() { shv[0] = 0; } -void pl_shader_append(const char *fmt, ...) -{ +void pl_shader_append(const char *fmt, ...) { char temp[1000]; va_list ap; @@ -145,11 +116,9 @@ void pl_shader_append(const char *fmt, ...) if (strlen(sh) + strlen(temp) > SHADER_LENGTH) Fatal(_("Shaderlenght fault\n")); strcat(sh, temp); - } -void pl_shader_append_v(const char *fmt, ...) -{ +void pl_shader_append_v(const char *fmt, ...) { char temp[1000]; va_list ap; @@ -160,18 +129,16 @@ void pl_shader_append_v(const char *fmt, ...) if (strlen(shv) + strlen(temp) > SHADER_LENGTH) Fatal(_("Shaderlenght fault\n")); strcat(shv, temp); - } -static void compile_attach_shader(GLuint program, GLenum type, const char *source) -{ +static void compile_attach_shader(GLuint program, GLenum type, const char *source) { GLuint shader; GLint status = 1234, log_length; char log[4000]; GLsizei len; shader = glCreateShader(type); - glShaderSource(shader, 1, (const GLchar **)&source, NULL); // &buffer, NULL); + glShaderSource(shader, 1, (const GLchar **)&source, NULL); // &buffer, NULL); glCompileShader(shader); status = 0; glGetShaderiv(shader, GL_COMPILE_STATUS, &status); @@ -183,11 +150,9 @@ static void compile_attach_shader(GLuint program, GLenum type, const char *sourc glAttachShader(program, shader); glDeleteShader(shader); - } -static void link_shader(GLuint program) -{ +static void link_shader(GLuint program) { GLint status, log_length; glLinkProgram(program); @@ -198,8 +163,7 @@ static void link_shader(GLuint program) Debug(3, "Link Status %d loglen %d\n", status, log_length); } -static GLuint sc_generate_osd(GLuint gl_prog) -{ +static GLuint sc_generate_osd(GLuint gl_prog) { Debug(3, "vor create osd\n"); gl_prog = glCreateProgram(); @@ -215,7 +179,7 @@ static GLuint sc_generate_osd(GLuint gl_prog) GLSL("}\n"); Debug(3, "vor compile vertex osd\n"); - compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh); // vertex_osd); + compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh); // vertex_osd); GL_init(); GLSL("%s\n", gl_version); GLSL("#define texture1D texture\n"); @@ -227,14 +191,14 @@ static GLuint sc_generate_osd(GLuint gl_prog) GLSL("vec4 color; \n"); GLSL("color = vec4(texture(texture0, texcoord0));\n"); #ifdef GAMMA - GLSL("// delinearize gamma \n"); - GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // delinearize gamma + GLSL("// delinearize gamma \n"); + GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // delinearize gamma GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n"); #endif GLSL("out_color = color;\n"); GLSL("}\n"); Debug(3, "vor compile fragment osd \n"); - compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh); //fragment_osd); + compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh); // fragment_osd); glBindAttribLocation(gl_prog, 0, "vertex_position"); glBindAttribLocation(gl_prog, 1, "vertex_texcoord0"); @@ -243,8 +207,7 @@ static GLuint sc_generate_osd(GLuint gl_prog) return gl_prog; } -static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) -{ +static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) { char vname[80]; int n; @@ -254,39 +217,39 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) GL_init(); GLSL("%s\n", gl_version); - GLSL("in vec2 
vertex_position; \n"); - GLSL("in vec2 vertex_texcoord0; \n"); - GLSL("out vec2 texcoord0; \n"); - GLSL("in vec2 vertex_texcoord1; \n"); - GLSL("out vec2 texcoord1; \n"); + GLSL("in vec2 vertex_position; \n"); + GLSL("in vec2 vertex_texcoord0; \n"); + GLSL("out vec2 texcoord0; \n"); + GLSL("in vec2 vertex_texcoord1; \n"); + GLSL("out vec2 texcoord1; \n"); if (Planes == 3) { GLSL("in vec2 vertex_texcoord2; \n"); - GLSL("out vec2 texcoord2; \n"); + GLSL("out vec2 texcoord2; \n"); } - GLSL("void main() { \n"); + GLSL("void main() { \n"); GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n"); GLSL("texcoord0 = vertex_texcoord0; \n"); GLSL("texcoord1 = vertex_texcoord1; \n"); if (Planes == 3) { - GLSL("texcoord2 = vertex_texcoord1; \n"); // texcoord1 ist hier richtig + GLSL("texcoord2 = vertex_texcoord1; \n"); // texcoord1 ist hier richtig } - GLSL("} \n"); + GLSL("} \n"); Debug(3, "vor create\n"); gl_prog = glCreateProgram(); Debug(3, "vor compile vertex\n"); -// printf("%s",sh); + // printf("%s",sh); compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh); switch (colorspace) { case AVCOL_SPC_RGB: - case AVCOL_SPC_BT470BG: + case AVCOL_SPC_BT470BG: m = &yuv_bt601.m[0][0]; c = &yuv_bt601.c[0]; Debug(3, "BT601 Colorspace used\n"); break; case AVCOL_SPC_BT709: - case AVCOL_SPC_UNSPECIFIED: // comes with UHD + case AVCOL_SPC_UNSPECIFIED: // comes with UHD m = &yuv_bt709.m[0][0]; c = &yuv_bt709.c[0]; Debug(3, "BT709 Colorspace used\n"); @@ -297,7 +260,7 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) cms = &cms_matrix[0][0]; Debug(3, "BT2020NCL Colorspace used\n"); break; - default: // fallback + default: // fallback m = &yuv_bt709.m[0][0]; c = &yuv_bt709.c[0]; Debug(3, "default BT709 Colorspace used %d\n", colorspace); @@ -309,8 +272,8 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) GLSL("%s\n", gl_version); GLSL("precision mediump float; \n"); GLSL("layout(location = 0) out vec4 out_color;\n"); - GLSL("in vec2 texcoord0; \n"); - GLSL("in vec2 texcoord1; \n"); + GLSL("in vec2 texcoord0; \n"); + GLSL("in vec2 texcoord1; \n"); if (Planes == 3) GLSL("in vec2 texcoord2; \n"); GLSL("uniform mat3 colormatrix; \n"); @@ -321,8 +284,8 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) GLSL("uniform sampler2D texture1; \n"); if (Planes == 3) GLSL("uniform sampler2D texture2; \n"); - GLSL("void main() { \n"); - GLSL("vec4 color; \n"); + GLSL("void main() { \n"); + GLSL("vec4 color; \n"); if (colorspace == AVCOL_SPC_BT2020_NCL) { GLSL("color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r; \n"); @@ -333,55 +296,64 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) GLSL("color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n"); } GLSL("// color conversion\n"); - GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n"); - GLSL("color.a = 1.0; \n"); + GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; " + "\n"); + GLSL("color.a = 1.0; \n"); - GLSL("// pl_shader_linearize \n"); - GLSL("color.rgb = max(color.rgb, 0.0); \n"); -// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); -// GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n"); -// GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(%f)) * vec3(1.0/%f)) + vec3(%f) , bvec3(lessThan(vec3(0.5), color.rgb)));\n",HLG_C, HLG_A, HLG_B); - GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(0.55991073)) * vec3(1.0/0.17883277)) + vec3(0.28466892), bvec3(lessThan(vec3(0.5), 
color.rgb)));\n"); - GLSL("color.rgb *= vec3(1.0/3.17955); \n"); // PL_COLOR_SDR_WHITE_HLG - GLSL("// color mapping \n"); - GLSL("color.rgb = cms_matrix * color.rgb; \n"); + GLSL("// pl_shader_linearize \n"); + GLSL("color.rgb = max(color.rgb, 0.0); \n"); + // GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); + // GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n"); + // GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - + // vec3(%f)) * vec3(1.0/%f)) + vec3(%f) , + // bvec3(lessThan(vec3(0.5), color.rgb)));\n",HLG_C, HLG_A, HLG_B); + GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - " + "vec3(0.55991073)) * vec3(1.0/0.17883277)) + vec3(0.28466892), " + "bvec3(lessThan(vec3(0.5), color.rgb)));\n"); + GLSL("color.rgb *= vec3(1.0/3.17955); \n"); // PL_COLOR_SDR_WHITE_HLG + GLSL("// color mapping \n"); + GLSL("color.rgb = cms_matrix * color.rgb; \n"); #ifndef GAMMA - GLSL("// pl_shader_delinearize \n"); - GLSL("color.rgb = max(color.rgb, 0.0); \n"); -// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); -// GLSL("color.rgb = pow(color.rgb, vec3(1.0/2.4)); \n"); - GLSL("color.rgb *= vec3(3.17955); \n"); // PL_COLOR_SDR_WHITE_HLG - GLSL("color.rgb = mix(vec3(0.5) * sqrt(color.rgb), vec3(0.17883277) * log(color.rgb - vec3(0.28466892)) + vec3(0.55991073), bvec3(lessThan(vec3(1.0), color.rgb))); \n"); + GLSL("// pl_shader_delinearize \n"); + GLSL("color.rgb = max(color.rgb, 0.0); \n"); + // GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); + // GLSL("color.rgb = pow(color.rgb, vec3(1.0/2.4)); \n"); + GLSL("color.rgb *= vec3(3.17955); \n"); // PL_COLOR_SDR_WHITE_HLG + GLSL("color.rgb = mix(vec3(0.5) * sqrt(color.rgb), vec3(0.17883277) * " + "log(color.rgb - vec3(0.28466892)) + vec3(0.55991073), " + "bvec3(lessThan(vec3(1.0), color.rgb))); \n"); #endif - GLSL("out_color = color; \n"); + GLSL("out_color = color; \n"); GLSL("} \n"); } else { - GLSL("color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r; \n"); + GLSL("color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r; \n"); if (Planes == 3) { GLSL("color.g = 1.000000 * vec4(texture(texture1, texcoord1)).r;\n"); GLSL("color.b = 1.000000 * vec4(texture(texture2, texcoord2)).r;\n"); } else { GLSL("color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg; \n"); } - GLSL("// color conversion \n"); - GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n"); - GLSL("color.a = 1.0; \n"); + GLSL("// color conversion \n"); + GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n"); + GLSL("color.a = 1.0; \n"); - GLSL("// linearize gamma \n"); - GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // linearize gamma + GLSL("// linearize gamma \n"); + GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // linearize gamma GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n"); #ifndef GAMMA - GLSL("// delinearize gamma to sRGB \n"); - GLSL("color.rgb = max(color.rgb, 0.0); \n"); - GLSL("color.rgb = mix(color.rgb * vec3(12.92), vec3(1.055) * pow(color.rgb, vec3(1.0/2.4)) - vec3(0.055), bvec3(lessThanEqual(vec3(0.0031308), color.rgb))); \n"); + GLSL("// delinearize gamma to sRGB \n"); + GLSL("color.rgb = max(color.rgb, 0.0); \n"); + GLSL("color.rgb = mix(color.rgb * vec3(12.92), vec3(1.055) * " + "pow(color.rgb, vec3(1.0/2.4)) - vec3(0.055), " + "bvec3(lessThanEqual(vec3(0.0031308), color.rgb))); \n"); #endif - GLSL("// color mapping \n"); - GLSL("out_color = color; \n"); + GLSL("// color mapping \n"); + GLSL("out_color = color; \n"); GLSL("} \n"); } -//printf(">%s<",sh); + // 
printf(">%s<",sh); Debug(3, "vor compile fragment\n"); compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh); glBindAttribLocation(gl_prog, 0, "vertex_position"); @@ -416,8 +388,7 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) return gl_prog; } -static void render_pass_quad(int flip, float xcrop, float ycrop) -{ +static void render_pass_quad(int flip, float xcrop, float ycrop) { struct vertex va[4]; int n; const struct gl_vao_entry *e; @@ -444,21 +415,21 @@ static void render_pass_quad(int flip, float xcrop, float ycrop) } va[0].texcoord[0].x = (float)0.0 + xcrop; - va[0].texcoord[0].y = (float)0.0 + ycrop; // abgeschnitten von links oben + va[0].texcoord[0].y = (float)0.0 + ycrop; // abgeschnitten von links oben va[0].texcoord[1].x = (float)0.0 + xcrop; - va[0].texcoord[1].y = (float)0.0 + ycrop; // abgeschnitten von links oben + va[0].texcoord[1].y = (float)0.0 + ycrop; // abgeschnitten von links oben va[1].texcoord[0].x = (float)0.0 + xcrop; - va[1].texcoord[0].y = (float)1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert + va[1].texcoord[0].y = (float)1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert va[1].texcoord[1].x = (float)0.0 + xcrop; - va[1].texcoord[1].y = (float)1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert + va[1].texcoord[1].y = (float)1.0 - ycrop; // abgeschnitten links unten 1.0 - Wert va[2].texcoord[0].x = (float)1.0 - xcrop; - va[2].texcoord[0].y = (float)0.0 + ycrop; // abgeschnitten von rechts oben + va[2].texcoord[0].y = (float)0.0 + ycrop; // abgeschnitten von rechts oben va[2].texcoord[1].x = (float)1.0 - xcrop; - va[2].texcoord[1].y = (float)0.0 + ycrop; // abgeschnitten von rechts oben + va[2].texcoord[1].y = (float)0.0 + ycrop; // abgeschnitten von rechts oben va[3].texcoord[0].x = (float)1.0 - xcrop; - va[3].texcoord[0].y = (float)1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert + va[3].texcoord[0].y = (float)1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert va[3].texcoord[1].x = (float)1.0 - xcrop; - va[3].texcoord[1].y = (float)1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert + va[3].texcoord[1].y = (float)1.0 - ycrop; // abgeschnitten von rechts unten 1.0 - wert glBindBuffer(GL_ARRAY_BUFFER, vao_buffer); glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(struct vertex), va, GL_DYNAMIC_DRAW); @@ -470,7 +441,7 @@ static void render_pass_quad(int flip, float xcrop, float ycrop) e = &vertex_vao[n]; glEnableVertexAttribArray(n); glVertexAttribPointer(n, e->num_elems, e->type, e->normalized, sizeof(struct vertex), - (void *)(intptr_t) e->offset); + (void *)(intptr_t)e->offset); } glBindBuffer(GL_ARRAY_BUFFER, 0); diff --git a/shaders/KrigBilateral.glsl b/shaders/KrigBilateral.glsl index f77432f..247b08b 100644 --- a/shaders/KrigBilateral.glsl +++ b/shaders/KrigBilateral.glsl @@ -4,12 +4,12 @@ // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 3.0 of the License, or (at your option) any later version. -// +// // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. -// +// // You should have received a copy of the GNU Lesser General Public // License along with this library. 
diff --git a/shaders/LumaSharpenHook.glsl b/shaders/LumaSharpenHook.glsl index af4af68..a173599 100644 --- a/shaders/LumaSharpenHook.glsl +++ b/shaders/LumaSharpenHook.glsl @@ -25,24 +25,24 @@ // -- Advanced sharpening settings -- #define pattern 2 //[1|2|3|4] Choose a sample pattern. 1 = Fast, 2 = Normal, 3 = Wider, 4 = Pyramid shaped. //[8|9] Experimental slower patterns. 8 = 9 tap 9 fetch gaussian, 9 = 9 tap 9 fetch high pass. - + #define offset_bias 1.0 //[0.0 to 6.0] Offset bias adjusts the radius of the sampling pattern. vec4 hook(){ vec4 colorInput = LUMA_tex(LUMA_pos); - - + + //We are on luma plane: xyzw = [luma_val, 0.0, 0.0, 1.0] float ori = colorInput.x; - + // -- Combining the strength and luma multipliers -- float sharp_strength_luma = sharp_strength; //I'll be combining even more multipliers with it later on - + float px = 1.0; float py = 1.0; - - // Sampling patterns - + + // Sampling patterns + // [ NW, , NE ] Each texture lookup (except ori) // [ ,ori, ] samples 4 pixels // [ SW, , SE ] @@ -54,39 +54,39 @@ vec4 hook(){ // [ 1/9, 2/9, ] [ 1 , 2 , ] // [ 2/9, 8/9, 2/9] = [ 2 , 8 , 2 ] // [ , 2/9, 1/9] [ , 2 , 1 ] - + px = (px / 3.0) * offset_bias; py = (py / 3.0) * offset_bias; - + float blur_ori = LUMA_texOff(vec2(px,py)).x; // North West blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South East - + //blur_ori += LUMA_texOff(vec2(px,py)).x; // North East //blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West - + blur_ori *= 0.5; //Divide by the number of texture fetches sharp_strength_luma *= 1.5; // Adjust strength to aproximate the strength of pattern 2 #endif - + // -- Pattern 2 -- A 9 tap gaussian using 4+1 texture fetches. #if pattern == 2 // -- Gaussian filter -- // [ .25, .50, .25] [ 1 , 2 , 1 ] // [ .50, 1, .50] = [ 2 , 4 , 2 ] // [ .25, .50, .25] [ 1 , 2 , 1 ] - + px = px * 0.5 * offset_bias; py = py * 0.5 * offset_bias; - + float blur_ori = LUMA_texOff(vec2(px,-py)).x; // South East blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West blur_ori += LUMA_texOff(vec2(px,py)).x; // North East blur_ori += LUMA_texOff(vec2(-px,py)).x; // North West - + blur_ori *= 0.25; // ( /= 4) Divide by the number of texture fetches #endif - + // -- Pattern 3 -- An experimental 17 tap gaussian using 4+1 texture fetches. #if pattern == 3 @@ -96,7 +96,7 @@ vec4 hook(){ // [ 6 ,24 , ,24 , 6 ] // [ 4 ,16 ,24 ,16 , ] // [ , , 6 , 4 , ] - + px = px * offset_bias; py = py * offset_bias; @@ -104,97 +104,97 @@ vec4 hook(){ blur_ori += LUMA_texOff(vec2(-1.2*px,-0.4*py)).x; // West South West blur_ori += LUMA_texOff(vec2(1.2*px,0.4*py)).x; // East North East blur_ori += LUMA_texOff(vec2(-0.4*px,1.2*py)).x; // North North West - + blur_ori *= 0.25; // ( /= 4) Divide by the number of texture fetches - + sharp_strength_luma *= 0.51; #endif - + // -- Pattern 4 -- A 9 tap high pass (pyramid filter) using 4+1 texture fetches. #if pattern == 4 // -- Gaussian filter -- // [ .50, .50, .50] [ 1 , 1 , 1 ] // [ .50, , .50] = [ 1 , , 1 ] // [ .50, .50, .50] [ 1 , 1 , 1 ] - + float blur_ori = LUMA_texOff(vec2(0.5 * px,-py * offset_bias)).x; // South South East blur_ori += LUMA_texOff(vec2(offset_bias * -px,0.5 * -py)).x; // West South West blur_ori += LUMA_texOff(vec2(offset_bias * px,0.5 * py)).x; // East North East blur_ori += LUMA_texOff(vec2(0.5 * -px,py * offset_bias)).x; // North North West - + //blur_ori += (2.0 * ori); // Probably not needed. Only serves to lessen the effect. 
- + blur_ori *= 0.25; //Divide by the number of texture fetches sharp_strength_luma *= 0.666; // Adjust strength to aproximate the strength of pattern 2 #endif - + // -- Pattern 8 -- A (slower) 9 tap gaussian using 9 texture fetches. #if pattern == 8 - + // -- Gaussian filter -- // [ 1 , 2 , 1 ] // [ 2 , 4 , 2 ] // [ 1 , 2 , 1 ] - + px = px * offset_bias; py = py * offset_bias; - + float blur_ori = LUMA_texOff(vec2(-px,py)).x; // North West blur_ori += LUMA_texOff(vec2(px,-py)).x; // South East blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West blur_ori += LUMA_texOff(vec2(px,py)).x; // North East - + float blur_ori2 = LUMA_texOff(vec2(0.0,py)).x; // North blur_ori2 += LUMA_texOff(vec2(0.0,-py)).x; // South blur_ori2 += LUMA_texOff(vec2(-px,0.0)).x; // West blur_ori2 += LUMA_texOff(vec2(px,0.0)).x; // East blur_ori2 *= 2.0; - + blur_ori += blur_ori2; blur_ori += (ori * 4.0); // Probably not needed. Only serves to lessen the effect. - + // dot()s with gaussian strengths here? - + blur_ori /= 16.0; //Divide by the number of texture fetches - + sharp_strength_luma *= 0.75; // Adjust strength to aproximate the strength of pattern 2 #endif - + // -- Pattern 9 -- A (slower) 9 tap high pass using 9 texture fetches. #if pattern == 9 - + // -- Gaussian filter -- // [ 1 , 1 , 1 ] // [ 1 , 1 , 1 ] // [ 1 , 1 , 1 ] - + px = px * offset_bias; py = py * offset_bias; - + float blur_ori = LUMA_texOff(vec2(-px,py)).x; // North West blur_ori += LUMA_texOff(vec2(px,-py)).x; // South East blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West blur_ori += LUMA_texOff(vec2(px,py)).x; // North East - + blur_ori += ori; // Probably not needed. Only serves to lessen the effect. - + blur_ori += LUMA_texOff(vec2(0.0,py)).x; // North blur_ori += LUMA_texOff(vec2(0.0,-py)).x; // South blur_ori += LUMA_texOff(vec2(-px,0.0)).x; // West blur_ori += LUMA_texOff(vec2(px,0.0)).x; // East - + blur_ori /= 9.0; //Divide by the number of texture fetches - + sharp_strength_luma *= (8.0/9.0); // Adjust strength to aproximate the strength of pattern 2 #endif - + // -- Calculate the sharpening -- float sharp = ori - blur_ori; //Subtracting the blurred image from the original image - + // -- Adjust strength of the sharpening and clamp it-- float sharp_strength_luma_clamp = sharp_strength_luma / (2.0 * sharp_clamp); //Roll part of the clamp into the dot - + float sharp_luma = clamp((sharp * sharp_strength_luma_clamp + 0.5), 0.0,1.0 ); //Calculate the luma, adjust the strength, scale up and clamp sharp_luma = (sharp_clamp * 2.0) * sharp_luma - sharp_clamp; //scale down diff --git a/shaders/adaptive-sharpen.glsl b/shaders/adaptive-sharpen.glsl index 4288a81..2745f2c 100644 --- a/shaders/adaptive-sharpen.glsl +++ b/shaders/adaptive-sharpen.glsl @@ -33,7 +33,7 @@ //--------------------------------------- Settings ------------------------------------------------ -#define curve_height 1.6 // Main control of sharpening strength [>0] +#define curve_height 1.6 // Main control of sharpening strength [>0] // 0.3 <-> 2.0 is a reasonable range of values // Defined values under this row are "optimal" DO NOT CHANGE IF YOU DO NOT KNOW WHAT YOU ARE DOING! diff --git a/softhdcuvid.cpp b/softhdcuvid.cpp index b4643c2..627e02c 100644 --- a/softhdcuvid.cpp +++ b/softhdcuvid.cpp @@ -1,7 +1,7 @@ /// /// @file softhddevice.cpp @brief A software HD device plugin for VDR. /// -/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. 
/// /// Contributor(s): /// @@ -20,13 +20,13 @@ /// $Id: fa6a877682f47297580ff5f502425fc7948cb2fa $ ////////////////////////////////////////////////////////////////////////////// -#define __STDC_CONSTANT_MACROS ///< needed for ffmpeg UINT64_C +#define __STDC_CONSTANT_MACROS ///< needed for ffmpeg UINT64_C -#include -#include -#include -#include #include +#include +#include +#include +#include #include #include @@ -42,20 +42,18 @@ #include "openglosd.h" #endif -extern "C" -{ -#include +extern "C" { #include +#include #ifndef USE_OPENGLOSD #include "audio.h" -#include "video.h" #include "codec.h" +#include "video.h" #endif #ifdef PLACEBO #include extern void ToggleLUT(); #endif - } ////////////////////////////////////////////////////////////////////////////// @@ -65,7 +63,7 @@ extern void ToggleLUT(); /// for the distribution archive. static const char *const VERSION = "3.5.4" #ifdef GIT_REV - "-GIT" GIT_REV + "-GIT" GIT_REV #endif ; @@ -80,36 +78,35 @@ static class cSoftHdDevice *MyDevice; ////////////////////////////////////////////////////////////////////////////// -#define RESOLUTIONS 5 ///< number of resolutions +#define RESOLUTIONS 5 ///< number of resolutions /// resolutions names -static const char *const Resolution[RESOLUTIONS] = { - "576i", "720p", "1080i_fake", "1080i", "UHD" -}; +static const char *const Resolution[RESOLUTIONS] = {"576i", "720p", "1080i_fake", "1080i", "UHD"}; -static char ConfigMakePrimary; ///< config primary wanted -static char ConfigHideMainMenuEntry; ///< config hide main menu entry -static char ConfigDetachFromMainMenu; ///< detach from main menu entry instead of suspend -static char ConfigSuspendClose; ///< suspend should close devices -static char ConfigSuspendX11; ///< suspend should stop x11 +static char ConfigMakePrimary; ///< config primary wanted +static char ConfigHideMainMenuEntry; ///< config hide main menu entry +static char ConfigDetachFromMainMenu; ///< detach from main menu entry instead + ///< of suspend +static char ConfigSuspendClose; ///< suspend should close devices +static char ConfigSuspendX11; ///< suspend should stop x11 -static char Config4to3DisplayFormat = 1; ///< config 4:3 display format -static char ConfigOtherDisplayFormat = 1; ///< config other display format -static uint32_t ConfigVideoBackground; ///< config video background color -static int ConfigOsdWidth; ///< config OSD width -static int ConfigOsdHeight; ///< config OSD height -static char ConfigVideoStudioLevels; ///< config use studio levels -static char ConfigVideo60HzMode; ///< config use 60Hz display mode -static char ConfigVideoSoftStartSync; ///< config use softstart sync -static char ConfigVideoBlackPicture; ///< config enable black picture mode -char ConfigVideoClearOnSwitch; ///< config enable Clear on channel switch +static char Config4to3DisplayFormat = 1; ///< config 4:3 display format +static char ConfigOtherDisplayFormat = 1; ///< config other display format +static uint32_t ConfigVideoBackground; ///< config video background color +static int ConfigOsdWidth; ///< config OSD width +static int ConfigOsdHeight; ///< config OSD height +static char ConfigVideoStudioLevels; ///< config use studio levels +static char ConfigVideo60HzMode; ///< config use 60Hz display mode +static char ConfigVideoSoftStartSync; ///< config use softstart sync +static char ConfigVideoBlackPicture; ///< config enable black picture mode +char ConfigVideoClearOnSwitch; ///< config enable Clear on channel switch static int ConfigVideoBrightness; ///< config video brightness static int 
ConfigVideoContrast = 100; ///< config video contrast static int ConfigVideoSaturation = 100; ///< config video saturation static int ConfigVideoHue; ///< config video hue -static int ConfigGamma=100; ///< config Gamma -static int ConfigTemperature=0; ///< config Temperature +static int ConfigGamma = 100; ///< config Gamma +static int ConfigTemperature = 0; ///< config Temperature static int ConfigTargetColorSpace; ///< config Target Colrospace static int ConfigScalerTest; /// Test for Scalers static int ConfigColorBlindness; @@ -139,58 +136,59 @@ static int ConfigVideoCutTopBottom[RESOLUTIONS]; /// config cut left and right pixels static int ConfigVideoCutLeftRight[RESOLUTIONS]; -static int ConfigVideoAudioDelay; ///< config audio delay -static char ConfigAudioDrift; ///< config audio drift -static char ConfigAudioPassthrough; ///< config audio pass-through mask -static char AudioPassthroughState; ///< flag audio pass-through on/off -static char ConfigAudioDownmix; ///< config ffmpeg audio downmix -static char ConfigAudioSoftvol; ///< config use software volume -static char ConfigAudioNormalize; ///< config use normalize volume -static int ConfigAudioMaxNormalize; ///< config max normalize factor -static char ConfigAudioCompression; ///< config use volume compression -static int ConfigAudioMaxCompression; ///< config max volume compression -static int ConfigAudioStereoDescent; ///< config reduce stereo loudness -int ConfigAudioBufferTime; ///< config size ms of audio buffer -static int ConfigAudioAutoAES; ///< config automatic AES handling +static int ConfigVideoAudioDelay; ///< config audio delay +static char ConfigAudioDrift; ///< config audio drift +static char ConfigAudioPassthrough; ///< config audio pass-through mask +static char AudioPassthroughState; ///< flag audio pass-through on/off +static char ConfigAudioDownmix; ///< config ffmpeg audio downmix +static char ConfigAudioSoftvol; ///< config use software volume +static char ConfigAudioNormalize; ///< config use normalize volume +static int ConfigAudioMaxNormalize; ///< config max normalize factor +static char ConfigAudioCompression; ///< config use volume compression +static int ConfigAudioMaxCompression; ///< config max volume compression +static int ConfigAudioStereoDescent; ///< config reduce stereo loudness +int ConfigAudioBufferTime; ///< config size ms of audio buffer +static int ConfigAudioAutoAES; ///< config automatic AES handling -static char *ConfigX11Display; ///< config x11 display -static char *ConfigAudioDevice; ///< config audio stereo device -static char *ConfigPassthroughDevice; ///< config audio pass-through device +static char *ConfigX11Display; ///< config x11 display +static char *ConfigAudioDevice; ///< config audio stereo device +static char *ConfigPassthroughDevice; ///< config audio pass-through device #ifdef USE_PIP -static int ConfigPipX = 100 - 3 - 18; ///< config pip pip x in % -static int ConfigPipY = 100 - 4 - 18; ///< config pip pip y in % -static int ConfigPipWidth = 18; ///< config pip pip width in % -static int ConfigPipHeight = 18; ///< config pip pip height in % -static int ConfigPipVideoX; ///< config pip video x in % -static int ConfigPipVideoY; ///< config pip video y in % -static int ConfigPipVideoWidth; ///< config pip video width in % -static int ConfigPipVideoHeight; ///< config pip video height in % -static int ConfigPipAltX; ///< config pip alt. pip x in % -static int ConfigPipAltY = 50; ///< config pip alt. pip y in % -static int ConfigPipAltWidth; ///< config pip alt. 
pip width in % -static int ConfigPipAltHeight = 50; ///< config pip alt. pip height in % -static int ConfigPipAltVideoX; ///< config pip alt. video x in % -static int ConfigPipAltVideoY; ///< config pip alt. video y in % -static int ConfigPipAltVideoWidth; ///< config pip alt. video width in % -static int ConfigPipAltVideoHeight = 50; ///< config pip alt. video height in % +static int ConfigPipX = 100 - 3 - 18; ///< config pip pip x in % +static int ConfigPipY = 100 - 4 - 18; ///< config pip pip y in % +static int ConfigPipWidth = 18; ///< config pip pip width in % +static int ConfigPipHeight = 18; ///< config pip pip height in % +static int ConfigPipVideoX; ///< config pip video x in % +static int ConfigPipVideoY; ///< config pip video y in % +static int ConfigPipVideoWidth; ///< config pip video width in % +static int ConfigPipVideoHeight; ///< config pip video height in % +static int ConfigPipAltX; ///< config pip alt. pip x in % +static int ConfigPipAltY = 50; ///< config pip alt. pip y in % +static int ConfigPipAltWidth; ///< config pip alt. pip width in % +static int ConfigPipAltHeight = 50; ///< config pip alt. pip height in % +static int ConfigPipAltVideoX; ///< config pip alt. video x in % +static int ConfigPipAltVideoY; ///< config pip alt. video y in % +static int ConfigPipAltVideoWidth; ///< config pip alt. video width in % +static int ConfigPipAltVideoHeight = 50; ///< config pip alt. video height in % #endif #ifdef USE_SCREENSAVER -static char ConfigEnableDPMSatBlackScreen; ///< Enable DPMS(Screensaver) while displaying black screen(radio) +static char ConfigEnableDPMSatBlackScreen; ///< Enable DPMS(Screensaver) while + ///< displaying black screen(radio) #endif #ifdef USE_OPENGLOSD -static int ConfigMaxSizeGPUImageCache = 128; ///< maximum size of GPU mem to be used for image caching +static int ConfigMaxSizeGPUImageCache = 128; ///< maximum size of GPU mem to be used for image caching #endif -static volatile int DoMakePrimary; ///< switch primary device to this +static volatile int DoMakePrimary; ///< switch primary device to this -#define SUSPEND_EXTERNAL -1 ///< play external suspend mode -#define NOT_SUSPENDED 0 ///< not suspend mode -#define SUSPEND_NORMAL 1 ///< normal suspend mode -#define SUSPEND_DETACHED 2 ///< detached suspend mode -static signed char SuspendMode; ///< suspend mode +#define SUSPEND_EXTERNAL -1 ///< play external suspend mode +#define NOT_SUSPENDED 0 ///< not suspend mode +#define SUSPEND_NORMAL 1 ///< normal suspend mode +#define SUSPEND_DETACHED 2 ///< detached suspend mode +static signed char SuspendMode; ///< suspend mode ////////////////////////////////////////////////////////////////////////////// @@ -201,37 +199,29 @@ static signed char SuspendMode; ///< suspend mode /** ** Soft device plugin remote class. */ -class cSoftRemote:public cRemote, private cThread -{ +class cSoftRemote : public cRemote, private cThread { private: cMutex mutex; cCondVar keyReceived; cString Command; virtual void Action(void); + public: + /** + ** Soft device remote class constructor. + ** + ** @param name remote name + */ + cSoftRemote(void) : cRemote("XKeySym") { Start(); } + + virtual ~cSoftRemote() { Cancel(3); } /** - ** Soft device remote class constructor. + ** Receive keycode. ** - ** @param name remote name + ** @param code key code */ - cSoftRemote(void):cRemote("XKeySym") - { - Start(); - } - - virtual ~ cSoftRemote() - { - Cancel(3); - } - - /** - ** Receive keycode. 
- ** - ** @param code key code - */ - void Receive(const char *code) - { + void Receive(const char *code) { cMutexLock MutexLock(&mutex); Command = code; @@ -239,8 +229,7 @@ class cSoftRemote:public cRemote, private cThread } }; -void cSoftRemote::Action(void) -{ +void cSoftRemote::Action(void) { // see also VDR's cKbdRemote::Action() cTimeMs FirstTime; cTimeMs LastTime; @@ -254,14 +243,15 @@ void cSoftRemote::Action(void) if (keyReceived.TimedWait(mutex, Setup.RcRepeatDelta * 3 / 2) && **Command) { if (strcmp(Command, LastCommand) == 0) { - // If two keyboard events with the same command come in without an intermediate - // timeout, this is a long key press that caused the repeat function to kick in: + // If two keyboard events with the same command come in without an + // intermediate timeout, this is a long key press that caused the repeat + // function to kick in: Delayed = false; FirstCommand = ""; - if (FirstTime.Elapsed() < (uint) Setup.RcRepeatDelay) - continue; // repeat function kicks in after a short delay - if (LastTime.Elapsed() < (uint) Setup.RcRepeatDelta) - continue; // skip same keys coming in too fast + if (FirstTime.Elapsed() < (uint)Setup.RcRepeatDelay) + continue; // repeat function kicks in after a short delay + if (LastTime.Elapsed() < (uint)Setup.RcRepeatDelta) + continue; // skip same keys coming in too fast cRemote::Put(Command, true); Repeat = true; LastTime.Set(); @@ -288,7 +278,7 @@ void cSoftRemote::Action(void) Delayed = false; FirstCommand = ""; FirstTime.Set(); - } else if (**FirstCommand && FirstTime.Elapsed() > (uint) Setup.RcRepeatDelay) { + } else if (**FirstCommand && FirstTime.Elapsed() > (uint)Setup.RcRepeatDelay) { Delayed = false; FirstCommand = ""; FirstTime.Set(); @@ -304,13 +294,12 @@ static cSoftRemote *csoft = NULL; ** Feed key press as remote input (called from C part). ** ** @param keymap target keymap "XKeymap" name (obsolete, ignored) -** @param key pressed/released key name +** @param key pressed/released key name ** @param repeat repeated key flag (obsolete, ignored) ** @param release released key flag (obsolete, ignored) ** @param letter x11 character string (system setting locale) */ -extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, int release, const char *letter) -{ +extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, int release, const char *letter) { if (!csoft || !keymap || !key) { return; } @@ -325,38 +314,36 @@ extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, in /** ** Soft device plugin OSD class. 
*/ -class cSoftOsd:public cOsd -{ +class cSoftOsd : public cOsd { public: - static volatile char Dirty; ///< flag force redraw everything - int OsdLevel; ///< current osd level FIXME: remove + static volatile char Dirty; ///< flag force redraw everything + int OsdLevel; ///< current osd level FIXME: remove - cSoftOsd(int, int, uint); ///< osd constructor - virtual ~ cSoftOsd(void); ///< osd destructor + cSoftOsd(int, int, uint); ///< osd constructor + virtual ~cSoftOsd(void); ///< osd destructor /// set the sub-areas to the given areas virtual eOsdError SetAreas(const tArea *, int); - virtual void Flush(void); ///< commits all data to the hardware - virtual void SetActive(bool); ///< sets OSD to be the active one + virtual void Flush(void); ///< commits all data to the hardware + virtual void SetActive(bool); ///< sets OSD to be the active one }; -volatile char cSoftOsd::Dirty; ///< flag force redraw everything +volatile char cSoftOsd::Dirty; ///< flag force redraw everything /** ** Sets this OSD to be the active one. ** -** @param on true on, false off +** @param on true on, false off ** ** @note only needed as workaround for text2skin plugin with ** undrawn areas. */ -void cSoftOsd::SetActive(bool on) -{ +void cSoftOsd::SetActive(bool on) { #ifdef OSD_DEBUG dsyslog("[softhddev]%s: %d level %d\n", __FUNCTION__, on, OsdLevel); #endif if (Active() == on) { - return; // already active, no action + return; // already active, no action } cOsd::SetActive(on); @@ -377,12 +364,10 @@ void cSoftOsd::SetActive(bool on) ** Initializes the OSD with the given coordinates. ** ** @param left x-coordinate of osd on display -** @param top y-coordinate of osd on display +** @param top y-coordinate of osd on display ** @param level level of the osd (smallest is shown) */ -cSoftOsd::cSoftOsd(int left, int top, uint level) -:cOsd(left, top, level) -{ +cSoftOsd::cSoftOsd(int left, int top, uint level) : cOsd(left, top, level) { #ifdef OSD_DEBUG /* FIXME: OsdWidth/OsdHeight not correct! */ @@ -397,8 +382,7 @@ cSoftOsd::cSoftOsd(int left, int top, uint level) ** ** Shuts down the OSD. */ -cSoftOsd::~cSoftOsd(void) -{ +cSoftOsd::~cSoftOsd(void) { #ifdef OSD_DEBUG dsyslog("[softhddev]%s: level %d\n", __FUNCTION__, OsdLevel); #endif @@ -408,7 +392,7 @@ cSoftOsd::~cSoftOsd(void) #ifdef USE_YAEPG // support yaepghd, video window - if (vidWin.bpp) { // restore fullsized video + if (vidWin.bpp) { // restore fullsized video int width; int height; double video_aspect; @@ -423,8 +407,7 @@ cSoftOsd::~cSoftOsd(void) /** ** Set the sub-areas to the given areas */ -eOsdError cSoftOsd::SetAreas(const tArea * areas, int n) -{ +eOsdError cSoftOsd::SetAreas(const tArea *areas, int n) { #ifdef OSD_DEBUG dsyslog("[softhddev]%s: %d areas \n", __FUNCTION__, n); #endif @@ -448,15 +431,14 @@ eOsdError cSoftOsd::SetAreas(const tArea * areas, int n) /** ** Actually commits all data to the OSD hardware. 
*/ -void cSoftOsd::Flush(void) -{ +void cSoftOsd::Flush(void) { cPixmapMemory *pm; #ifdef OSD_DEBUG dsyslog("[softhddev]%s: level %d active %d\n", __FUNCTION__, OsdLevel, Active()); #endif - if (!Active()) { // this osd is not active + if (!Active()) { // this osd is not active return; } #ifdef USE_YAEPG @@ -499,13 +481,13 @@ void cSoftOsd::Flush(void) int y2; // get dirty bounding box - if (Dirty) { // forced complete update + if (Dirty) { // forced complete update x1 = 0; y1 = 0; x2 = bitmap->Width() - 1; y2 = bitmap->Height() - 1; } else if (!bitmap->Dirty(x1, y1, x2, y2)) { - continue; // nothing dirty continue + continue; // nothing dirty continue } // convert and upload only visible dirty areas xs = bitmap->X0() + Left(); @@ -514,7 +496,7 @@ void cSoftOsd::Flush(void) w = x2 - x1 + 1; h = y2 - y1 + 1; // clip to screen - if (1) { // just for the case it makes trouble + if (1) { // just for the case it makes trouble int width; int height; double video_aspect; @@ -561,10 +543,10 @@ void cSoftOsd::Flush(void) abort(); } #endif - argb = (uint8_t *) malloc(w * h * sizeof(uint32_t)); + argb = (uint8_t *)malloc(w * h * sizeof(uint32_t)); for (y = y1; y <= y2; ++y) { for (x = x1; x <= x2; ++x) { - ((uint32_t *) argb)[x - x1 + (y - y1) * w] = bitmap->GetColor(x, y); + ((uint32_t *)argb)[x - x1 + (y - y1) * w] = bitmap->GetColor(x, y); } } #ifdef OSD_DEBUG @@ -581,7 +563,7 @@ void cSoftOsd::Flush(void) } LOCK_PIXMAPS; - while ((pm = (dynamic_cast < cPixmapMemory * >(RenderPixmaps())))) { + while ((pm = (dynamic_cast(RenderPixmaps())))) { int xp; int yp; int stride; @@ -622,7 +604,7 @@ void cSoftOsd::Flush(void) y += Top(); // clip to screen - if (1) { // just for the case it makes trouble + if (1) { // just for the case it makes trouble // and it can happen! 
int width; int height; @@ -649,7 +631,7 @@ void cSoftOsd::Flush(void) } #ifdef OSD_DEBUG dsyslog("[softhddev]%s: draw %dx%d%+d%+d*%d -> %+d%+d %p\n", __FUNCTION__, w, h, xp, yp, stride, x, y, - pm->Data()); + pm->Data()); #endif OsdDrawARGB(xp, yp, w, h, stride, pm->Data(), x, y); @@ -659,72 +641,55 @@ void cSoftOsd::Flush(void) } #ifdef USE_OPENGLOSD -//Dummy OSD for OpenGL OSD if no X Server is available -class cDummyOsd:public cOsd -{ +// Dummy OSD for OpenGL OSD if no X Server is available +class cDummyOsd : public cOsd { public: - cDummyOsd(int Left, int Top, uint Level):cOsd(Left, Top, Level) - { - } - virtual ~ cDummyOsd() - { - } - virtual cPixmap *CreatePixmap(int Layer, const cRect & ViewPort, const cRect & DrawPort = cRect::Null) { + cDummyOsd(int Left, int Top, uint Level) : cOsd(Left, Top, Level) {} + virtual ~cDummyOsd() {} + virtual cPixmap *CreatePixmap(int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null) { (void)Layer; (void)ViewPort; (void)DrawPort; return NULL; } - virtual void DestroyPixmap(cPixmap * Pixmap) - { - (void)Pixmap; - } - virtual void DrawImage(const cPoint & Point, const cImage & Image) - { + virtual void DestroyPixmap(cPixmap *Pixmap) { (void)Pixmap; } + virtual void DrawImage(const cPoint &Point, const cImage &Image) { (void)Point; (void)Image; } - virtual void DrawImage(const cPoint & Point, int ImageHandle) - { + virtual void DrawImage(const cPoint &Point, int ImageHandle) { (void)Point; (void)ImageHandle; } - virtual eOsdError CanHandleAreas(const tArea * Areas, int NumAreas) - { + virtual eOsdError CanHandleAreas(const tArea *Areas, int NumAreas) { (void)Areas; (void)NumAreas; return oeOk; } - virtual eOsdError SetAreas(const tArea * Areas, int NumAreas) - { + virtual eOsdError SetAreas(const tArea *Areas, int NumAreas) { (void)Areas; (void)NumAreas; return oeOk; } - virtual void SaveRegion(int x1, int y1, int x2, int y2) - { + virtual void SaveRegion(int x1, int y1, int x2, int y2) { (void)x1; (void)y1; (void)x2; (void)y2; } - virtual void RestoreRegion(void) - { - } - virtual eOsdError SetPalette(const cPalette & Palette, int Area) - { + virtual void RestoreRegion(void) {} + virtual eOsdError SetPalette(const cPalette &Palette, int Area) { (void)Palette; (void)Area; return oeOk; } - virtual void DrawPixel(int x, int y, tColor Color) - { + virtual void DrawPixel(int x, int y, tColor Color) { (void)x; (void)y; (void)Color; } - virtual void DrawBitmap(int x, int y, const cBitmap & Bitmap, tColor ColorFg = 0, tColor ColorBg = - 0, bool ReplacePalette = false, bool Overlay = false) { + virtual void DrawBitmap(int x, int y, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0, + bool ReplacePalette = false, bool Overlay = false) { (void)x; (void)y; (void)Bitmap; @@ -733,8 +698,8 @@ class cDummyOsd:public cOsd (void)ReplacePalette; (void)Overlay; } - virtual void DrawText(int x, int y, const char *s, tColor ColorFg, tColor ColorBg, const cFont * Font, int Width = - 0, int Height = 0, int Alignment = taDefault) { + virtual void DrawText(int x, int y, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font, + int Width = 0, int Height = 0, int Alignment = taDefault) { (void)x; (void)y; (void)s; @@ -745,8 +710,7 @@ class cDummyOsd:public cOsd (void)Height; (void)Alignment; } - virtual void DrawRectangle(int x1, int y1, int x2, int y2, tColor Color) - { + virtual void DrawRectangle(int x1, int y1, int x2, int y2, tColor Color) { (void)x1; (void)y1; (void)x2; @@ -761,8 +725,7 @@ class cDummyOsd:public cOsd (void)Color; 
(void)Quadrants; } - virtual void DrawSlope(int x1, int y1, int x2, int y2, tColor Color, int Type) - { + virtual void DrawSlope(int x1, int y1, int x2, int y2, tColor Color, int Type) { (void)x1; (void)y1; (void)x2; @@ -770,9 +733,7 @@ class cDummyOsd:public cOsd (void)Color; (void)Type; } - virtual void Flush(void) - { - } + virtual void Flush(void) {} }; #endif @@ -783,36 +744,35 @@ class cDummyOsd:public cOsd /** ** Soft device plugin OSD provider class. */ -class cSoftOsdProvider:public cOsdProvider -{ +class cSoftOsdProvider : public cOsdProvider { private: - static cOsd *Osd; ///< single OSD + static cOsd *Osd; ///< single OSD #ifdef USE_OPENGLOSD - static std::shared_ptr < cOglThread > oglThread; + static std::shared_ptr oglThread; static bool StartOpenGlThread(void); + protected: - virtual int StoreImageData(const cImage & Image); + virtual int StoreImageData(const cImage &Image); virtual void DropImageData(int ImageHandle); #endif public: - virtual cOsd * CreateOsd(int, int, uint); + virtual cOsd *CreateOsd(int, int, uint); virtual bool ProvidesTrueColor(void); #ifdef USE_OPENGLOSD static void StopOpenGlThread(void); static const cImage *GetImageData(int ImageHandle); static void OsdSizeChanged(void); #endif - cSoftOsdProvider(void); ///< OSD provider constructor - virtual ~ cSoftOsdProvider(); ///< OSD provider destructor + cSoftOsdProvider(void); ///< OSD provider constructor + virtual ~cSoftOsdProvider(); ///< OSD provider destructor }; -cOsd *cSoftOsdProvider::Osd; ///< single osd +cOsd *cSoftOsdProvider::Osd; ///< single osd #ifdef USE_OPENGLOSD -std::shared_ptr < cOglThread > cSoftOsdProvider::oglThread; ///< openGL worker Thread +std::shared_ptr cSoftOsdProvider::oglThread; ///< openGL worker Thread -int cSoftOsdProvider::StoreImageData(const cImage & Image) -{ +int cSoftOsdProvider::StoreImageData(const cImage &Image) { if (StartOpenGlThread()) { int imgHandle = oglThread->StoreImage(Image); @@ -821,8 +781,7 @@ int cSoftOsdProvider::StoreImageData(const cImage & Image) return 0; } -void cSoftOsdProvider::DropImageData(int ImageHandle) -{ +void cSoftOsdProvider::DropImageData(int ImageHandle) { if (StartOpenGlThread()) oglThread->DropImageData(ImageHandle); } @@ -832,16 +791,15 @@ void cSoftOsdProvider::DropImageData(int ImageHandle) ** Create a new OSD. ** ** @param left x-coordinate of OSD -** @param top y-coordinate of OSD +** @param top y-coordinate of OSD ** @param level layer level of OSD */ -cOsd *cSoftOsdProvider::CreateOsd(int left, int top, uint level) -{ +cOsd *cSoftOsdProvider::CreateOsd(int left, int top, uint level) { #ifdef USE_OPENGLOSD dsyslog("[softhddev]%s: left %d, top %d, level %d, using OpenGL OSD support\n", __FUNCTION__, left, top, level); if (StartOpenGlThread()) return Osd = new cOglOsd(left, top, level, oglThread); - //return dummy osd if shd is detached + // return dummy osd if shd is detached dsyslog("[softhddev]OpenGl Thread not started successfully, using Dummy OSD"); return Osd = new cDummyOsd(left, top, 999); #else @@ -855,26 +813,18 @@ cOsd *cSoftOsdProvider::CreateOsd(int left, int top, uint level) ** ** @returns true we are able to handle a true color OSD. 
*/ -bool cSoftOsdProvider::ProvidesTrueColor(void) -{ - return true; -} +bool cSoftOsdProvider::ProvidesTrueColor(void) { return true; } #ifdef USE_OPENGLOSD -const cImage *cSoftOsdProvider::GetImageData(int ImageHandle) -{ - return cOsdProvider::GetImageData(ImageHandle); -} +const cImage *cSoftOsdProvider::GetImageData(int ImageHandle) { return cOsdProvider::GetImageData(ImageHandle); } -void cSoftOsdProvider::OsdSizeChanged(void) -{ +void cSoftOsdProvider::OsdSizeChanged(void) { // cleanup OpenGl Context cSoftOsdProvider::StopOpenGlThread(); cOsdProvider::UpdateOsdSize(); } -bool cSoftOsdProvider::StartOpenGlThread(void) -{ +bool cSoftOsdProvider::StartOpenGlThread(void) { // only try to start worker thread if shd is attached // otherwise glutInit() crashes if (SuspendMode != NOT_SUSPENDED) { @@ -900,8 +850,7 @@ bool cSoftOsdProvider::StartOpenGlThread(void) return false; } -void cSoftOsdProvider::StopOpenGlThread(void) -{ +void cSoftOsdProvider::StopOpenGlThread(void) { dsyslog("[softhddev]stopping OpenGL Worker Thread "); if (oglThread) { // OsdClose(); @@ -915,9 +864,7 @@ void cSoftOsdProvider::StopOpenGlThread(void) /** ** Create cOsdProvider class. */ -cSoftOsdProvider::cSoftOsdProvider(void) -:cOsdProvider() -{ +cSoftOsdProvider::cSoftOsdProvider(void) : cOsdProvider() { #ifdef OSD_DEBUG dsyslog("[softhddev]%s:\n", __FUNCTION__); #endif @@ -925,14 +872,12 @@ cSoftOsdProvider::cSoftOsdProvider(void) StopOpenGlThread(); VideoSetVideoEventCallback(&OsdSizeChanged); #endif - } /** ** Destroy cOsdProvider class. */ -cSoftOsdProvider::~cSoftOsdProvider() -{ +cSoftOsdProvider::~cSoftOsdProvider() { #ifdef OSD_DEBUG dsyslog("[softhddev]%s:\n", __FUNCTION__); #endif @@ -948,8 +893,7 @@ cSoftOsdProvider::~cSoftOsdProvider() /** ** Soft device plugin menu setup page class. */ -class cMenuSetupSoft:public cMenuSetupPage -{ +class cMenuSetupSoft : public cMenuSetupPage { protected: /// /// local copies of global setup variables: @@ -980,7 +924,7 @@ class cMenuSetupSoft:public cMenuSetupPage int Saturation; int Hue; int Gamma; - int Temperature; + int Temperature; int TargetColorSpace; int ScalerTest; int ColorBlindnessFaktor; @@ -1042,12 +986,13 @@ class cMenuSetupSoft:public cMenuSetupPage #endif /// @} private: - inline cOsdItem * CollapsedItem(const char *, int &, const char * = NULL); - void Create(void); // create sub-menu + inline cOsdItem *CollapsedItem(const char *, int &, const char * = NULL); + void Create(void); // create sub-menu protected: - virtual void Store(void); + virtual void Store(void); + public: - cMenuSetupSoft(void); + cMenuSetupSoft(void); virtual eOSState ProcessKey(eKeys); // handle input }; @@ -1056,8 +1001,7 @@ class cMenuSetupSoft:public cMenuSetupPage ** ** @param label text inside separator */ -static inline cOsdItem *SeparatorItem(const char *label) -{ +static inline cOsdItem *SeparatorItem(const char *label) { cOsdItem *item; item = new cOsdItem(cString::sprintf("* %s: ", label)); @@ -1071,10 +1015,9 @@ static inline cOsdItem *SeparatorItem(const char *label) ** ** @param label text inside collapsed ** @param flag flag handling collapsed or opened -** @param msg open message +** @param msg open message */ -inline cOsdItem *cMenuSetupSoft::CollapsedItem(const char *label, int &flag, const char *msg) -{ +inline cOsdItem *cMenuSetupSoft::CollapsedItem(const char *label, int &flag, const char *msg) { cOsdItem *item; item = new cMenuEditBoolItem(cString::sprintf("* %s", label), &flag, msg ? 
msg : tr("show"), tr("hide")); @@ -1085,37 +1028,36 @@ inline cOsdItem *cMenuSetupSoft::CollapsedItem(const char *label, int &flag, con /** ** Create setup menu. */ -void cMenuSetupSoft::Create(void) -{ +void cMenuSetupSoft::Create(void) { static const char *const osd_size[] = { - "auto", "1920x1080", "1280x720", "custom", - }; - static const char *const video_display_formats_4_3[] = { - "pan&scan", "letterbox", "center cut-out", "original" - }; - static const char *const video_display_formats_16_9[] = { - "pan&scan", "pillarbox", "center cut-out", "original" + "auto", + "1920x1080", + "1280x720", + "custom", }; + static const char *const video_display_formats_4_3[] = {"pan&scan", "letterbox", "center cut-out", "original"}; + static const char *const video_display_formats_16_9[] = {"pan&scan", "pillarbox", "center cut-out", "original"}; #ifdef YADIF static const char *const deinterlace[] = { - "Cuda", "Yadif", + "Cuda", + "Yadif", }; static const char *const deinterlace_short[] = { - "C", "Y", + "C", + "Y", }; #endif - static const char *const audiodrift[] = { - "None", "PCM", "AC-3", "PCM + AC-3" - }; - static const char *const resolution[RESOLUTIONS] = { - "576i", "720p", "fake 1080", "1080", "2160p" - }; + static const char *const audiodrift[] = {"None", "PCM", "AC-3", "PCM + AC-3"}; + static const char *const resolution[RESOLUTIONS] = {"576i", "720p", "fake 1080", "1080", "2160p"}; static const char *const target_colorspace[] = { - "default Monitor", "sRGB Monitor", "HD TV (BT.709)", "UHD-HDR TV (BT.2020)", + "default Monitor", + "sRGB Monitor", + "HD TV (BT.709)", + "UHD-HDR TV (BT.2020)", }; -#ifdef PLACEBO +#ifdef PLACEBO static const char *const target_colorblindness[] = { "None", "Protanomaly", "Deuteranomaly", "Tritanomaly", "Monochromacy", }; @@ -1139,11 +1081,11 @@ void cMenuSetupSoft::Create(void) } #endif - current = Current(); // get current menu item index - Clear(); // clear the menu + current = Current(); // get current menu item index + Clear(); // clear the menu // - // general + // general // Add(CollapsedItem(tr("General"), General)); @@ -1170,23 +1112,23 @@ void cMenuSetupSoft::Create(void) Add(new cMenuEditBoolItem(tr("Suspend stops x11"), &SuspendX11, trVDR("no"), trVDR("yes"))); } // - // video + // video // Add(CollapsedItem(tr("Video"), Video)); if (Video) { #ifdef USE_SCREENSAVER Add(new cMenuEditBoolItem(tr("Enable Screensaver(DPMS) at black screen"), &EnableDPMSatBlackScreen, - trVDR("no"), trVDR("yes"))); + trVDR("no"), trVDR("yes"))); #endif Add(new cMenuEditStraItem(trVDR("4:3 video display format"), &Video4to3DisplayFormat, 4, - video_display_formats_4_3)); + video_display_formats_4_3)); Add(new cMenuEditStraItem(trVDR("16:9+other video display format"), &VideoOtherDisplayFormat, 4, - video_display_formats_16_9)); + video_display_formats_16_9)); #if 0 - // FIXME: switch config gray/color configuration - Add(new cMenuEditIntItem(tr("Video background color (RGB)"), (int *)&Background, 0, 0x00FFFFFF)); - Add(new cMenuEditIntItem(tr("Video background color (Alpha)"), (int *)&BackgroundAlpha, 0, 0xFF)); + // FIXME: switch config gray/color configuration + Add(new cMenuEditIntItem(tr("Video background color (RGB)"), (int *)&Background, 0, 0x00FFFFFF)); + Add(new cMenuEditIntItem(tr("Video background color (Alpha)"), (int *)&BackgroundAlpha, 0, 0xFF)); #endif #ifdef PLACEBO Add(new cMenuEditBoolItem(tr("Use studio levels"), &StudioLevels, trVDR("no"), trVDR("yes"))); @@ -1205,11 +1147,12 @@ void cMenuSetupSoft::Create(void) Add(new cMenuEditIntItem(tr("Gamma 
(0..100)"), &Gamma, 0, 100, tr("min"), tr("max"))); Add(new cMenuEditIntItem(tr("Hue (-314..314) "), &Hue, -314, 314, tr("min"), tr("max"))); Add(new cMenuEditIntItem(tr("Temperature 6500K + x * 100K"), &Temperature, -35, 35, NULL, NULL)); - + Add(new cMenuEditStraItem(tr("Color Blindness"), &ColorBlindness, 5, target_colorblindness)); - Add(new cMenuEditIntItem(tr("Color Correction (-100..100) "), &ColorBlindnessFaktor, -100, 100, tr("min"), tr("max"))); + Add(new cMenuEditIntItem(tr("Color Correction (-100..100) "), &ColorBlindnessFaktor, -100, 100, tr("min"), + tr("max"))); #endif - Add(new cMenuEditStraItem(tr("Monitor Type"), &TargetColorSpace, 4, target_colorspace)); + Add(new cMenuEditStraItem(tr("Monitor Type"), &TargetColorSpace, 4, target_colorspace)); for (i = 0; i < RESOLUTIONS; ++i) { cString msg; @@ -1227,13 +1170,13 @@ void cMenuSetupSoft::Create(void) } #endif #if 0 - Add(new cMenuEditBoolItem(tr("SkipChromaDeinterlace (vdpau)"), &SkipChromaDeinterlace[i], trVDR("no"), - trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("Inverse Telecine (vdpau)"), &InverseTelecine[i], trVDR("no"), - trVDR("yes"))); - Add(new cMenuEditIntItem(tr("Denoise (0..1000) (vdpau)"), &Denoise[i], 0, 1000, tr("off"), tr("max"))); - Add(new cMenuEditIntItem(tr("Sharpen (-1000..1000) (vdpau)"), &Sharpen[i], -1000, 1000, tr("blur max"), - tr("sharpen max"))); + Add(new cMenuEditBoolItem(tr("SkipChromaDeinterlace (vdpau)"), &SkipChromaDeinterlace[i], trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("Inverse Telecine (vdpau)"), &InverseTelecine[i], trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditIntItem(tr("Denoise (0..1000) (vdpau)"), &Denoise[i], 0, 1000, tr("off"), tr("max"))); + Add(new cMenuEditIntItem(tr("Sharpen (-1000..1000) (vdpau)"), &Sharpen[i], -1000, 1000, tr("blur max"), + tr("sharpen max"))); #endif Add(new cMenuEditIntItem(tr("Cut top and bottom (pixel)"), &CutTopBottom[i], 0, 250)); Add(new cMenuEditIntItem(tr("Cut left and right (pixel)"), &CutLeftRight[i], 0, 250)); @@ -1241,22 +1184,26 @@ void cMenuSetupSoft::Create(void) } } // - // audio + // audio // Add(CollapsedItem(tr("Audio"), Audio)); if (Audio) { Add(new cMenuEditIntItem(tr("Audio/Video delay (ms)"), &AudioDelay, -1000, 1000)); Add(new cMenuEditStraItem(tr("Audio drift correction"), &AudioDrift, 4, audiodrift)); - + Add(new cMenuEditBoolItem(tr("Pass-through default"), &AudioPassthroughDefault, trVDR("off"), trVDR("on"))); - if (AudioPassthroughDefault) { - Add(new cMenuEditBoolItem(tr("\040\040PCM pass-through"), &AudioPassthroughPCM, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("\040\040AC-3 pass-through"), &AudioPassthroughAC3, trVDR("no"), trVDR("yes"))); - Add(new cMenuEditBoolItem(tr("\040\040E-AC-3 pass-through"), &AudioPassthroughEAC3, trVDR("no"),trVDR("yes"))); - } else { - Add(new cMenuEditBoolItem(tr("Enable (E-)AC-3 (decoder) downmix"), &AudioDownmix, trVDR("no"), trVDR("yes"))); - } + if (AudioPassthroughDefault) { + Add(new cMenuEditBoolItem(tr("\040\040PCM pass-through"), &AudioPassthroughPCM, trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("\040\040AC-3 pass-through"), &AudioPassthroughAC3, trVDR("no"), + trVDR("yes"))); + Add(new cMenuEditBoolItem(tr("\040\040E-AC-3 pass-through"), &AudioPassthroughEAC3, trVDR("no"), + trVDR("yes"))); + } else { + Add(new cMenuEditBoolItem(tr("Enable (E-)AC-3 (decoder) downmix"), &AudioDownmix, trVDR("no"), + trVDR("yes"))); + } Add(new cMenuEditBoolItem(tr("Volume control"), &AudioSoftvol, tr("Hardware"), tr("Software"))); Add(new 
cMenuEditBoolItem(tr("Enable normalize volume"), &AudioNormalize, trVDR("no"), trVDR("yes"))); Add(new cMenuEditIntItem(tr(" Max normalize factor (/1000)"), &AudioMaxNormalize, 0, 10000)); @@ -1268,7 +1215,7 @@ void cMenuSetupSoft::Create(void) } #ifdef USE_PIP // - // PIP + // PIP // Add(CollapsedItem(tr("Picture-In-Picture"), Pip)); if (Pip) { @@ -1292,15 +1239,14 @@ void cMenuSetupSoft::Create(void) } #endif - SetCurrent(Get(current)); // restore selected menu entry - Display(); // display build menu + SetCurrent(Get(current)); // restore selected menu entry + Display(); // display build menu } /** ** Process key for setup menu. */ -eOSState cMenuSetupSoft::ProcessKey(eKeys key) -{ +eOSState cMenuSetupSoft::ProcessKey(eKeys key) { eOSState state; int old_general; int old_video; @@ -1312,12 +1258,12 @@ eOSState cMenuSetupSoft::ProcessKey(eKeys key) int old_osd_size; int old_resolution_shown[RESOLUTIONS]; int i; - int old_pass; + int old_pass; old_general = General; old_video = Video; old_audio = Audio; - old_pass = AudioPassthroughDefault; + old_pass = AudioPassthroughDefault; #ifdef USE_PIP old_pip = Pip; #endif @@ -1332,13 +1278,12 @@ eOSState cMenuSetupSoft::ProcessKey(eKeys key) #ifdef USE_PIP || old_pip != Pip #endif - || old_pass != AudioPassthroughDefault - || old_osd_size != OsdSize) { - Create(); // update menu + || old_pass != AudioPassthroughDefault || old_osd_size != OsdSize) { + Create(); // update menu } else { for (i = 0; i < RESOLUTIONS; ++i) { if (old_resolution_shown[i] != ResolutionShown[i]) { - Create(); // update menu + Create(); // update menu break; } } @@ -1353,19 +1298,18 @@ eOSState cMenuSetupSoft::ProcessKey(eKeys key) ** ** Import global config variables into setup. */ -cMenuSetupSoft::cMenuSetupSoft(void) -{ +cMenuSetupSoft::cMenuSetupSoft(void) { int i; // - // general + // general // General = 0; MakePrimary = ConfigMakePrimary; HideMainMenuEntry = ConfigHideMainMenuEntry; DetachFromMainMenu = ConfigDetachFromMainMenu; // - // osd + // osd // OsdWidth = ConfigOsdWidth; OsdHeight = ConfigOsdHeight; @@ -1379,13 +1323,13 @@ cMenuSetupSoft::cMenuSetupSoft(void) OsdSize = 3; } // - // suspend + // suspend // SuspendClose = ConfigSuspendClose; SuspendX11 = ConfigSuspendX11; // - // video + // video // Video = 0; Video4to3DisplayFormat = Config4to3DisplayFormat; @@ -1404,7 +1348,7 @@ cMenuSetupSoft::cMenuSetupSoft(void) Saturation = ConfigVideoSaturation; Hue = ConfigVideoHue; Gamma = ConfigGamma; - Temperature = ConfigTemperature; + Temperature = ConfigTemperature; TargetColorSpace = ConfigTargetColorSpace; ColorBlindness = ConfigColorBlindness; ColorBlindnessFaktor = ConfigColorBlindnessFaktor; @@ -1423,7 +1367,7 @@ cMenuSetupSoft::cMenuSetupSoft(void) CutLeftRight[i] = ConfigVideoCutLeftRight[i]; } // - // audio + // audio // Audio = 0; AudioDelay = ConfigVideoAudioDelay; @@ -1444,7 +1388,7 @@ cMenuSetupSoft::cMenuSetupSoft(void) #ifdef USE_PIP // - // PIP + // PIP // Pip = 0; PipX = ConfigPipX; @@ -1479,8 +1423,7 @@ cMenuSetupSoft::cMenuSetupSoft(void) /** ** Store setup. 
*/ -void cMenuSetupSoft::Store(void) -{ +void cMenuSetupSoft::Store(void) { int i; SetupStore("MakePrimary", ConfigMakePrimary = MakePrimary); @@ -1537,7 +1480,7 @@ void cMenuSetupSoft::Store(void) VideoSetSaturation(ConfigVideoSaturation); SetupStore("Gamma", ConfigGamma = Gamma); VideoSetGamma(ConfigGamma); - SetupStore("Temperature", ConfigTemperature = Temperature); + SetupStore("Temperature", ConfigTemperature = Temperature); VideoSetTemperature(ConfigTemperature); SetupStore("TargetColorSpace", ConfigTargetColorSpace = TargetColorSpace); VideoSetTargetColor(ConfigTargetColorSpace); @@ -1590,9 +1533,8 @@ void cMenuSetupSoft::Store(void) if (ConfigAudioDownmix != AudioDownmix) { ResetChannelId(); } - ConfigAudioPassthrough = (AudioPassthroughPCM ? CodecPCM : 0) - | (AudioPassthroughAC3 ? CodecAC3 : 0) - | (AudioPassthroughEAC3 ? CodecEAC3 : 0); + ConfigAudioPassthrough = (AudioPassthroughPCM ? CodecPCM : 0) | (AudioPassthroughAC3 ? CodecAC3 : 0) | + (AudioPassthroughEAC3 ? CodecEAC3 : 0); AudioPassthroughState = AudioPassthroughDefault; if (AudioPassthroughState) { SetupStore("AudioPassthrough", ConfigAudioPassthrough); @@ -1653,22 +1595,16 @@ void cMenuSetupSoft::Store(void) /** ** Dummy player for suspend mode. */ -class cSoftHdPlayer:public cPlayer -{ +class cSoftHdPlayer : public cPlayer { protected: public: cSoftHdPlayer(void); - virtual ~ cSoftHdPlayer(); + virtual ~cSoftHdPlayer(); }; -cSoftHdPlayer::cSoftHdPlayer(void) -{ -} +cSoftHdPlayer::cSoftHdPlayer(void) {} -cSoftHdPlayer::~cSoftHdPlayer() -{ - Detach(); -} +cSoftHdPlayer::~cSoftHdPlayer() { Detach(); } ////////////////////////////////////////////////////////////////////////////// // cControl @@ -1677,31 +1613,27 @@ cSoftHdPlayer::~cSoftHdPlayer() /** ** Dummy control class for suspend mode. */ -class cSoftHdControl:public cControl -{ +class cSoftHdControl : public cControl { public: - static cSoftHdPlayer *Player; ///< dummy player - virtual void Hide(void) ///< hide control - { - } + static cSoftHdPlayer *Player; ///< dummy player + virtual void Hide(void) ///< hide control + {} virtual eOSState ProcessKey(eKeys); ///< process input events - cSoftHdControl(void); ///< control constructor + cSoftHdControl(void); ///< control constructor - virtual ~ cSoftHdControl(); ///< control destructor + virtual ~cSoftHdControl(); ///< control destructor }; -cSoftHdPlayer *cSoftHdControl::Player; ///< dummy player instance +cSoftHdPlayer *cSoftHdControl::Player; ///< dummy player instance /** ** Handle a key event. ** -** @param key key pressed +** @param key key pressed */ -eOSState cSoftHdControl::ProcessKey(eKeys key) -{ - if (SuspendMode == SUSPEND_NORMAL && (!ISMODELESSKEY(key) - || key == kMenu || key == kBack || key == kStop)) { +eOSState cSoftHdControl::ProcessKey(eKeys key) { + if (SuspendMode == SUSPEND_NORMAL && (!ISMODELESSKEY(key) || key == kMenu || key == kBack || key == kStop)) { delete Player; Player = NULL; @@ -1715,16 +1647,12 @@ eOSState cSoftHdControl::ProcessKey(eKeys key) /** ** Player control constructor. */ -cSoftHdControl::cSoftHdControl(void) -:cControl(Player = new cSoftHdPlayer) -{ -} +cSoftHdControl::cSoftHdControl(void) : cControl(Player = new cSoftHdPlayer) {} /** ** Player control destructor. 
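// A minimal, hypothetical sketch (not plugin code): Store() above packs the
// three pass-through switches into one bit mask built from the plugin's
// existing CodecPCM/CodecAC3/CodecEAC3 flags; that same mask is what
// CodecSetAudioPassthrough() receives elsewhere in this file. Querying a
// single codec from the mask is then a plain bit test:
static inline bool HasPassthroughFor(int mask, int codec_bit)
{
    return (mask & codec_bit) != 0; // e.g. HasPassthroughFor(ConfigAudioPassthrough, CodecAC3)
}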
*/ -cSoftHdControl::~cSoftHdControl() -{ +cSoftHdControl::~cSoftHdControl() { delete Player; Player = NULL; @@ -1743,8 +1671,8 @@ cSoftHdControl::~cSoftHdControl() #ifdef USE_PIP -extern "C" void DelPip(void); ///< remove PIP -static int PipAltPosition; ///< flag alternative position +extern "C" void DelPip(void); ///< remove PIP +static int PipAltPosition; ///< flag alternative position ////////////////////////////////////////////////////////////////////////////// // cReceiver @@ -1755,14 +1683,14 @@ static int PipAltPosition; ///< flag alternative position /** ** Receiver class for PIP mode. */ -class cSoftReceiver:public cReceiver -{ +class cSoftReceiver : public cReceiver { protected: virtual void Activate(bool); virtual void Receive(const uchar *, int); + public: - cSoftReceiver(const cChannel *); ///< receiver constructor - virtual ~ cSoftReceiver(); ///< receiver destructor + cSoftReceiver(const cChannel *); ///< receiver constructor + virtual ~cSoftReceiver(); ///< receiver destructor }; /** @@ -1770,8 +1698,7 @@ class cSoftReceiver:public cReceiver ** ** @param channel channel to receive */ -cSoftReceiver::cSoftReceiver(const cChannel * channel):cReceiver(NULL, MINPRIORITY) -{ +cSoftReceiver::cSoftReceiver(const cChannel *channel) : cReceiver(NULL, MINPRIORITY) { // cReceiver::channelID not setup, this can cause trouble // we want video only AddPid(channel->Vpid()); @@ -1780,18 +1707,14 @@ cSoftReceiver::cSoftReceiver(const cChannel * channel):cReceiver(NULL, MINPRIORI /** ** Receiver destructor. */ -cSoftReceiver::~cSoftReceiver() -{ - Detach(); -} +cSoftReceiver::~cSoftReceiver() { Detach(); } /** ** Called before the receiver gets attached or detached. ** -** @param on flag attached, detached +** @param on flag attached, detached */ -void cSoftReceiver::Activate(bool on) -{ +void cSoftReceiver::Activate(bool on) { if (on) { int width; int height; @@ -1800,17 +1723,17 @@ void cSoftReceiver::Activate(bool on) GetOsdSize(&width, &height, &video_aspect); if (PipAltPosition) { PipStart((ConfigPipAltVideoX * width) / 100, (ConfigPipAltVideoY * height) / 100, - ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / 100 : width, - ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / 100 : height, - (ConfigPipAltX * width) / 100, (ConfigPipAltY * height) / 100, - ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, - ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); + ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / 100 : width, + ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / 100 : height, + (ConfigPipAltX * width) / 100, (ConfigPipAltY * height) / 100, + ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, + ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); } else { PipStart((ConfigPipVideoX * width) / 100, (ConfigPipVideoY * height) / 100, - ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, - ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / 100 : height, (ConfigPipX * width) / 100, - (ConfigPipY * height) / 100, ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, - ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); + ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, + ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / 100 : height, (ConfigPipX * width) / 100, + (ConfigPipY * height) / 100, ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, + ConfigPipHeight ? 
(ConfigPipHeight * height) / 100 : height); } } else { PipStop(); @@ -1824,8 +1747,7 @@ void cSoftReceiver::Activate(bool on) /// @param size number of payload data bytes /// @param is_start flag, start of pes packet /// -static void PipPesParse(const uint8_t * data, int size, int is_start) -{ +static void PipPesParse(const uint8_t *data, int size, int is_start) { static uint8_t *pes_buf; static int pes_size; static int pes_index; @@ -1834,13 +1756,13 @@ static void PipPesParse(const uint8_t * data, int size, int is_start) if (!pes_buf) { pes_size = 500 * 1024 * 1024; - pes_buf = (uint8_t *) malloc(pes_size); - if (!pes_buf) { // out of memory, should never happen + pes_buf = (uint8_t *)malloc(pes_size); + if (!pes_buf) { // out of memory, should never happen return; } pes_index = 0; } - if (is_start) { // start of pes packet + if (is_start) { // start of pes packet if (pes_index) { if (0) { fprintf(stderr, "pip: PES packet %8d %02x%02x\n", pes_index, pes_buf[2], pes_buf[3]); @@ -1862,8 +1784,8 @@ static void PipPesParse(const uint8_t * data, int size, int is_start) if (pes_index + size > pes_size) { pes_size = (pes_index + size) * 2; } - pes_buf = (uint8_t *) realloc(pes_buf, pes_size); - if (!pes_buf) { // out of memory, should never happen + pes_buf = (uint8_t *)realloc(pes_buf, pes_size); + if (!pes_buf) { // out of memory, should never happen return; } } @@ -1871,10 +1793,10 @@ static void PipPesParse(const uint8_t * data, int size, int is_start) pes_index += size; } - /// Transport stream packet size -#define TS_PACKET_SIZE 188 - /// Transport stream packet sync byte -#define TS_PACKET_SYNC 0x47 +/// Transport stream packet size +#define TS_PACKET_SIZE 188 +/// Transport stream packet sync byte +#define TS_PACKET_SYNC 0x47 /** ** Receive TS packet from device. @@ -1882,8 +1804,7 @@ static void PipPesParse(const uint8_t * data, int size, int is_start) ** @param data ts packet ** @param size size (#TS_PACKET_SIZE=188) of tes packet */ -void cSoftReceiver::Receive(const uchar * data, int size) -{ +void cSoftReceiver::Receive(const uchar *data, int size) { const uint8_t *p; p = data; @@ -1895,7 +1816,7 @@ void cSoftReceiver::Receive(const uchar * data, int size) // FIXME: kill all buffers return; } - if (p[1] & 0x80) { // error indicatord + if (p[1] & 0x80) { // error indicatord dsyslog("[softhddev]tsdemux: transport error\n"); // FIXME: kill all buffers goto next_packet; @@ -1905,18 +1826,18 @@ void cSoftReceiver::Receive(const uchar * data, int size) pid = (p[1] & 0x1F) << 8 | p[2]; fprintf(stderr, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "", - p[3] & 0x10 ? " payload" : ""); + p[3] & 0x10 ? 
" payload" : ""); } // skip adaptation field - switch (p[3] & 0x30) { // adaption field - case 0x00: // reserved - case 0x20: // adaptation field only + switch (p[3] & 0x30) { // adaption field + case 0x00: // reserved + case 0x20: // adaptation field only default: goto next_packet; - case 0x10: // only payload + case 0x10: // only payload payload = 4; break; - case 0x30: // skip adapation field + case 0x30: // skip adapation field payload = 5 + p[4]; // illegal length, ignore packet if (payload >= TS_PACKET_SIZE) { @@ -1928,7 +1849,7 @@ void cSoftReceiver::Receive(const uchar * data, int size) PipPesParse(p + payload, TS_PACKET_SIZE - payload, p[1] & 0x40); - next_packet: + next_packet: p += TS_PACKET_SIZE; size -= TS_PACKET_SIZE; } @@ -1936,15 +1857,14 @@ void cSoftReceiver::Receive(const uchar * data, int size) ////////////////////////////////////////////////////////////////////////////// -static cSoftReceiver *PipReceiver; ///< PIP receiver -static int PipChannelNr; ///< last PIP channel number -static const cChannel *PipChannel; ///< current PIP channel +static cSoftReceiver *PipReceiver; ///< PIP receiver +static int PipChannelNr; ///< last PIP channel number +static const cChannel *PipChannel; ///< current PIP channel /** ** Stop PIP. */ -extern "C" void DelPip(void) -{ +extern "C" void DelPip(void) { delete PipReceiver; PipReceiver = NULL; @@ -1954,10 +1874,9 @@ extern "C" void DelPip(void) /** ** Prepare new PIP. ** -** @param channel_nr channel number +** @param channel_nr channel number */ -static void NewPip(int channel_nr) -{ +static void NewPip(int channel_nr) { const cChannel *channel; cDevice *device; cSoftReceiver *receiver; @@ -1974,8 +1893,8 @@ static void NewPip(int channel_nr) channel_nr = cDevice::CurrentChannel(); } LOCK_CHANNELS_READ; - if (channel_nr && (channel = Channels->GetByNumber(channel_nr)) - && (device = cDevice::GetDevice(channel, 0, false, false))) { + if (channel_nr && (channel = Channels->GetByNumber(channel_nr)) && + (device = cDevice::GetDevice(channel, 0, false, false))) { DelPip(); @@ -1991,14 +1910,13 @@ static void NewPip(int channel_nr) /** ** Toggle PIP on/off. */ -static void TogglePip(void) -{ +static void TogglePip(void) { if (PipReceiver) { int attached; attached = PipReceiver->IsAttached(); DelPip(); - if (attached) { // turn off only if last PIP was on + if (attached) { // turn off only if last PIP was on return; } } @@ -2008,31 +1926,28 @@ static void TogglePip(void) /** ** Switch PIP to next available channel. ** -** @param direction direction of channel switch +** @param direction direction of channel switch */ -static void PipNextAvailableChannel(int direction) -{ +static void PipNextAvailableChannel(int direction) { const cChannel *channel; const cChannel *first; channel = PipChannel; first = channel; - DelPip(); // disable PIP to free the device + DelPip(); // disable PIP to free the device LOCK_CHANNELS_READ; while (channel) { bool ndr; cDevice *device; - channel = direction > 0 ? Channels->Next(channel) - : Channels->Prev(channel); + channel = direction > 0 ? Channels->Next(channel) : Channels->Prev(channel); if (!channel && Setup.ChannelsWrap) { channel = direction > 0 ? 
Channels->First() : Channels->Last(); } - if (channel && !channel->GroupSep() - && (device = cDevice::GetDevice(channel, 0, false, true)) - && device->ProvidesChannel(channel, 0, &ndr) && !ndr) { + if (channel && !channel->GroupSep() && (device = cDevice::GetDevice(channel, 0, false, true)) && + device->ProvidesChannel(channel, 0, &ndr) && !ndr) { NewPip(channel->Number()); return; @@ -2047,8 +1962,7 @@ static void PipNextAvailableChannel(int direction) /** ** Swap PIP channels. */ -static void SwapPipChannels(void) -{ +static void SwapPipChannels(void) { const cChannel *channel; channel = PipChannel; @@ -2066,30 +1980,31 @@ static void SwapPipChannels(void) /** ** Swap PIP position. */ -static void SwapPipPosition(void) -{ +static void SwapPipPosition(void) { int width; int height; double video_aspect; PipAltPosition ^= 1; - if (!PipReceiver) { // no PIP visible, no update needed + if (!PipReceiver) { // no PIP visible, no update needed return; } GetOsdSize(&width, &height, &video_aspect); if (PipAltPosition) { PipSetPosition((ConfigPipAltVideoX * width) / 100, (ConfigPipAltVideoY * height) / 100, - ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / 100 : width, - ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / 100 : height, (ConfigPipAltX * width) / 100, - (ConfigPipAltY * height) / 100, ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, - ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); + ConfigPipAltVideoWidth ? (ConfigPipAltVideoWidth * width) / 100 : width, + ConfigPipAltVideoHeight ? (ConfigPipAltVideoHeight * height) / 100 : height, + (ConfigPipAltX * width) / 100, (ConfigPipAltY * height) / 100, + ConfigPipAltWidth ? (ConfigPipAltWidth * width) / 100 : width, + ConfigPipAltHeight ? (ConfigPipAltHeight * height) / 100 : height); } else { PipSetPosition((ConfigPipVideoX * width) / 100, (ConfigPipVideoY * height) / 100, - ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, - ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / 100 : height, (ConfigPipX * width) / 100, - (ConfigPipY * height) / 100, ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, - ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); + ConfigPipVideoWidth ? (ConfigPipVideoWidth * width) / 100 : width, + ConfigPipVideoHeight ? (ConfigPipVideoHeight * height) / 100 : height, + (ConfigPipX * width) / 100, (ConfigPipY * height) / 100, + ConfigPipWidth ? (ConfigPipWidth * width) / 100 : width, + ConfigPipHeight ? (ConfigPipHeight * height) / 100 : height); } } @@ -2102,34 +2017,31 @@ static void SwapPipPosition(void) /** ** Hotkey parsing state machine. */ -typedef enum -{ - HksInitial, ///< initial state - HksBlue, ///< blue button pressed - HksBlue1, ///< blue and 1 number pressed - HksRed, ///< red button pressed +typedef enum { + HksInitial, ///< initial state + HksBlue, ///< blue button pressed + HksBlue1, ///< blue and 1 number pressed + HksRed, ///< red button pressed } HkState; /** ** Soft device plugin menu class. 
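// A minimal, hypothetical sketch (not plugin code) of the TS header handling
// that cSoftReceiver::Receive() above performs before handing the payload to
// PipPesParse(): check the sync byte and the transport-error flag, then derive
// the payload offset from the adaptation-field control bits. Only the
// TS_PACKET_SIZE / TS_PACKET_SYNC constants defined above are assumed.
static int TsPayloadOffset(const uint8_t *p)
{
    if (p[0] != TS_PACKET_SYNC) {  // not a transport stream packet
        return -1;
    }
    if (p[1] & 0x80) {             // transport error indicator set
        return -1;
    }
    switch (p[3] & 0x30) {         // adaptation field control
        case 0x10:                 // payload only
            return 4;
        case 0x30: {               // adaptation field followed by payload
            int payload = 5 + p[4];
            return payload < TS_PACKET_SIZE ? payload : -1; // reject illegal length
        }
        default:                   // reserved or adaptation field only
            return -1;
    }
}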
*/ -class cSoftHdMenu:public cOsdMenu -{ +class cSoftHdMenu : public cOsdMenu { private: - HkState HotkeyState; ///< current hot-key state - int HotkeyCode; ///< current hot-key code - void Create(void); ///< create plugin main menu + HkState HotkeyState; ///< current hot-key state + int HotkeyCode; ///< current hot-key code + void Create(void); ///< create plugin main menu public: - cSoftHdMenu(const char *, int = 0, int = 0, int = 0, int = 0, int = 0); - virtual ~ cSoftHdMenu(); + cSoftHdMenu(const char *, int = 0, int = 0, int = 0, int = 0, int = 0); + virtual ~cSoftHdMenu(); virtual eOSState ProcessKey(eKeys); }; /** ** Create main menu. */ -void cSoftHdMenu::Create(void) -{ +void cSoftHdMenu::Create(void) { int current; int missed; int duped; @@ -2141,8 +2053,8 @@ void cSoftHdMenu::Create(void) int eotf; char *colorstr, *eotfstr; - current = Current(); // get current menu item index - Clear(); // clear the menu + current = Current(); // get current menu item index + Clear(); // clear the menu SetHasHotkeys(); @@ -2152,7 +2064,7 @@ void cSoftHdMenu::Create(void) Add(new cOsdItem(hk(tr("Suspend SoftHdDevice")), osUser1)); } #ifdef PLACEBO - Add(new cOsdItem(hk(tr("Toggle LUT on/off")), osUser2)); + Add(new cOsdItem(hk(tr("Toggle LUT on/off")), osUser2)); #endif #ifdef USE_PIP if (PipReceiver) { @@ -2184,7 +2096,7 @@ void cSoftHdMenu::Create(void) eotfstr = strdup("BT 1886"); break; case AVCOL_SPC_BT709: - case AVCOL_SPC_UNSPECIFIED: // comes with UHD + case AVCOL_SPC_UNSPECIFIED: // comes with UHD colorstr = strdup("BT 709"); eotfstr = strdup("BT 1886"); break; @@ -2192,26 +2104,27 @@ void cSoftHdMenu::Create(void) colorstr = strdup("BT 2020"); eotfstr = strdup("HDR-HLG"); break; - default: // fallback + default: // fallback colorstr = strdup("Fallback BT 709"); eotfstr = strdup("BT 1886"); break; } - Add(new cOsdItem(cString::sprintf(tr(" Frames missed(%d) duped(%d) dropped(%d) total(%d)"), missed, duped, dropped, - counter), osUnknown, false)); + Add(new cOsdItem( + cString::sprintf(tr(" Frames missed(%d) duped(%d) dropped(%d) total(%d)"), missed, duped, dropped, counter), + osUnknown, false)); Add(new cOsdItem(cString::sprintf(tr(" Video %dx%d Color: %s Gamma: %s"), width, height, colorstr, eotfstr), - osUnknown, false)); - // Add(new cOsdItem(cString::sprintf(tr(" Frame Process time %2.2fms"), frametime), osUnknown, false)); - SetCurrent(Get(current)); // restore selected menu entry - Display(); // display build menu + osUnknown, false)); + // Add(new cOsdItem(cString::sprintf(tr(" Frame Process time %2.2fms"), + // frametime), osUnknown, false)); + SetCurrent(Get(current)); // restore selected menu entry + Display(); // display build menu } /** ** Soft device menu constructor. */ cSoftHdMenu::cSoftHdMenu(const char *title, int c0, int c1, int c2, int c3, int c4) -:cOsdMenu(title, c0, c1, c2, c3, c4) -{ + : cOsdMenu(title, c0, c1, c2, c3, c4) { HotkeyState = HksInitial; Create(); @@ -2220,30 +2133,27 @@ cSoftHdMenu::cSoftHdMenu(const char *title, int c0, int c1, int c2, int c3, int /** ** Soft device menu destructor. */ -cSoftHdMenu::~cSoftHdMenu() -{ -} +cSoftHdMenu::~cSoftHdMenu() {} /** ** Handle hot key commands. 
** ** @param code numeric hot key code */ -static void HandleHotkey(int code) -{ +static void HandleHotkey(int code) { switch (code) { - case 10: // disable pass-through + case 10: // disable pass-through AudioPassthroughState = 0; CodecSetAudioPassthrough(0); Skins.QueueMessage(mtInfo, tr("pass-through disabled")); break; - case 11: // enable pass-through + case 11: // enable pass-through // note: you can't enable, without configured pass-through AudioPassthroughState = 1; CodecSetAudioPassthrough(ConfigAudioPassthrough); Skins.QueueMessage(mtInfo, tr("pass-through enabled")); break; - case 12: // toggle pass-through + case 12: // toggle pass-through AudioPassthroughState ^= 1; if (AudioPassthroughState) { CodecSetAudioPassthrough(ConfigAudioPassthrough); @@ -2253,12 +2163,12 @@ static void HandleHotkey(int code) Skins.QueueMessage(mtInfo, tr("pass-through disabled")); } break; - case 13: // decrease audio delay + case 13: // decrease audio delay ConfigVideoAudioDelay -= 10; VideoSetAudioDelay(ConfigVideoAudioDelay); Skins.QueueMessage(mtInfo, cString::sprintf(tr("audio delay changed to %d"), ConfigVideoAudioDelay)); break; - case 14: // increase audio delay + case 14: // increase audio delay ConfigVideoAudioDelay += 10; VideoSetAudioDelay(ConfigVideoAudioDelay); Skins.QueueMessage(mtInfo, cString::sprintf(tr("audio delay changed to %d"), ConfigVideoAudioDelay)); @@ -2275,34 +2185,34 @@ static void HandleHotkey(int code) ResetChannelId(); break; - case 20: // disable full screen + case 20: // disable full screen VideoSetFullscreen(0); break; - case 21: // enable full screen + case 21: // enable full screen VideoSetFullscreen(1); break; - case 22: // toggle full screen + case 22: // toggle full screen VideoSetFullscreen(-1); break; - case 30: // change 4:3 -> window mode + case 30: // change 4:3 -> window mode case 31: case 32: VideoSet4to3DisplayFormat(code - 30); break; - case 39: // rotate 4:3 -> window mode + case 39: // rotate 4:3 -> window mode VideoSet4to3DisplayFormat(-1); break; - case 40: // change 16:9 -> window mode + case 40: // change 16:9 -> window mode case 41: case 42: VideoSetOtherDisplayFormat(code - 40); break; - case 49: // rotate 16:9 -> window mode + case 49: // rotate 16:9 -> window mode VideoSetOtherDisplayFormat(-1); break; #ifdef USE_PIP - case 102: // PIP toggle + case 102: // PIP toggle TogglePip(); break; case 104: @@ -2332,26 +2242,25 @@ static void HandleHotkey(int code) /** ** Handle key event. 
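// A hypothetical summary (not plugin code) of the numeric codes consumed by
// HandleHotkey() above; cSoftHdMenu::ProcessKey() further below assembles them
// from remote-key sequences roughly as:
//
//   Blue d1 d2       -> code d1 * 10 + d2   e.g. Blue 1 2 -> 12 (toggle audio pass-through)
//   Blue d  Ok/enter -> code d              (single-digit form, per the HksBlue1 state comment)
//   Red  d           -> code 100 + d        e.g. Red 2 -> 102 (toggle PIP)
//
static inline int BlueHotkeyCode(int first_digit, int second_digit)
{
    return first_digit * 10 + second_digit; // mirrors the HksBlue1 handling below
}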
** -** @param key key event +** @param key key event */ -eOSState cSoftHdMenu::ProcessKey(eKeys key) -{ +eOSState cSoftHdMenu::ProcessKey(eKeys key) { eOSState state; - //dsyslog("[softhddev]%s: %x\n", __FUNCTION__, key); + // dsyslog("[softhddev]%s: %x\n", __FUNCTION__, key); switch (HotkeyState) { - case HksInitial: // initial state, waiting for hot key + case HksInitial: // initial state, waiting for hot key if (key == kBlue) { - HotkeyState = HksBlue; // blue button + HotkeyState = HksBlue; // blue button return osContinue; } if (key == kRed) { - HotkeyState = HksRed; // red button + HotkeyState = HksRed; // red button return osContinue; } break; - case HksBlue: // blue and first number + case HksBlue: // blue and first number if (k0 <= key && key <= k9) { HotkeyCode = key - k0; HotkeyState = HksBlue1; @@ -2359,7 +2268,7 @@ eOSState cSoftHdMenu::ProcessKey(eKeys key) } HotkeyState = HksInitial; break; - case HksBlue1: // blue and second number/enter + case HksBlue1: // blue and second number/enter if (k0 <= key && key <= k9) { HotkeyCode *= 10; HotkeyCode += key - k0; @@ -2376,7 +2285,7 @@ eOSState cSoftHdMenu::ProcessKey(eKeys key) } HotkeyState = HksInitial; break; - case HksRed: // red and first number + case HksRed: // red and first number if (k0 <= key && key <= k9) { HotkeyCode = 100 + key - k0; HotkeyState = HksInitial; @@ -2414,9 +2323,9 @@ eOSState cSoftHdMenu::ProcessKey(eKeys key) } return osEnd; #ifdef PLACEBO - case osUser2: - ToggleLUT(); - return osEnd; + case osUser2: + ToggleLUT(); + return osEnd; #endif #ifdef USE_PIP case osUser3: @@ -2450,28 +2359,18 @@ eOSState cSoftHdMenu::ProcessKey(eKeys key) // cDevice ////////////////////////////////////////////////////////////////////////////// -class cSoftHdDevice:public cDevice -{ +class cSoftHdDevice : public cDevice { public: cSoftHdDevice(void); - virtual ~ cSoftHdDevice(void); + virtual ~cSoftHdDevice(void); #ifdef CUVID - virtual cString DeviceName(void) const - { - return "softhdcuvid"; - } + virtual cString DeviceName(void) const { return "softhdcuvid"; } #endif -#if defined (VAAPI) && !defined (USE_DRM) - virtual cString DeviceName(void) const - { - return "softhdvaapi"; - } +#if defined(VAAPI) && !defined(USE_DRM) + virtual cString DeviceName(void) const { return "softhdvaapi"; } #endif -#if defined (VAAPI) && defined (USE_DRM) - virtual cString DeviceName(void) const - { - return "softhddrm"; - } +#if defined(VAAPI) && defined(USE_DRM) + virtual cString DeviceName(void) const { return "softhddrm"; } #endif virtual bool HasDecoder(void) const; virtual bool CanReplay(void) const; @@ -2512,9 +2411,10 @@ class cSoftHdDevice:public cDevice #ifdef USE_VDR_SPU // SPU facilities private: - cDvbSpuDecoder * spuDecoder; + cDvbSpuDecoder *spuDecoder; + public: - virtual cSpuDecoder * GetSpuDecoder(void); + virtual cSpuDecoder *GetSpuDecoder(void); #endif protected: @@ -2524,8 +2424,7 @@ class cSoftHdDevice:public cDevice /** ** Constructor device. */ -cSoftHdDevice::cSoftHdDevice(void) -{ +cSoftHdDevice::cSoftHdDevice(void) { // dsyslog("[softhddev]%s\n", __FUNCTION__); #ifdef USE_VDR_SPU @@ -2536,8 +2435,7 @@ cSoftHdDevice::cSoftHdDevice(void) /** ** Destructor device. */ -cSoftHdDevice::~cSoftHdDevice(void) -{ +cSoftHdDevice::~cSoftHdDevice(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); #ifdef USE_VDR_SPU delete spuDecoder; @@ -2547,10 +2445,9 @@ cSoftHdDevice::~cSoftHdDevice(void) /** ** Informs a device that it will be the primary device. 
** -** @param on flag if becoming or loosing primary +** @param on flag if becoming or loosing primary */ -void cSoftHdDevice::MakePrimaryDevice(bool on) -{ +void cSoftHdDevice::MakePrimaryDevice(bool on) { dsyslog("[softhddev]%s: %d\n", __FUNCTION__, on); cDevice::MakePrimaryDevice(on); @@ -2579,8 +2476,7 @@ void cSoftHdDevice::MakePrimaryDevice(bool on) ** @returns a pointer to the device's SPU decoder (or NULL, if this ** device doesn't have an SPU decoder) */ -cSpuDecoder *cSoftHdDevice::GetSpuDecoder(void) -{ +cSpuDecoder *cSoftHdDevice::GetSpuDecoder(void) { dsyslog("[softhddev]%s:\n", __FUNCTION__); if (!spuDecoder && IsPrimaryDevice()) { @@ -2594,26 +2490,19 @@ cSpuDecoder *cSoftHdDevice::GetSpuDecoder(void) /** ** Tells whether this device has a MPEG decoder. */ -bool cSoftHdDevice::HasDecoder(void) const -{ - return true; -} +bool cSoftHdDevice::HasDecoder(void) const { return true; } /** ** Returns true if this device can currently start a replay session. */ -bool cSoftHdDevice::CanReplay(void) const -{ - return true; -} +bool cSoftHdDevice::CanReplay(void) const { return true; } /** ** Sets the device into the given play mode. ** -** @param play_mode new play mode (Audio/Video/External...) +** @param play_mode new play mode (Audio/Video/External...) */ -bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode) -{ +bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode) { dsyslog("[softhddev]%s: %d\n", __FUNCTION__, play_mode); switch (play_mode) { @@ -2629,8 +2518,8 @@ bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode) case pmExtern_THIS_SHOULD_BE_AVOIDED: dsyslog("[softhddev] play mode external\n"); // FIXME: what if already suspended? - Setup.CurrentVolume = cDevice::CurrentVolume(); - Setup.Save(); + Setup.CurrentVolume = cDevice::CurrentVolume(); + Setup.Save(); Suspend(1, 1, 0); SuspendMode = SUSPEND_EXTERNAL; #ifdef USE_OPENGLOSD @@ -2650,20 +2539,19 @@ bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode) Resume(); SuspendMode = NOT_SUSPENDED; } - if (!cDevice::IsMute()) + if (!cDevice::IsMute()) SetVolume(cDevice::CurrentVolume(), true); - return::SetPlayMode(play_mode); + return ::SetPlayMode(play_mode); } /** ** Gets the current System Time Counter, which can be used to ** synchronize audio, video and subtitles. */ -int64_t cSoftHdDevice::GetSTC(void) -{ +int64_t cSoftHdDevice::GetSTC(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); - return::GetSTC(); + return ::GetSTC(); } /** @@ -2675,8 +2563,7 @@ int64_t cSoftHdDevice::GetSTC(void) ** @param speed trick speed ** @param forward flag forward direction */ -void cSoftHdDevice::TrickSpeed(int speed, bool forward) -{ +void cSoftHdDevice::TrickSpeed(int speed, bool forward) { dsyslog("[softhddev]%s: %d %d\n", __FUNCTION__, speed, forward); ::TrickSpeed(speed); @@ -2685,8 +2572,7 @@ void cSoftHdDevice::TrickSpeed(int speed, bool forward) /** ** Clears all video and audio data from the device. */ -void cSoftHdDevice::Clear(void) -{ +void cSoftHdDevice::Clear(void) { dsyslog("[softhddev] vom VDR kommt %s:\n", __FUNCTION__); cDevice::Clear(); @@ -2696,8 +2582,7 @@ void cSoftHdDevice::Clear(void) /** ** Sets the device into play mode (after a previous trick mode) */ -void cSoftHdDevice::Play(void) -{ +void cSoftHdDevice::Play(void) { dsyslog("[softhddev]%s:\n", __FUNCTION__); cDevice::Play(); @@ -2707,8 +2592,7 @@ void cSoftHdDevice::Play(void) /** ** Puts the device into "freeze frame" mode. 
*/ -void cSoftHdDevice::Freeze(void) -{ +void cSoftHdDevice::Freeze(void) { dsyslog("[softhddev]%s:\n", __FUNCTION__); cDevice::Freeze(); @@ -2718,8 +2602,7 @@ void cSoftHdDevice::Freeze(void) /** ** Turns off audio while replaying. */ -void cSoftHdDevice::Mute(void) -{ +void cSoftHdDevice::Mute(void) { dsyslog("[softhddev]%s:\n", __FUNCTION__); cDevice::Mute(); @@ -2732,11 +2615,10 @@ void cSoftHdDevice::Mute(void) ** @param data pes or ts data of a frame ** @param length length of data area */ -void cSoftHdDevice::StillPicture(const uchar * data, int length) -{ +void cSoftHdDevice::StillPicture(const uchar *data, int length) { dsyslog("[softhddev]%s: %s %p %d\n", __FUNCTION__, data[0] == 0x47 ? "ts" : "pes", data, length); - if (data[0] == 0x47) { // ts sync + if (data[0] == 0x47) { // ts sync cDevice::StillPicture(data, length); return; } @@ -2747,29 +2629,27 @@ void cSoftHdDevice::StillPicture(const uchar * data, int length) /** ** Check if the device is ready for further action. ** -** @param poller file handles (unused) -** @param timeout_ms timeout in ms to become ready +** @param poller file handles (unused) +** @param timeout_ms timeout in ms to become ready ** ** @retval true if ready ** @retval false if busy */ -bool cSoftHdDevice::Poll( __attribute__((unused)) cPoller & poller, int timeout_ms) -{ +bool cSoftHdDevice::Poll(__attribute__((unused)) cPoller &poller, int timeout_ms) { // dsyslog("[softhddev]%s: %d\n", __FUNCTION__, timeout_ms); - return::Poll(timeout_ms); + return ::Poll(timeout_ms); } /** ** Flush the device output buffers. ** -** @param timeout_ms timeout in ms to become ready +** @param timeout_ms timeout in ms to become ready */ -bool cSoftHdDevice::Flush(int timeout_ms) -{ +bool cSoftHdDevice::Flush(int timeout_ms) { dsyslog("[softhddev]%s: %d ms\n", __FUNCTION__, timeout_ms); - return::Flush(timeout_ms); + return ::Flush(timeout_ms); } // ---------------------------------------------------------------------------- @@ -2778,8 +2658,7 @@ bool cSoftHdDevice::Flush(int timeout_ms) ** Sets the video display format to the given one (only useful if this ** device has an MPEG decoder). */ -void cSoftHdDevice::SetVideoDisplayFormat(eVideoDisplayFormat video_display_format) -{ +void cSoftHdDevice::SetVideoDisplayFormat(eVideoDisplayFormat video_display_format) { dsyslog("[softhddev]%s: %d\n", __FUNCTION__, video_display_format); cDevice::SetVideoDisplayFormat(video_display_format); @@ -2788,10 +2667,10 @@ void cSoftHdDevice::SetVideoDisplayFormat(eVideoDisplayFormat video_display_form // called on every channel switch, no need to kill osd... if (last != video_display_format) { - last = video_display_format; + last = video_display_format; - ::VideoSetDisplayFormat(video_display_format); - cSoftOsd::Dirty = 1; + ::VideoSetDisplayFormat(video_display_format); + cSoftOsd::Dirty = 1; } #endif } @@ -2804,8 +2683,7 @@ void cSoftHdDevice::SetVideoDisplayFormat(eVideoDisplayFormat video_display_form ** ** @param video_format16_9 flag true 16:9. */ -void cSoftHdDevice::SetVideoFormat(bool video_format16_9) -{ +void cSoftHdDevice::SetVideoFormat(bool video_format16_9) { dsyslog("[softhddev]%s: %d\n", __FUNCTION__, video_format16_9); // FIXME: 4:3 / 16:9 video format not supported. @@ -2819,8 +2697,7 @@ void cSoftHdDevice::SetVideoFormat(bool video_format16_9) ** ** @note the video_aspect is used to scale the subtitle. 
*/ -void cSoftHdDevice::GetVideoSize(int &width, int &height, double &video_aspect) -{ +void cSoftHdDevice::GetVideoSize(int &width, int &height, double &video_aspect) { ::GetVideoSize(&width, &height, &video_aspect); } @@ -2829,8 +2706,7 @@ void cSoftHdDevice::GetVideoSize(int &width, int &height, double &video_aspect) ** ** FIXME: Called every second, for nothing (no OSD displayed)? */ -void cSoftHdDevice::GetOsdSize(int &width, int &height, double &pixel_aspect) -{ +void cSoftHdDevice::GetOsdSize(int &width, int &height, double &pixel_aspect) { ::GetOsdSize(&width, &height, &pixel_aspect); } @@ -2841,34 +2717,28 @@ void cSoftHdDevice::GetOsdSize(int &width, int &height, double &pixel_aspect) ** ** @param data exactly one complete PES packet (which is incomplete) ** @param length length of PES packet -** @param id type of audio data this packet holds +** @param id type of audio data this packet holds */ -int cSoftHdDevice::PlayAudio(const uchar * data, int length, uchar id) -{ - // dsyslog("[softhddev]%s: %p %p %d %d\n", __FUNCTION__, this, data, length, id); +int cSoftHdDevice::PlayAudio(const uchar *data, int length, uchar id) { + // dsyslog("[softhddev]%s: %p %p %d %d\n", __FUNCTION__, this, data, length, + // id); - return::PlayAudio(data, length, id); + return ::PlayAudio(data, length, id); } -void cSoftHdDevice::SetAudioTrackDevice( __attribute__((unused)) eTrackType type) -{ +void cSoftHdDevice::SetAudioTrackDevice(__attribute__((unused)) eTrackType type) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); } -void cSoftHdDevice::SetDigitalAudioDevice( __attribute__((unused)) - bool on) -{ +void cSoftHdDevice::SetDigitalAudioDevice(__attribute__((unused)) bool on) { // dsyslog("[softhddev]%s: %s\n", __FUNCTION__, on ? "true" : "false"); } -void cSoftHdDevice::SetAudioChannelDevice( __attribute__((unused)) - int audio_channel) -{ +void cSoftHdDevice::SetAudioChannelDevice(__attribute__((unused)) int audio_channel) { // dsyslog("[softhddev]%s: %d\n", __FUNCTION__, audio_channel); } -int cSoftHdDevice::GetAudioChannelDevice(void) -{ +int cSoftHdDevice::GetAudioChannelDevice(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); return 0; } @@ -2878,8 +2748,7 @@ int cSoftHdDevice::GetAudioChannelDevice(void) ** ** @param volume device volume */ -void cSoftHdDevice::SetVolumeDevice(int volume) -{ +void cSoftHdDevice::SetVolumeDevice(int volume) { dsyslog("[softhddev]%s: %d\n", __FUNCTION__, volume); ::SetVolumeDevice(volume); @@ -2893,10 +2762,9 @@ void cSoftHdDevice::SetVolumeDevice(int volume) ** @param data exactly one complete PES packet (which is incomplete) ** @param length length of PES packet */ -int cSoftHdDevice::PlayVideo(const uchar * data, int length) -{ +int cSoftHdDevice::PlayVideo(const uchar *data, int length) { // dsyslog("[softhddev]%s: %p %d\n", __FUNCTION__, data, length); - return::PlayVideo(data, length); + return ::PlayVideo(data, length); } #ifdef USE_TS_VIDEO @@ -2907,9 +2775,7 @@ int cSoftHdDevice::PlayVideo(const uchar * data, int length) ** @param data ts data buffer ** @param length ts packet length (188) */ -int cSoftHdDevice::PlayTsVideo(const uchar * data, int length) -{ -} +int cSoftHdDevice::PlayTsVideo(const uchar *data, int length) {} #endif @@ -2921,10 +2787,9 @@ int cSoftHdDevice::PlayTsVideo(const uchar * data, int length) ** @param data ts data buffer ** @param length ts packet length (188) */ -int cSoftHdDevice::PlayTsAudio(const uchar * data, int length) -{ +int cSoftHdDevice::PlayTsAudio(const uchar *data, int length) { #ifndef NO_TS_AUDIO - 
return::PlayTsAudio(data, length); + return ::PlayTsAudio(data, length); #else AudioPoller(); @@ -2943,18 +2808,17 @@ int cSoftHdDevice::PlayTsAudio(const uchar * data, int length) ** @param width number of horizontal pixels in the frame ** @param height number of vertical pixels in the frame */ -uchar *cSoftHdDevice::GrabImage(int &size, bool jpeg, int quality, int width, int height) -{ +uchar *cSoftHdDevice::GrabImage(int &size, bool jpeg, int quality, int width, int height) { dsyslog("[softhddev]%s: %d, %d, %d, %dx%d\n", __FUNCTION__, size, jpeg, quality, width, height); if (SuspendMode != NOT_SUSPENDED) { return NULL; } - if (quality < 0) { // caller should care, but fix it + if (quality < 0) { // caller should care, but fix it quality = 95; } - return::GrabImage(&size, jpeg, quality, width, height); + return ::GrabImage(&size, jpeg, quality, width, height); } /** @@ -2964,19 +2828,14 @@ uchar *cSoftHdDevice::GrabImage(int &size, bool jpeg, int quality, int width, in ** ** @returns the real rectangle or cRect:Null if invalid. */ -cRect cSoftHdDevice::CanScaleVideo(const cRect & rect, __attribute__((unused)) - int alignment) -{ - return rect; -} +cRect cSoftHdDevice::CanScaleVideo(const cRect &rect, __attribute__((unused)) int alignment) { return rect; } /** ** Scale the currently shown video. ** ** @param rect video window rectangle */ -void cSoftHdDevice::ScaleVideo(const cRect & rect) -{ +void cSoftHdDevice::ScaleVideo(const cRect &rect) { #ifdef OSD_DEBUG dsyslog("[softhddev]%s: %dx%d%+d%+d\n", __FUNCTION__, rect.Width(), rect.Height(), rect.X(), rect.Y()); #endif @@ -2986,20 +2845,18 @@ void cSoftHdDevice::ScaleVideo(const cRect & rect) /** ** Call rgb to jpeg for C Plugin. */ -extern "C" uint8_t * CreateJpeg(uint8_t * image, int *size, int quality, int width, int height) -{ - return (uint8_t *) RgbToJpeg((uchar *) image, width, height, *size, quality); +extern "C" uint8_t *CreateJpeg(uint8_t *image, int *size, int quality, int width, int height) { + return (uint8_t *)RgbToJpeg((uchar *)image, width, height, *size, quality); } ////////////////////////////////////////////////////////////////////////////// // cPlugin ////////////////////////////////////////////////////////////////////////////// -class cPluginSoftHdDevice:public cPlugin -{ +class cPluginSoftHdDevice : public cPlugin { public: cPluginSoftHdDevice(void); - virtual ~ cPluginSoftHdDevice(void); + virtual ~cPluginSoftHdDevice(void); virtual const char *Version(void); virtual const char *Description(void); virtual const char *CommandLineHelp(void); @@ -3024,16 +2881,14 @@ class cPluginSoftHdDevice:public cPlugin ** @note DON'T DO ANYTHING ELSE THAT MAY HAVE SIDE EFFECTS, REQUIRE GLOBAL ** VDR OBJECTS TO EXIST OR PRODUCE ANY OUTPUT! */ -cPluginSoftHdDevice::cPluginSoftHdDevice(void) -{ +cPluginSoftHdDevice::cPluginSoftHdDevice(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); } /** ** Clean up after yourself! */ -cPluginSoftHdDevice::~cPluginSoftHdDevice(void) -{ +cPluginSoftHdDevice::~cPluginSoftHdDevice(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); ::SoftHdDeviceExit(); @@ -3046,38 +2901,28 @@ cPluginSoftHdDevice::~cPluginSoftHdDevice(void) ** ** @returns version number as constant string. */ -const char *cPluginSoftHdDevice::Version(void) -{ - return VERSION; -} +const char *cPluginSoftHdDevice::Version(void) { return VERSION; } /** ** Return plugin short description. ** ** @returns short description as constant string. 
*/ -const char *cPluginSoftHdDevice::Description(void) -{ - return tr(DESCRIPTION); -} +const char *cPluginSoftHdDevice::Description(void) { return tr(DESCRIPTION); } /** ** Return a string that describes all known command line options. ** ** @returns command line help as constant string. */ -const char *cPluginSoftHdDevice::CommandLineHelp(void) -{ - return::CommandLineHelp(); -} +const char *cPluginSoftHdDevice::CommandLineHelp(void) { return ::CommandLineHelp(); } /** ** Process the command line arguments. */ -bool cPluginSoftHdDevice::ProcessArgs(int argc, char *argv[]) -{ +bool cPluginSoftHdDevice::ProcessArgs(int argc, char *argv[]) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); - return::ProcessArgs(argc, argv); + return ::ProcessArgs(argc, argv); } /** @@ -3087,13 +2932,13 @@ bool cPluginSoftHdDevice::ProcessArgs(int argc, char *argv[]) ** ** @returns true if any devices are available. */ -bool cPluginSoftHdDevice::Initialize(void) -{ +bool cPluginSoftHdDevice::Initialize(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); #if defined PLACEBO - const char *d; - d = cPlugin::ConfigDirectory("shaders"); - strcpy(MyConfigDir,d); + const char *d; + + d = cPlugin::ConfigDirectory("shaders"); + strcpy(MyConfigDir, d); #endif MyDevice = new cSoftHdDevice(); @@ -3103,8 +2948,7 @@ bool cPluginSoftHdDevice::Initialize(void) /** ** Start any background activities the plugin shall perform. */ -bool cPluginSoftHdDevice::Start(void) -{ +bool cPluginSoftHdDevice::Start(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); if (!MyDevice->IsPrimaryDevice()) { @@ -3120,9 +2964,9 @@ bool cPluginSoftHdDevice::Start(void) switch (::Start()) { case 1: - //cControl::Launch(new cSoftHdControl); - //cControl::Attach(); - // FIXME: VDR overwrites the control + // cControl::Launch(new cSoftHdControl); + // cControl::Attach(); + // FIXME: VDR overwrites the control SuspendMode = SUSPEND_NORMAL; break; case -1: @@ -3140,8 +2984,7 @@ bool cPluginSoftHdDevice::Start(void) ** Shutdown plugin. Stop any background activities the plugin is ** performing. */ -void cPluginSoftHdDevice::Stop(void) -{ +void cPluginSoftHdDevice::Stop(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); ::Stop(); @@ -3153,8 +2996,7 @@ void cPluginSoftHdDevice::Stop(void) /** ** Perform any cleanup or other regular tasks. */ -void cPluginSoftHdDevice::Housekeeping(void) -{ +void cPluginSoftHdDevice::Housekeeping(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); // check if user is inactive, automatic enter suspend mode @@ -3177,8 +3019,7 @@ void cPluginSoftHdDevice::Housekeeping(void) /** ** Create main menu entry. */ -const char *cPluginSoftHdDevice::MainMenuEntry(void) -{ +const char *cPluginSoftHdDevice::MainMenuEntry(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); return ConfigHideMainMenuEntry ? NULL : tr(MAINMENUENTRY); @@ -3187,8 +3028,7 @@ const char *cPluginSoftHdDevice::MainMenuEntry(void) /** ** Perform the action when selected from the main VDR menu. */ -cOsdObject *cPluginSoftHdDevice::MainMenuAction(void) -{ +cOsdObject *cPluginSoftHdDevice::MainMenuAction(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); return new cSoftHdMenu("SoftHdDevice"); @@ -3198,8 +3038,7 @@ cOsdObject *cPluginSoftHdDevice::MainMenuAction(void) ** Called for every plugin once during every cycle of VDR's main program ** loop. 
*/ -void cPluginSoftHdDevice::MainThreadHook(void) -{ +void cPluginSoftHdDevice::MainThreadHook(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); if (DoMakePrimary) { @@ -3214,8 +3053,7 @@ void cPluginSoftHdDevice::MainThreadHook(void) /** ** Return our setup menu. */ -cMenuSetupPage *cPluginSoftHdDevice::SetupMenu(void) -{ +cMenuSetupPage *cPluginSoftHdDevice::SetupMenu(void) { // dsyslog("[softhddev]%s:\n", __FUNCTION__); return new cMenuSetupSoft; @@ -3229,8 +3067,7 @@ cMenuSetupPage *cPluginSoftHdDevice::SetupMenu(void) ** ** @returns true if the parameter is supported. */ -bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value) -{ +bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value) { int i; // dsyslog("[softhddev]%s: '%s' = '%s'\n", __FUNCTION__, name, value); @@ -3331,7 +3168,7 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value) VideoSetGamma(ConfigGamma); return true; } - if (!strcasecmp(name, "Temperature")) { + if (!strcasecmp(name, "Temperature")) { int i; i = atoi(value); @@ -3357,8 +3194,8 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value) } #if 0 if (!strcasecmp(name, "ScalerTest")) { - VideoSetScalerTest(ConfigScalerTest = atoi(value)); - return true; + VideoSetScalerTest(ConfigScalerTest = atoi(value)); + return true; } #endif for (i = 0; i < RESOLUTIONS; ++i) { @@ -3567,17 +3404,16 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value) /** ** Receive requests or messages. ** -** @param id unique identification string that identifies the -** service protocol +** @param id unique identification string that identifies the +** service protocol ** @param data custom data structure */ -bool cPluginSoftHdDevice::Service(const char *id, void *data) -{ +bool cPluginSoftHdDevice::Service(const char *id, void *data) { // dsyslog("[softhddev]%s: id %s\n", __FUNCTION__, id); if (strcmp(id, OSD_3DMODE_SERVICE) == 0) { SoftHDDevice_Osd3DModeService_v1_0_t *r; - r = (SoftHDDevice_Osd3DModeService_v1_0_t *) data; + r = (SoftHDDevice_Osd3DModeService_v1_0_t *)data; VideoSetOsd3DMode(r->Mode); return true; } @@ -3594,14 +3430,14 @@ bool cPluginSoftHdDevice::Service(const char *id, void *data) return false; } - SoftHDDevice_AtmoGrabService_v1_0_t *r = (SoftHDDevice_AtmoGrabService_v1_0_t *) data; + SoftHDDevice_AtmoGrabService_v1_0_t *r = (SoftHDDevice_AtmoGrabService_v1_0_t *)data; - if (r->structSize != sizeof(SoftHDDevice_AtmoGrabService_v1_0_t) - || r->analyseSize < 64 || r->analyseSize > 256 || r->clippedOverscan < 0 || r->clippedOverscan > 200) { + if (r->structSize != sizeof(SoftHDDevice_AtmoGrabService_v1_0_t) || r->analyseSize < 64 || + r->analyseSize > 256 || r->clippedOverscan < 0 || r->clippedOverscan > 200) { return false; } - width = r->analyseSize * -1; // Internal marker for Atmo grab service + width = r->analyseSize * -1; // Internal marker for Atmo grab service height = r->clippedOverscan; r->img = VideoGrabService(&r->imgSize, &width, &height); @@ -3625,7 +3461,7 @@ bool cPluginSoftHdDevice::Service(const char *id, void *data) return false; } - r = (SoftHDDevice_AtmoGrabService_v1_1_t *) data; + r = (SoftHDDevice_AtmoGrabService_v1_1_t *)data; r->img = VideoGrabService(&r->size, &r->width, &r->height); if (!r->img) { return false; @@ -3644,44 +3480,67 @@ bool cPluginSoftHdDevice::Service(const char *id, void *data) ** SVDRP commands help text. ** FIXME: translation? 
*/ -static const char *SVDRPHelpText[] = { - "SUSP\n" "\040 Suspend plugin.\n\n" " The plugin is suspended to save energie. Depending on the setup\n" - " 'softhddevice.Suspend.Close = 0' only the video and audio output\n" - " is stopped or with 'softhddevice.Suspend.Close = 1' the video\n" " and audio devices are closed.\n" - " If 'softhddevice.Suspend.X11 = 1' is set and the X11 server was\n" - " started by the plugin, the X11 server would also be closed.\n" - " (Stopping X11 while suspended isn't supported yet)\n", - "RESU\n" "\040 Resume plugin.\n\n" " Resume the suspended plugin. The plugin could be suspended by\n" - " the command line option '-s' or by a previous SUSP command.\n" - " If the x11 server was stopped by the plugin, it will be\n" " restarted.", - "DETA\n" "\040 Detach plugin.\n\n" " The plugin will be detached from the audio, video and DVB\n" - " devices. Other programs or plugins can use them now.\n", - "ATTA <-d display> <-a audio> <-p pass>\n" " Attach plugin.\n\n" - " Attach the plugin to audio, video and DVB devices. Use:\n" - " -d display\tdisplay of x11 server (fe. :0.0)\n" - " -a audio\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n" - " -p pass\t\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n", - "PRIM \n" " Make the primary device.\n\n" - " is the number of device. Without number softhddevice becomes\n" - " the primary device. If becoming primary, the plugin is attached\n" - " to the devices. If loosing primary, the plugin is detached from\n" " the devices.", - "HOTK key\n" " Execute hotkey.\n\n" " key is the hotkey number, following are supported:\n" - " 10: disable audio pass-through\n" " 11: enable audio pass-through\n" - " 12: toggle audio pass-through\n" " 13: decrease audio delay by 10ms\n" - " 14: increase audio delay by 10ms\n" " 15: toggle ac3 mixdown\n" - " 20: disable fullscreen\n\040 21: enable fullscreen\n" " 22: toggle fullscreen\n" - " 30: stretch 4:3 to display\n\040 31: pillar box 4:3 in display\n" - " 32: center cut-out 4:3 to display\n" " 39: rotate 4:3 to display zoom mode\n" - " 40: stretch other aspect ratios to display\n" " 41: letter box other aspect ratios in display\n" - " 42: center cut-out other aspect ratios to display\n" - " 49: rotate other aspect ratios to display zoom mode\n", - "STAT\n" "\040 Display SuspendMode of the plugin.\n\n" " reply code is 910 + SuspendMode\n" - " SUSPEND_EXTERNAL == -1 (909)\n" " NOT_SUSPENDED == 0 (910)\n" - " SUSPEND_NORMAL == 1 (911)\n" " SUSPEND_DETACHED == 2 (912)\n", - "RAIS\n" "\040 Raise softhddevice window\n\n" " If Xserver is not started by softhddevice, the window which\n" - " contains the softhddevice frontend will be raised to the front.\n", - NULL -}; +static const char *SVDRPHelpText[] = {"SUSP\n" + "\040 Suspend plugin.\n\n" + " The plugin is suspended to save energie. Depending on the setup\n" + " 'softhddevice.Suspend.Close = 0' only the video and audio output\n" + " is stopped or with 'softhddevice.Suspend.Close = 1' the video\n" + " and audio devices are closed.\n" + " If 'softhddevice.Suspend.X11 = 1' is set and the X11 server was\n" + " started by the plugin, the X11 server would also be closed.\n" + " (Stopping X11 while suspended isn't supported yet)\n", + "RESU\n" + "\040 Resume plugin.\n\n" + " Resume the suspended plugin. 
The plugin could be suspended by\n" + " the command line option '-s' or by a previous SUSP command.\n" + " If the x11 server was stopped by the plugin, it will be\n" + " restarted.", + "DETA\n" + "\040 Detach plugin.\n\n" + " The plugin will be detached from the audio, video and DVB\n" + " devices. Other programs or plugins can use them now.\n", + "ATTA <-d display> <-a audio> <-p pass>\n" + " Attach plugin.\n\n" + " Attach the plugin to audio, video and DVB devices. Use:\n" + " -d display\tdisplay of x11 server (fe. :0.0)\n" + " -a audio\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n" + " -p pass\t\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n", + "PRIM \n" + " Make the primary device.\n\n" + " is the number of device. Without number softhddevice becomes\n" + " the primary device. If becoming primary, the plugin is attached\n" + " to the devices. If loosing primary, the plugin is detached from\n" + " the devices.", + "HOTK key\n" + " Execute hotkey.\n\n" + " key is the hotkey number, following are supported:\n" + " 10: disable audio pass-through\n" + " 11: enable audio pass-through\n" + " 12: toggle audio pass-through\n" + " 13: decrease audio delay by 10ms\n" + " 14: increase audio delay by 10ms\n" + " 15: toggle ac3 mixdown\n" + " 20: disable fullscreen\n\040 21: enable fullscreen\n" + " 22: toggle fullscreen\n" + " 30: stretch 4:3 to display\n\040 31: pillar box 4:3 in display\n" + " 32: center cut-out 4:3 to display\n" + " 39: rotate 4:3 to display zoom mode\n" + " 40: stretch other aspect ratios to display\n" + " 41: letter box other aspect ratios in display\n" + " 42: center cut-out other aspect ratios to display\n" + " 49: rotate other aspect ratios to display zoom mode\n", + "STAT\n" + "\040 Display SuspendMode of the plugin.\n\n" + " reply code is 910 + SuspendMode\n" + " SUSPEND_EXTERNAL == -1 (909)\n" + " NOT_SUSPENDED == 0 (910)\n" + " SUSPEND_NORMAL == 1 (911)\n" + " SUSPEND_DETACHED == 2 (912)\n", + "RAIS\n" + "\040 Raise softhddevice window\n\n" + " If Xserver is not started by softhddevice, the window which\n" + " contains the softhddevice frontend will be raised to the front.\n", + NULL}; /** ** Return SVDRP commands help pages. @@ -3689,21 +3548,17 @@ static const char *SVDRPHelpText[] = { ** return a pointer to a list of help strings for all of the plugin's ** SVDRP commands. */ -const char **cPluginSoftHdDevice::SVDRPHelpPages(void) -{ - return SVDRPHelpText; -} +const char **cPluginSoftHdDevice::SVDRPHelpPages(void) { return SVDRPHelpText; } /** ** Handle SVDRP commands. 
** -** @param command SVDRP command -** @param option all command arguments -** @param reply_code reply code +** @param command SVDRP command +** @param option all command arguments +** @param reply_code reply code */ -cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *option, __attribute__((unused)) - int &reply_code) -{ +cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *option, + __attribute__((unused)) int &reply_code) { if (!strcasecmp(command, "STAT")) { reply_code = 910 + SuspendMode; switch (SuspendMode) { @@ -3718,7 +3573,7 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *optio } } if (!strcasecmp(command, "SUSP")) { - if (cSoftHdControl::Player) { // already suspended + if (cSoftHdControl::Player) { // already suspended return "SoftHdDevice already suspended"; } if (SuspendMode != NOT_SUSPENDED) { @@ -3744,8 +3599,8 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *optio if (ShutdownHandler.GetUserInactiveTime()) { ShutdownHandler.SetUserInactiveTimeout(); } - if (cSoftHdControl::Player) { // suspended - cControl::Shutdown(); // not need, if not suspended + if (cSoftHdControl::Player) { // suspended + cControl::Shutdown(); // not need, if not suspended } Resume(); SuspendMode = NOT_SUSPENDED; @@ -3755,7 +3610,7 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *optio if (SuspendMode == SUSPEND_DETACHED) { return "SoftHdDevice already detached"; } - if (cSoftHdControl::Player) { // already suspended + if (cSoftHdControl::Player) { // already suspended return "can't suspend SoftHdDevice already suspended"; } #ifdef USE_OPENGLOSD @@ -3830,8 +3685,8 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *optio if (ShutdownHandler.GetUserInactiveTime()) { ShutdownHandler.SetUserInactiveTimeout(); } - if (cSoftHdControl::Player) { // suspended - cControl::Shutdown(); // not need, if not suspended + if (cSoftHdControl::Player) { // suspended + cControl::Shutdown(); // not need, if not suspended } Resume(); SuspendMode = NOT_SUSPENDED; @@ -3880,4 +3735,4 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command, const char *optio return NULL; } -VDRPLUGINCREATOR(cPluginSoftHdDevice); // Don't touch this! +VDRPLUGINCREATOR(cPluginSoftHdDevice); // Don't touch this! diff --git a/softhddev.c b/softhddev.c index f4528fb..b4a8f0c 100644 --- a/softhddev.c +++ b/softhddev.c @@ -1,7 +1,7 @@ /// -/// @file softhddev.c @brief A software HD device plugin for VDR. +/// @file softhddev.c @brief A software HD device plugin for VDR. /// -/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. /// /// Contributor(s): /// @@ -20,27 +20,27 @@ /// $Id: 8881600a16f475cba7db8911ad88ce2234f72d14 $ ////////////////////////////////////////////////////////////////////////////// -#define noUSE_SOFTLIMIT ///< add soft buffer limits to Play.. -#define noUSE_PIP ///< include PIP support + new API -#define noDUMP_TRICKSPEED ///< dump raw trickspeed packets +#define noUSE_SOFTLIMIT ///< add soft buffer limits to Play.. 
+#define noUSE_PIP ///< include PIP support + new API +#define noDUMP_TRICKSPEED ///< dump raw trickspeed packets -#include #include +#include #ifdef __FreeBSD__ #include #endif #include +#include +#include #include #include -#include -#include -#include #include +#include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #include #include @@ -50,50 +50,52 @@ #endif #include -#include "iatomic.h" // portable atomic_t +#include "iatomic.h" // portable atomic_t #include "misc.h" #include "softhddev.h" +// clang-format off #include "audio.h" #include "video.h" #include "codec.h" +// clang-format on #ifdef DEBUG -static int DumpH264(const uint8_t * data, int size); -static void DumpMpeg(const uint8_t * data, int size); +static int DumpH264(const uint8_t *data, int size); +static void DumpMpeg(const uint8_t *data, int size); #endif ////////////////////////////////////////////////////////////////////////////// // Variables ////////////////////////////////////////////////////////////////////////////// -extern int ConfigAudioBufferTime; ///< config size ms of audio buffer -extern int ConfigVideoClearOnSwitch; //< clear decoder on channel switch -char ConfigStartX11Server; ///< flag start the x11 server -static signed char ConfigStartSuspended; ///< flag to start in suspend mode -static char ConfigFullscreen; ///< fullscreen modus -static const char *X11ServerArguments; ///< default command arguments -static char ConfigStillDecoder; ///< hw/sw decoder for still picture +extern int ConfigAudioBufferTime; ///< config size ms of audio buffer +extern int ConfigVideoClearOnSwitch; //< clear decoder on channel switch +char ConfigStartX11Server; ///< flag start the x11 server +static signed char ConfigStartSuspended; ///< flag to start in suspend mode +static char ConfigFullscreen; ///< fullscreen modus +static const char *X11ServerArguments; ///< default command arguments +static char ConfigStillDecoder; ///< hw/sw decoder for still picture -static pthread_mutex_t SuspendLockMutex; ///< suspend lock mutex +static pthread_mutex_t SuspendLockMutex; ///< suspend lock mutex -static volatile char StreamFreezed; ///< stream freezed +static volatile char StreamFreezed; ///< stream freezed ////////////////////////////////////////////////////////////////////////////// // Audio ////////////////////////////////////////////////////////////////////////////// -static volatile char NewAudioStream; ///< new audio stream -static volatile char SkipAudio; ///< skip audio stream -static AudioDecoder *MyAudioDecoder; ///< audio decoder -static enum AVCodecID AudioCodecID; ///< current codec id -static int AudioChannelID; ///< current audio channel id -static VideoStream *AudioSyncStream; ///< video stream for audio/video sync +static volatile char NewAudioStream; ///< new audio stream +static volatile char SkipAudio; ///< skip audio stream +static AudioDecoder *MyAudioDecoder; ///< audio decoder +static enum AVCodecID AudioCodecID; ///< current codec id +static int AudioChannelID; ///< current audio channel id +static VideoStream *AudioSyncStream; ///< video stream for audio/video sync /// Minimum free space in audio buffer 8 packets for 8 channels #define AUDIO_MIN_BUFFER_FREE (3072 * 8 * 8) -#define AUDIO_BUFFER_SIZE (512 * 1024) ///< audio PES buffer default size -static AVPacket AudioAvPkt[1]; ///< audio a/v packet +#define AUDIO_BUFFER_SIZE (512 * 1024) ///< audio 
PES buffer default size +static AVPacket AudioAvPkt[1]; ///< audio a/v packet int AudioDelay = 0; ////////////////////////////////////////////////////////////////////////////// @@ -108,48 +110,42 @@ int AudioDelay = 0; static const uint16_t BitRateTable[2][4][16] = { // MPEG Version 1 {{}, - {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, - 0}, - {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0}, - {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0}}, + {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0}, + {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0}, + {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0}}, // MPEG Version 2 & 2.5 {{}, - {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0}, - {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, - {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0} - } -}; + {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0}, + {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, + {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}}}; /// /// Mpeg samperate table. /// -static const uint16_t SampleRateTable[4] = { - 44100, 48000, 32000, 0 -}; +static const uint16_t SampleRateTable[4] = {44100, 48000, 32000, 0}; /// /// Fast check for Mpeg audio. /// /// 4 bytes 0xFFExxxxx Mpeg audio /// -static inline int FastMpegCheck(const uint8_t * p) -{ - if (p[0] != 0xFF) { // 11bit frame sync +static inline int FastMpegCheck(const uint8_t *p) { + if (p[0] != 0xFF) { // 11bit frame sync return 0; } if ((p[1] & 0xE0) != 0xE0) { return 0; } - if ((p[1] & 0x18) == 0x08) { // version ID - 01 reserved + if ((p[1] & 0x18) == 0x08) { // version ID - 01 reserved return 0; } - if (!(p[1] & 0x06)) { // layer description - 00 reserved + if (!(p[1] & 0x06)) { // layer description - 00 reserved return 0; } - if ((p[2] & 0xF0) == 0xF0) { // bitrate index - 1111 reserved + if ((p[2] & 0xF0) == 0xF0) { // bitrate index - 1111 reserved return 0; } - if ((p[2] & 0x0C) == 0x0C) { // sampling rate index - 11 reserved + if ((p[2] & 0x0C) == 0x0C) { // sampling rate index - 11 reserved return 0; } return 1; @@ -163,9 +159,9 @@ static inline int FastMpegCheck(const uint8_t * p) /// @param data incomplete PES packet /// @param size number of bytes /// -/// @retval <0 possible mpeg audio, but need more data -/// @retval 0 no valid mpeg audio -/// @retval >0 valid mpeg audio +/// @retval <0 possible mpeg audio, but need more data +/// @retval 0 no valid mpeg audio +/// @retval >0 valid mpeg audio /// /// From: http://www.mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm /// @@ -185,8 +181,7 @@ static inline int FastMpegCheck(const uint8_t * p) /// Layer II & III: /// FrameLengthInBytes = 144 * BitRate / SampleRate + Padding /// -static int MpegCheck(const uint8_t * data, int size) -{ +static int MpegCheck(const uint8_t *data, int size) { int mpeg2; int mpeg25; int layer; @@ -205,16 +200,16 @@ static int MpegCheck(const uint8_t * data, int size) padding = (data[2] >> 1) & 0x01; sample_rate = SampleRateTable[sample_rate_index]; - if (!sample_rate) { // no valid sample rate try next + if (!sample_rate) { // no valid sample rate try next // moved into fast check abort(); return 0; } - sample_rate >>= mpeg2; // mpeg 2 half rate - sample_rate >>= mpeg25; // mpeg 2.5 quarter rate + sample_rate >>= mpeg2; // mpeg 2 half rate + sample_rate >>= mpeg25; // mpeg 2.5 quarter rate bit_rate = 
BitRateTable[mpeg2 | mpeg25][layer][bit_rate_index]; - if (!bit_rate) { // no valid bit-rate try next + if (!bit_rate) { // no valid bit-rate try next // FIXME: move into fast check? return 0; } @@ -232,8 +227,11 @@ static int MpegCheck(const uint8_t * data, int size) break; } if (0) { - Debug(3, "pesdemux: mpeg%s layer%d bitrate=%d samplerate=%d %d bytes\n", mpeg25 ? "2.5" : mpeg2 ? "2" : "1", - layer, bit_rate, sample_rate, frame_size); + Debug(3, "pesdemux: mpeg%s layer%d bitrate=%d samplerate=%d %d bytes\n", + mpeg25 ? "2.5" + : mpeg2 ? "2" + : "1", + layer, bit_rate, sample_rate, frame_size); } if (frame_size + 4 > size) { @@ -252,9 +250,8 @@ static int MpegCheck(const uint8_t * data, int size) /// /// 3 bytes 0x56Exxx AAC LATM audio /// -static inline int FastLatmCheck(const uint8_t * p) -{ - if (p[0] != 0x56) { // 11bit sync +static inline int FastLatmCheck(const uint8_t *p) { + if (p[0] != 0x56) { // 11bit sync return 0; } if ((p[1] & 0xE0) != 0xE0) { @@ -271,12 +268,11 @@ static inline int FastLatmCheck(const uint8_t * p) /// @param data incomplete PES packet /// @param size number of bytes /// -/// @retval <0 possible AAC LATM audio, but need more data -/// @retval 0 no valid AAC LATM audio -/// @retval >0 valid AAC LATM audio +/// @retval <0 possible AAC LATM audio, but need more data +/// @retval 0 no valid AAC LATM audio +/// @retval >0 valid AAC LATM audio /// -static int LatmCheck(const uint8_t * data, int size) -{ +static int LatmCheck(const uint8_t *data, int size) { int frame_size; // 13 bit frame size without header @@ -300,15 +296,13 @@ static int LatmCheck(const uint8_t * data, int size) /// from ATSC A/52 table 5.18 frame size code table. /// const uint16_t Ac3FrameSizeTable[38][3] = { - {64, 69, 96}, {64, 70, 96}, {80, 87, 120}, {80, 88, 120}, - {96, 104, 144}, {96, 105, 144}, {112, 121, 168}, {112, 122, 168}, - {128, 139, 192}, {128, 140, 192}, {160, 174, 240}, {160, 175, 240}, - {192, 208, 288}, {192, 209, 288}, {224, 243, 336}, {224, 244, 336}, - {256, 278, 384}, {256, 279, 384}, {320, 348, 480}, {320, 349, 480}, - {384, 417, 576}, {384, 418, 576}, {448, 487, 672}, {448, 488, 672}, - {512, 557, 768}, {512, 558, 768}, {640, 696, 960}, {640, 697, 960}, - {768, 835, 1152}, {768, 836, 1152}, {896, 975, 1344}, {896, 976, 1344}, - {1024, 1114, 1536}, {1024, 1115, 1536}, {1152, 1253, 1728}, + {64, 69, 96}, {64, 70, 96}, {80, 87, 120}, {80, 88, 120}, {96, 104, 144}, + {96, 105, 144}, {112, 121, 168}, {112, 122, 168}, {128, 139, 192}, {128, 140, 192}, + {160, 174, 240}, {160, 175, 240}, {192, 208, 288}, {192, 209, 288}, {224, 243, 336}, + {224, 244, 336}, {256, 278, 384}, {256, 279, 384}, {320, 348, 480}, {320, 349, 480}, + {384, 417, 576}, {384, 418, 576}, {448, 487, 672}, {448, 488, 672}, {512, 557, 768}, + {512, 558, 768}, {640, 696, 960}, {640, 697, 960}, {768, 835, 1152}, {768, 836, 1152}, + {896, 975, 1344}, {896, 976, 1344}, {1024, 1114, 1536}, {1024, 1115, 1536}, {1152, 1253, 1728}, {1152, 1254, 1728}, {1280, 1393, 1920}, {1280, 1394, 1920}, }; @@ -317,9 +311,8 @@ const uint16_t Ac3FrameSizeTable[38][3] = { /// /// 5 bytes 0x0B77xxxxxx AC-3 audio /// -static inline int FastAc3Check(const uint8_t * p) -{ - if (p[0] != 0x0B) { // 16bit sync +static inline int FastAc3Check(const uint8_t *p) { + if (p[0] != 0x0B) { // 16bit sync return 0; } if (p[1] != 0x77) { @@ -336,9 +329,9 @@ static inline int FastAc3Check(const uint8_t * p) /// @param data incomplete PES packet /// @param size number of bytes /// -/// @retval <0 possible AC-3 audio, but need more data -/// @retval 
0 no valid AC-3 audio -/// @retval >0 valid AC-3 audio +/// @retval <0 possible AC-3 audio, but need more data +/// @retval 0 no valid AC-3 audio +/// @retval >0 valid AC-3 audio /// /// o AC-3 Header /// AAAAAAAA AAAAAAAA BBBBBBBB BBBBBBBB CCDDDDDD EEEEEFFF @@ -360,11 +353,10 @@ static inline int FastAc3Check(const uint8_t * p) /// o e 2x Framesize code /// o f 2x Framesize code 2 /// -static int Ac3Check(const uint8_t * data, int size) -{ +static int Ac3Check(const uint8_t *data, int size) { int frame_size; - if (size < 5) { // need 5 bytes to see if AC-3/E-AC-3 + if (size < 5) { // need 5 bytes to see if AC-3/E-AC-3 return -5; } @@ -374,17 +366,17 @@ static int Ac3Check(const uint8_t * data, int size) } frame_size = ((data[2] & 0x03) << 8) + data[3] + 1; frame_size *= 2; - } else { // AC-3 + } else { // AC-3 int fscod; int frmsizcod; // crc1 crc1 fscod|frmsizcod fscod = data[4] >> 6; - if (fscod == 0x03) { // invalid sample rate + if (fscod == 0x03) { // invalid sample rate return 0; } frmsizcod = data[4] & 0x3F; - if (frmsizcod > 37) { // invalid frame size + if (frmsizcod > 37) { // invalid frame size return 0; } // invalid is checked above @@ -408,15 +400,14 @@ static int Ac3Check(const uint8_t * data, int size) /// /// 7/9 bytes 0xFFFxxxxxxxxxxx(xxxx) ADTS audio /// -static inline int FastAdtsCheck(const uint8_t * p) -{ - if (p[0] != 0xFF) { // 12bit sync +static inline int FastAdtsCheck(const uint8_t *p) { + if (p[0] != 0xFF) { // 12bit sync return 0; } - if ((p[1] & 0xF6) != 0xF0) { // sync + layer must be 0 + if ((p[1] & 0xF6) != 0xF0) { // sync + layer must be 0 return 0; } - if ((p[2] & 0x3C) == 0x3C) { // sampling frequency index != 15 + if ((p[2] & 0x3C) == 0x3C) { // sampling frequency index != 15 return 0; } return 1; @@ -430,9 +421,9 @@ static inline int FastAdtsCheck(const uint8_t * p) /// @param data incomplete PES packet /// @param size number of bytes /// -/// @retval <0 possible ADTS audio, but need more data -/// @retval 0 no valid ADTS audio -/// @retval >0 valid AC-3 audio +/// @retval <0 possible ADTS audio, but need more data +/// @retval 0 no valid ADTS audio +/// @retval >0 valid AC-3 audio /// /// AAAAAAAA AAAABCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP /// (QQQQQQQQ QQQQQQQ) @@ -445,8 +436,7 @@ static inline int FastAdtsCheck(const uint8_t * p) /// o .. /// o M*13 frame length /// -static int AdtsCheck(const uint8_t * data, int size) -{ +static int AdtsCheck(const uint8_t *data, int size) { int frame_size; if (size < 6) { @@ -475,11 +465,10 @@ static int AdtsCheck(const uint8_t * data, int size) /// /// PES type. /// -enum -{ +enum { PES_PROG_STREAM_MAP = 0xBC, PES_PRIVATE_STREAM1 = 0xBD, - PES_PADDING_STREAM = 0xBE, ///< filler, padding stream + PES_PADDING_STREAM = 0xBE, ///< filler, padding stream PES_PRIVATE_STREAM2 = 0xBF, PES_AUDIO_STREAM_S = 0xC0, PES_AUDIO_STREAM_E = 0xDF, @@ -489,7 +478,7 @@ enum PES_EMM_STREAM = 0xF1, PES_DSM_CC_STREAM = 0xF2, PES_ISO13522_STREAM = 0xF3, - PES_TYPE_E_STREAM = 0xF8, ///< ITU-T rec. h.222.1 type E stream + PES_TYPE_E_STREAM = 0xF8, ///< ITU-T rec. h.222.1 type E stream PES_PROG_STREAM_DIR = 0xFF, }; @@ -498,54 +487,51 @@ enum /// /// PES parser state. 
/// -enum -{ - PES_INIT, ///< unknown codec +enum { + PES_INIT, ///< unknown codec - PES_SKIP, ///< skip packet - PES_SYNC, ///< search packet sync byte - PES_HEADER, ///< copy header - PES_START, ///< pes packet start found - PES_PAYLOAD, ///< copy payload + PES_SKIP, ///< skip packet + PES_SYNC, ///< search packet sync byte + PES_HEADER, ///< copy header + PES_START, ///< pes packet start found + PES_PAYLOAD, ///< copy payload - PES_LPCM_HEADER, ///< copy lcpm header - PES_LPCM_PAYLOAD, ///< copy lcpm payload + PES_LPCM_HEADER, ///< copy lcpm header + PES_LPCM_PAYLOAD, ///< copy lcpm payload }; -#define PES_START_CODE_SIZE 6 ///< size of pes start code with length -#define PES_HEADER_SIZE 9 ///< size of pes header +#define PES_START_CODE_SIZE 6 ///< size of pes start code with length +#define PES_HEADER_SIZE 9 ///< size of pes header #define PES_MAX_HEADER_SIZE (PES_HEADER_SIZE + 256) ///< maximal header size -#define PES_MAX_PAYLOAD (512 * 1024) ///< max pay load size +#define PES_MAX_PAYLOAD (512 * 1024) ///< max pay load size /// /// PES demuxer. /// -typedef struct _pes_demux_ -{ - //int Pid; ///< packet id - //int PcrPid; ///< program clock reference pid - //int StreamType; ///< stream type +typedef struct _pes_demux_ { + // int Pid; ///< packet id + // int PcrPid; ///< program clock reference pid + // int StreamType; ///< stream type - int State; ///< parsing state - uint8_t Header[PES_MAX_HEADER_SIZE]; ///< buffer for pes header - int HeaderIndex; ///< header index - int HeaderSize; ///< size of pes header - uint8_t *Buffer; ///< payload buffer - int Index; ///< buffer index - int Skip; ///< buffer skip - int Size; ///< size of payload buffer + int State; ///< parsing state + uint8_t Header[PES_MAX_HEADER_SIZE]; ///< buffer for pes header + int HeaderIndex; ///< header index + int HeaderSize; ///< size of pes header + uint8_t *Buffer; ///< payload buffer + int Index; ///< buffer index + int Skip; ///< buffer skip + int Size; ///< size of payload buffer - uint8_t StartCode; ///< pes packet start code + uint8_t StartCode; ///< pes packet start code - int64_t PTS; ///< presentation time stamp - int64_t DTS; ///< decode time stamp + int64_t PTS; ///< presentation time stamp + int64_t DTS; ///< decode time stamp } PesDemux; /// /// Reset packetized elementary stream demuxer. 
/// -static void PesReset(PesDemux * pesdx) -{ +static void PesReset(PesDemux *pesdx) { pesdx->State = PES_INIT; pesdx->Index = 0; pesdx->Skip = 0; @@ -559,8 +545,7 @@ static void PesReset(PesDemux * pesdx) /// /// @param pesdx packetized elementary stream demuxer /// -static void PesInit(PesDemux * pesdx) -{ +static void PesInit(PesDemux *pesdx) { memset(pesdx, 0, sizeof(*pesdx)); pesdx->Size = PES_MAX_PAYLOAD; pesdx->Buffer = av_malloc(PES_MAX_PAYLOAD + AV_INPUT_BUFFER_PADDING_SIZE); @@ -578,12 +563,11 @@ static void PesInit(PesDemux * pesdx) /// @param size number of payload data bytes /// @param is_start flag, start of pes packet /// -static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_start) -{ +static void PesParse(PesDemux *pesdx, const uint8_t *data, int size, int is_start) { const uint8_t *p; const uint8_t *q; - if (is_start) { // start of pes packet + if (is_start) { // start of pes packet if (pesdx->Index && pesdx->Skip) { // copy remaining bytes down pesdx->Index -= pesdx->Skip; @@ -592,7 +576,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st } pesdx->State = PES_SYNC; pesdx->HeaderIndex = 0; - pesdx->PTS = AV_NOPTS_VALUE; // reset if not yet used + pesdx->PTS = AV_NOPTS_VALUE; // reset if not yet used pesdx->DTS = AV_NOPTS_VALUE; } // cleanup, if too much cruft @@ -608,28 +592,28 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st int n; switch (pesdx->State) { - case PES_SKIP: // skip this packet + case PES_SKIP: // skip this packet return; - case PES_START: // at start of pes packet payload + case PES_START: // at start of pes packet payload #if 0 - // Played with PlayAudio - // FIXME: need 0x80 -- 0xA0 state - if (AudioCodecID == AV_CODEC_ID_NONE) { - if ((*p & 0xF0) == 0x80) { // AC-3 & DTS - Debug(3, "pesdemux: dvd ac-3\n"); - } else if ((*p & 0xFF) == 0xA0) { // LPCM - Debug(3, "pesdemux: dvd lpcm\n"); - pesdx->State = PES_LPCM_HEADER; - pesdx->HeaderIndex = 0; - pesdx->HeaderSize = 7; - // FIXME: need harder LPCM check - //break; - } - } + // Played with PlayAudio + // FIXME: need 0x80 -- 0xA0 state + if (AudioCodecID == AV_CODEC_ID_NONE) { + if ((*p & 0xF0) == 0x80) { // AC-3 & DTS + Debug(3, "pesdemux: dvd ac-3\n"); + } else if ((*p & 0xFF) == 0xA0) { // LPCM + Debug(3, "pesdemux: dvd lpcm\n"); + pesdx->State = PES_LPCM_HEADER; + pesdx->HeaderIndex = 0; + pesdx->HeaderSize = 7; + // FIXME: need harder LPCM check + //break; + } + } #endif - case PES_INIT: // find start of audio packet + case PES_INIT: // find start of audio packet // FIXME: increase if needed the buffer // fill buffer @@ -674,7 +658,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st r = AdtsCheck(q, n); codec_id = AV_CODEC_ID_AAC; } - if (r < 0) { // need more bytes + if (r < 0) { // need more bytes break; } if (r > 0) { @@ -698,7 +682,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st pesdx->DTS = AV_NOPTS_VALUE; pesdx->Skip += r; // FIXME: switch to decoder state - //pesdx->State = PES_MPEG_DECODE; + // pesdx->State = PES_MPEG_DECODE; break; } if (AudioCodecID != AV_CODEC_ID_NONE) { @@ -713,7 +697,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st } break; - case PES_SYNC: // wait for pes sync + case PES_SYNC: // wait for pes sync n = PES_START_CODE_SIZE - pesdx->HeaderIndex; if (n > size) { n = size; @@ -728,8 +712,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st 
unsigned code; // bad mpeg pes packet start code prefix 0x00001xx - if (pesdx->Header[0] || pesdx->Header[1] - || pesdx->Header[2] != 0x01) { + if (pesdx->Header[0] || pesdx->Header[1] || pesdx->Header[2] != 0x01) { Debug(3, "pesdemux: bad pes packet\n"); pesdx->State = PES_SKIP; return; @@ -748,7 +731,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st } break; - case PES_HEADER: // parse PES header + case PES_HEADER: // parse PES header n = pesdx->HeaderSize - pesdx->HeaderIndex; if (n > size) { n = size; @@ -776,25 +759,20 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st int64_t dts; if ((pesdx->Header[7] & 0xC0) == 0x80) { - pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 - | (data[13] - & 0xFE) >> 1; + pts = (int64_t)(data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | + data[12] << 7 | (data[13] & 0xFE) >> 1; pesdx->PTS = pts; pesdx->DTS = AV_NOPTS_VALUE; } else if ((pesdx->Header[7] & 0xC0) == 0xC0) { - pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 - | (data[13] - & 0xFE) >> 1; + pts = (int64_t)(data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | + data[12] << 7 | (data[13] & 0xFE) >> 1; pesdx->PTS = pts; - dts = - (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] << - 7 | (data[18] & 0xFE) >> 1; + dts = (int64_t)(data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | + data[17] << 7 | (data[18] & 0xFE) >> 1; pesdx->DTS = dts; Debug(4, "pesdemux: pts %#012" PRIx64 " %#012" PRIx64 "\n", pts, dts); } - empty_header: + empty_header: pesdx->State = PES_INIT; if (pesdx->StartCode == PES_PRIVATE_STREAM1) { // only private stream 1, has sub streams @@ -804,76 +782,76 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st break; #if 0 - // Played with PlayAudio - case PES_LPCM_HEADER: // lpcm header - n = pesdx->HeaderSize - pesdx->HeaderIndex; - if (n > size) { - n = size; - } - memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); - pesdx->HeaderIndex += n; - p += n; - size -= n; + // Played with PlayAudio + case PES_LPCM_HEADER: // lpcm header + n = pesdx->HeaderSize - pesdx->HeaderIndex; + if (n > size) { + n = size; + } + memcpy(pesdx->Header + pesdx->HeaderIndex, p, n); + pesdx->HeaderIndex += n; + p += n; + size -= n; - if (pesdx->HeaderIndex == pesdx->HeaderSize) { - static int samplerates[] = { 48000, 96000, 44100, 32000 }; - int samplerate; - int channels; - int bits_per_sample; - const uint8_t *q; + if (pesdx->HeaderIndex == pesdx->HeaderSize) { + static int samplerates[] = { 48000, 96000, 44100, 32000 }; + int samplerate; + int channels; + int bits_per_sample; + const uint8_t *q; - if (AudioCodecID != AV_CODEC_ID_PCM_DVD) { + if (AudioCodecID != AV_CODEC_ID_PCM_DVD) { - q = pesdx->Header; - Debug(3, "pesdemux: LPCM %d sr:%d bits:%d chan:%d\n", q[0], q[5] >> 4, - (((q[5] >> 6) & 0x3) + 4) * 4, (q[5] & 0x7) + 1); - CodecAudioClose(MyAudioDecoder); + q = pesdx->Header; + Debug(3, "pesdemux: LPCM %d sr:%d bits:%d chan:%d\n", q[0], q[5] >> 4, + (((q[5] >> 6) & 0x3) + 4) * 4, (q[5] & 0x7) + 1); + CodecAudioClose(MyAudioDecoder); - bits_per_sample = (((q[5] >> 6) & 0x3) + 4) * 4; - if (bits_per_sample != 16) { - Error(_("softhddev: LPCM %d bits per sample aren't supported\n"), bits_per_sample); - // FIXME: handle unsupported formats. 
- } - samplerate = samplerates[q[5] >> 4]; - channels = (q[5] & 0x7) + 1; - AudioSetup(&samplerate, &channels, 0); - if (samplerate != samplerates[q[5] >> 4]) { - Error(_("softhddev: LPCM %d sample-rate is unsupported\n"), samplerates[q[5] >> 4]); - // FIXME: support resample - } - if (channels != (q[5] & 0x7) + 1) { - Error(_("softhddev: LPCM %d channels are unsupported\n"), (q[5] & 0x7) + 1); - // FIXME: support resample - } - //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); - AudioCodecID = AV_CODEC_ID_PCM_DVD; - } - pesdx->State = PES_LPCM_PAYLOAD; - pesdx->Index = 0; - pesdx->Skip = 0; - } - break; + bits_per_sample = (((q[5] >> 6) & 0x3) + 4) * 4; + if (bits_per_sample != 16) { + Error(_("softhddev: LPCM %d bits per sample aren't supported\n"), bits_per_sample); + // FIXME: handle unsupported formats. + } + samplerate = samplerates[q[5] >> 4]; + channels = (q[5] & 0x7) + 1; + AudioSetup(&samplerate, &channels, 0); + if (samplerate != samplerates[q[5] >> 4]) { + Error(_("softhddev: LPCM %d sample-rate is unsupported\n"), samplerates[q[5] >> 4]); + // FIXME: support resample + } + if (channels != (q[5] & 0x7) + 1) { + Error(_("softhddev: LPCM %d channels are unsupported\n"), (q[5] & 0x7) + 1); + // FIXME: support resample + } + //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); + AudioCodecID = AV_CODEC_ID_PCM_DVD; + } + pesdx->State = PES_LPCM_PAYLOAD; + pesdx->Index = 0; + pesdx->Skip = 0; + } + break; - case PES_LPCM_PAYLOAD: // lpcm payload - // fill buffer - n = pesdx->Size - pesdx->Index; - if (n > size) { - n = size; - } - memcpy(pesdx->Buffer + pesdx->Index, p, n); - pesdx->Index += n; - p += n; - size -= n; + case PES_LPCM_PAYLOAD: // lpcm payload + // fill buffer + n = pesdx->Size - pesdx->Index; + if (n > size) { + n = size; + } + memcpy(pesdx->Buffer + pesdx->Index, p, n); + pesdx->Index += n; + p += n; + size -= n; - if (pesdx->PTS != (int64_t) AV_NOPTS_VALUE) { - // FIXME: needs bigger buffer - AudioSetClock(pesdx->PTS); - pesdx->PTS = AV_NOPTS_VALUE; - } - swab(pesdx->Buffer, pesdx->Buffer, pesdx->Index); - AudioEnqueue(pesdx->Buffer, pesdx->Index); - pesdx->Index = 0; - break; + if (pesdx->PTS != (int64_t) AV_NOPTS_VALUE) { + // FIXME: needs bigger buffer + AudioSetClock(pesdx->PTS); + pesdx->PTS = AV_NOPTS_VALUE; + } + swab(pesdx->Buffer, pesdx->Buffer, pesdx->Index); + AudioEnqueue(pesdx->Buffer, pesdx->Index); + pesdx->Index = 0; + break; #endif } } while (size > 0); @@ -884,9 +862,9 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st ////////////////////////////////////////////////////////////////////////////// /// Transport stream packet size -#define TS_PACKET_SIZE 188 +#define TS_PACKET_SIZE 188 /// Transport stream packet sync byte -#define TS_PACKET_SYNC 0x47 +#define TS_PACKET_SYNC 0x47 /// /// transport stream demuxer typedef. @@ -896,12 +874,11 @@ typedef struct _ts_demux_ TsDemux; /// /// transport stream demuxer structure. /// -struct _ts_demux_ -{ - int Packets; ///< packets between PCR +struct _ts_demux_ { + int Packets; ///< packets between PCR }; -static PesDemux PesDemuxAudio[1]; ///< audio demuxer +static PesDemux PesDemuxAudio[1]; ///< audio demuxer /// /// Transport stream demuxer. @@ -912,8 +889,7 @@ static PesDemux PesDemuxAudio[1]; ///< audio demuxer /// /// @returns number of bytes consumed from buffer. 
/// -static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) -{ +static int TsDemuxer(TsDemux *tsdx, const uint8_t *data, int size) { const uint8_t *p; p = data; @@ -929,7 +905,7 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) return size; } ++tsdx->Packets; - if (p[1] & 0x80) { // error indicator + if (p[1] & 0x80) { // error indicator Debug(3, "tsdemux: transport error\n"); // FIXME: kill all buffers goto next_packet; @@ -939,15 +915,15 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) Debug(4, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "", p[3] & 0x10 ? " payload" : ""); #endif // skip adaptation field - switch (p[3] & 0x30) { // adaption field - case 0x00: // reserved - case 0x20: // adaptation field only + switch (p[3] & 0x30) { // adaption field + case 0x00: // reserved + case 0x20: // adaptation field only default: goto next_packet; - case 0x10: // only payload + case 0x10: // only payload payload = 4; break; - case 0x30: // skip adapation field + case 0x30: // skip adapation field payload = 5 + p[4]; // illegal length, ignore packet if (payload >= TS_PACKET_SIZE) { @@ -959,18 +935,18 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) PesParse(PesDemuxAudio, p + payload, TS_PACKET_SIZE - payload, p[1] & 0x40); #if 0 - int tmp; + int tmp; - // check continuity - tmp = p[3] & 0x0F; // continuity counter - if (((tsdx->CC + 1) & 0x0F) != tmp) { - Debug(3, "tsdemux: OUT OF SYNC: %d %d\n", tmp, tsdx->CC); - // TS discontinuity (received 8, expected 0) for PID - } - tsdx->CC = tmp; + // check continuity + tmp = p[3] & 0x0F; // continuity counter + if (((tsdx->CC + 1) & 0x0F) != tmp) { + Debug(3, "tsdemux: OUT OF SYNC: %d %d\n", tmp, tsdx->CC); + // TS discontinuity (received 8, expected 0) for PID + } + tsdx->CC = tmp; #endif - next_packet: + next_packet: p += TS_PACKET_SIZE; size -= TS_PACKET_SIZE; } @@ -985,10 +961,9 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size) ** ** @param data data of exactly one complete PES packet ** @param size size of PES packet -** @param id PES packet type +** @param id PES packet type */ -int PlayAudio(const uint8_t * data, int size, uint8_t id) -{ +int PlayAudio(const uint8_t *data, int size, uint8_t id) { int n; const uint8_t *p; @@ -997,7 +972,7 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) if (SkipAudio || !MyAudioDecoder) { // skip audio return size; } - if (StreamFreezed) { // stream freezed + if (StreamFreezed) { // stream freezed return 0; } if (AudioDelay) { @@ -1033,9 +1008,9 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) Error(_("[softhddev] invalid PES audio packet\n")); return size; } - n = data[8]; // header size + n = data[8]; // header size - if (size < 9 + n + 4) { // wrong size + if (size < 9 + n + 4) { // wrong size if (size == 9 + n) { Warning(_("[softhddev] empty audio packet\n")); } else { @@ -1045,27 +1020,26 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) } if (data[7] & 0x80 && n >= 5) { - AudioAvPkt->pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] & - 0xFE) >> 1; + AudioAvPkt->pts = (int64_t)(data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | + (data[13] & 0xFE) >> 1; // Debug(3, "audio: pts %#012" PRIx64 "\n", AudioAvPkt->pts); } - if (0) { // dts is unused + if (0) { // dts is unused if (data[7] & 0x40) { - AudioAvPkt->dts = (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | 
(data[16] - & 0xFE) << 14 | data[17] << 7 | (data[18] & 0xFE) >> 1; + AudioAvPkt->dts = (int64_t)(data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | + data[17] << 7 | (data[18] & 0xFE) >> 1; Debug(3, "audio: dts %#012" PRIx64 "\n", AudioAvPkt->dts); } } p = data + 9 + n; - n = size - 9 - n; // skip pes header + n = size - 9 - n; // skip pes header if (n + AudioAvPkt->stream_index > AudioAvPkt->size) { Fatal(_("[softhddev] audio buffer too small\n")); AudioAvPkt->stream_index = 0; } - if (AudioChannelID != id) { // id changed audio track changed + if (AudioChannelID != id) { // id changed audio track changed AudioChannelID = id; AudioCodecID = AV_CODEC_ID_NONE; Debug(3, "audio/demux: new channel id\n"); @@ -1077,13 +1051,13 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) return size; } if (AudioCodecID != AV_CODEC_ID_PCM_DVD) { - static int samplerates[] = { 48000, 96000, 44100, 32000 }; + static int samplerates[] = {48000, 96000, 44100, 32000}; int samplerate; int channels; int bits_per_sample; Debug(3, "[softhddev]%s: LPCM %d sr:%d bits:%d chan:%d\n", __FUNCTION__, id, p[5] >> 4, - (((p[5] >> 6) & 0x3) + 4) * 4, (p[5] & 0x7) + 1); + (((p[5] >> 6) & 0x3) + 4) * 4, (p[5] & 0x7) + 1); CodecAudioClose(MyAudioDecoder); bits_per_sample = (((p[5] >> 6) & 0x3) + 4) * 4; @@ -1105,11 +1079,11 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) Error(_("[softhddev] LPCM %d channels are unsupported\n"), (p[5] & 0x7) + 1); // FIXME: support resample } - //CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); + // CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD); AudioCodecID = AV_CODEC_ID_PCM_DVD; } - if (AudioAvPkt->pts != (int64_t) AV_NOPTS_VALUE) { + if (AudioAvPkt->pts != (int64_t)AV_NOPTS_VALUE) { AudioSetClock(AudioAvPkt->pts); AudioAvPkt->pts = AV_NOPTS_VALUE; } @@ -1121,7 +1095,7 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) // DVD track header if ((id & 0xF0) == 0x80 && (p[0] & 0xF0) == 0x80) { p += 4; - n -= 4; // skip track header + n -= 4; // skip track header if (AudioCodecID == AV_CODEC_ID_NONE) { // FIXME: ConfigAudioBufferTime + x AudioSetBufferTime(400); @@ -1144,7 +1118,7 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) // 7/9 bytes 0xFFFxxxxxxxxxxx ADTS audio // PCM audio can't be found r = 0; - codec_id = AV_CODEC_ID_NONE; // keep compiler happy + codec_id = AV_CODEC_ID_NONE; // keep compiler happy if (id != 0xbd && FastMpegCheck(p)) { r = MpegCheck(p, n); codec_id = AV_CODEC_ID_MP2; @@ -1169,7 +1143,7 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) r = AdtsCheck(p, n); codec_id = AV_CODEC_ID_AAC; } - if (r < 0) { // need more bytes + if (r < 0) { // need more bytes break; } if (r > 0) { @@ -1219,14 +1193,13 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id) ** ** @returns number of bytes consumed; */ -int PlayTsAudio(const uint8_t * data, int size) -{ +int PlayTsAudio(const uint8_t *data, int size) { static TsDemux tsdx[1]; if (SkipAudio || !MyAudioDecoder) { // skip audio return size; } - if (StreamFreezed) { // stream freezed + if (StreamFreezed) { // stream freezed return 0; } @@ -1255,8 +1228,7 @@ int PlayTsAudio(const uint8_t * data, int size) Debug(3, "AudioDelay %dms\n", AudioDelay); usleep(AudioDelay * 1000); AudioDelay = 0; - // TsDemuxer(tsdx, data, size); // insert dummy audio - + // TsDemuxer(tsdx, data, size); // insert dummy audio } return TsDemuxer(tsdx, data, size); } @@ -1268,16 +1240,12 @@ int PlayTsAudio(const uint8_t * data, int size) ** ** @param volume VDR volume (0 .. 
255) */ -void SetVolumeDevice(int volume) -{ - AudioSetVolume((volume * 1000) / 255); -} +void SetVolumeDevice(int volume) { AudioSetVolume((volume * 1000) / 255); } /** *** Resets channel ID (restarts audio). **/ -void ResetChannelId(void) -{ +void ResetChannelId(void) { AudioChannelID = -1; Debug(3, "audio/demux: reset channel id\n"); } @@ -1292,54 +1260,53 @@ void ResetChannelId(void) /** ** Video output stream device structure. Parser, decoder, display. */ -struct __video_stream__ -{ - VideoHwDecoder *HwDecoder; ///< video hardware decoder - VideoDecoder *Decoder; ///< video decoder - pthread_mutex_t DecoderLockMutex; ///< video decoder lock mutex +struct __video_stream__ { + VideoHwDecoder *HwDecoder; ///< video hardware decoder + VideoDecoder *Decoder; ///< video decoder + pthread_mutex_t DecoderLockMutex; ///< video decoder lock mutex - enum AVCodecID CodecID; ///< current codec id - enum AVCodecID LastCodecID; ///< last codec id + enum AVCodecID CodecID; ///< current codec id + enum AVCodecID LastCodecID; ///< last codec id - volatile char NewStream; ///< flag new video stream - volatile char ClosingStream; ///< flag closing video stream - volatile char SkipStream; ///< skip video stream - volatile char Freezed; ///< stream freezed + volatile char NewStream; ///< flag new video stream + volatile char ClosingStream; ///< flag closing video stream + volatile char SkipStream; ///< skip video stream + volatile char Freezed; ///< stream freezed - volatile char TrickSpeed; ///< current trick speed - volatile char Close; ///< command close video stream - volatile char ClearBuffers; ///< command clear video buffers - volatile char ClearClose; ///< clear video buffers for close + volatile char TrickSpeed; ///< current trick speed + volatile char Close; ///< command close video stream + volatile char ClearBuffers; ///< command clear video buffers + volatile char ClearClose; ///< clear video buffers for close - int InvalidPesCounter; ///< counter of invalid PES packets + int InvalidPesCounter; ///< counter of invalid PES packets enum AVCodecID CodecIDRb[VIDEO_PACKET_MAX]; ///< codec ids in ring buffer - AVPacket PacketRb[VIDEO_PACKET_MAX]; ///< PES packet ring buffer - int StartCodeState; ///< last three bytes start code state + AVPacket PacketRb[VIDEO_PACKET_MAX]; ///< PES packet ring buffer + int StartCodeState; ///< last three bytes start code state - int PacketWrite; ///< ring buffer write pointer - int PacketRead; ///< ring buffer read pointer - atomic_t PacketsFilled; ///< how many of the ring buffer is used + int PacketWrite; ///< ring buffer write pointer + int PacketRead; ///< ring buffer read pointer + atomic_t PacketsFilled; ///< how many of the ring buffer is used }; -static VideoStream MyVideoStream[1]; ///< normal video stream +static VideoStream MyVideoStream[1]; ///< normal video stream #ifdef USE_PIP -static VideoStream PipVideoStream[1]; ///< pip video stream -static int PiPActive = 0, mwx, mwy, mww, mwh; ///< main window frame for PiP +static VideoStream PipVideoStream[1]; ///< pip video stream +static int PiPActive = 0, mwx, mwy, mww, mwh; ///< main window frame for PiP #endif #ifdef DEBUG -uint32_t VideoSwitch; ///< debug video switch ticks -static int VideoMaxPacketSize; ///< biggest used packet buffer +uint32_t VideoSwitch; ///< debug video switch ticks +static int VideoMaxPacketSize; ///< biggest used packet buffer #endif // #define STILL_DEBUG 2 #ifdef STILL_DEBUG -static char InStillPicture; ///< flag still picture +static char InStillPicture; ///< flag still picture 
#endif -const char *X11DisplayName; ///< x11 display name -static volatile char Usr1Signal; ///< true got usr1 signal +const char *X11DisplayName; ///< x11 display name +static volatile char Usr1Signal; ///< true got usr1 signal ////////////////////////////////////////////////////////////////////////////// @@ -1348,8 +1315,7 @@ static volatile char Usr1Signal; ///< true got usr1 signal ** ** @param stream video stream */ -static void VideoPacketInit(VideoStream * stream) -{ +static void VideoPacketInit(VideoStream *stream) { int i; for (i = 0; i < VIDEO_PACKET_MAX; ++i) { @@ -1371,8 +1337,7 @@ static void VideoPacketInit(VideoStream * stream) ** ** @param stream video stream */ -static void VideoPacketExit(VideoStream * stream) -{ +static void VideoPacketExit(VideoStream *stream) { int i; atomic_set(&stream->PacketsFilled, 0); @@ -1386,18 +1351,17 @@ static void VideoPacketExit(VideoStream * stream) ** Place video data in packet ringbuffer. ** ** @param stream video stream -** @param pts presentation timestamp of pes packet +** @param pts presentation timestamp of pes packet ** @param data data of pes packet ** @param size size of pes packet */ -static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const void *data, int size) -{ +static void VideoEnqueue(VideoStream *stream, int64_t pts, int64_t dts, const void *data, int size) { AVPacket *avpkt; // Debug(3, "video: enqueue %d\n", size); avpkt = &stream->PacketRb[stream->PacketWrite]; - if (!avpkt->stream_index) { // add pts only for first added + if (!avpkt->stream_index) { // add pts only for first added avpkt->pts = pts; avpkt->dts = dts; } @@ -1408,8 +1372,7 @@ static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const v // avpkt->stream_index + size); // new + grow reserves FF_INPUT_BUFFER_PADDING_SIZE - av_grow_packet(avpkt, ((size + VIDEO_BUFFER_SIZE / 2) - / (VIDEO_BUFFER_SIZE / 2)) * (VIDEO_BUFFER_SIZE / 2)); + av_grow_packet(avpkt, ((size + VIDEO_BUFFER_SIZE / 2) / (VIDEO_BUFFER_SIZE / 2)) * (VIDEO_BUFFER_SIZE / 2)); // FIXME: out of memory! 
#ifdef DEBUG if (avpkt->size <= avpkt->stream_index + size) { @@ -1435,11 +1398,10 @@ static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const v ** ** @param stream video stream */ -static void VideoResetPacket(VideoStream * stream) -{ +static void VideoResetPacket(VideoStream *stream) { AVPacket *avpkt; - stream->StartCodeState = 0; // reset start code state + stream->StartCodeState = 0; // reset start code state stream->CodecIDRb[stream->PacketWrite] = AV_CODEC_ID_NONE; avpkt = &stream->PacketRb[stream->PacketWrite]; @@ -1454,12 +1416,11 @@ static void VideoResetPacket(VideoStream * stream) ** @param stream video stream ** @param codec_id codec id of packet (MPEG/H264) */ -static void VideoNextPacket(VideoStream * stream, int codec_id) -{ +static void VideoNextPacket(VideoStream *stream, int codec_id) { AVPacket *avpkt; avpkt = &stream->PacketRb[stream->PacketWrite]; - if (!avpkt->stream_index) { // ignore empty packets + if (!avpkt->stream_index) { // ignore empty packets if (codec_id != AV_CODEC_ID_NONE) { return; } @@ -1502,13 +1463,12 @@ static void VideoNextPacket(VideoStream * stream, int codec_id) ** FIXME: this code can be written much faster ** ** @param stream video stream -** @param pts presentation timestamp of pes packet +** @param pts presentation timestamp of pes packet ** @param data data of pes packet ** @param size size of pes packet */ -static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const uint8_t * data, int size) -{ - static const char startcode[3] = { 0x00, 0x00, 0x01 }; +static void VideoMpegEnqueue(VideoStream *stream, int64_t pts, int64_t dts, const uint8_t *data, int size) { + static const char startcode[3] = {0x00, 0x00, 0x01}; const uint8_t *p; int n; int first; @@ -1526,14 +1486,14 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con } #endif - switch (stream->StartCodeState) { // prefix starting in last packet - case 3: // 0x00 0x00 0x01 seen + switch (stream->StartCodeState) { // prefix starting in last packet + case 3: // 0x00 0x00 0x01 seen #ifdef DEBUG fprintf(stderr, "last: %d\n", stream->StartCodeState); #endif if (!p[0] || p[0] == 0xb3) { #ifdef DEBUG - printf("last: %d start aspect %02x\n", stream->StartCodeState, p[4]); + printf("last: %d start aspect %02x\n", stream->StartCodeState, p[4]); #endif stream->PacketRb[stream->PacketWrite].stream_index -= 3; VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); @@ -1545,13 +1505,13 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con } break; - case 2: // 0x00 0x00 seen + case 2: // 0x00 0x00 seen #ifdef DEBUG fprintf(stderr, "last: %d\n", stream->StartCodeState); #endif if (p[0] == 0x01 && (!p[1] || p[1] == 0xb3)) { #ifdef DEBUG - printf("last: %d start aspect %02x\n", stream->StartCodeState, p[5]); + printf("last: %d start aspect %02x\n", stream->StartCodeState, p[5]); #endif stream->PacketRb[stream->PacketWrite].stream_index -= 2; VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); @@ -1562,13 +1522,13 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con pts = AV_NOPTS_VALUE; } break; - case 1: // 0x00 seen + case 1: // 0x00 seen #ifdef DEBUG fprintf(stderr, "last: %d\n", stream->StartCodeState); #endif if (!p[0] && p[1] == 0x01 && (!p[2] || p[2] == 0xb3)) { #ifdef DEBUG - printf("last: %d start aspect %02x\n", stream->StartCodeState, p[6]); + printf("last: %d start aspect %02x\n", stream->StartCodeState, p[6]); #endif 
stream->PacketRb[stream->PacketWrite].stream_index -= 1; VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO); @@ -1625,7 +1585,7 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con } stream->StartCodeState = 0; - switch (n) { // handle packet border start code + switch (n) { // handle packet border start code case 3: if (!p[0] && !p[1] && p[2] == 0x01) { stream->StartCodeState = 3; @@ -1666,8 +1626,7 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con */ #ifndef USE_PIP -static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt) -{ +static void FixPacketForFFMpeg(VideoDecoder *vdecoder, AVPacket *avpkt) { uint8_t *p; int n; AVPacket tmp[1]; @@ -1678,14 +1637,14 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt) *tmp = *avpkt; first = 1; -#if STILL_DEBUG>1 +#if STILL_DEBUG > 1 if (InStillPicture) { fprintf(stderr, "fix(%d): ", n); } #endif while (n > 3) { -#if STILL_DEBUG>1 +#if STILL_DEBUG > 1 if (InStillPicture && !p[0] && !p[1] && p[2] == 0x01) { fprintf(stderr, " %02x", p[3]); } @@ -1700,10 +1659,10 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt) } // packet has already an picture header tmp->size = p - tmp->data; -#if STILL_DEBUG>1 +#if STILL_DEBUG > 1 if (InStillPicture) { fprintf(stderr, "\nfix:%9d,%02x %02x %02x %02x\n", tmp->size, tmp->data[0], tmp->data[1], tmp->data[2], - tmp->data[3]); + tmp->data[3]); } #endif CodecVideoDecode(vdecoder, tmp); @@ -1717,10 +1676,10 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt) ++p; } -#if STILL_DEBUG>1 +#if STILL_DEBUG > 1 if (InStillPicture) { fprintf(stderr, "\nfix:%9d.%02x %02x %02x %02x\n", tmp->size, tmp->data[0], tmp->data[1], tmp->data[2], - tmp->data[3]); + tmp->data[3]); } #endif CodecVideoDecode(vdecoder, tmp); @@ -1732,8 +1691,7 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt) ** ** @param stream video stream */ -static void VideoStreamOpen(VideoStream * stream) -{ +static void VideoStreamOpen(VideoStream *stream) { stream->SkipStream = 1; stream->CodecID = AV_CODEC_ID_NONE; stream->LastCodecID = AV_CODEC_ID_NONE; @@ -1753,8 +1711,7 @@ static void VideoStreamOpen(VideoStream * stream) ** @note must be called from the video thread, otherwise xcb has a ** deadlock. 
*/ -static void VideoStreamClose(VideoStream * stream, int delhw) -{ +static void VideoStreamClose(VideoStream *stream, int delhw) { stream->SkipStream = 1; if (stream->Decoder) { VideoDecoder *decoder; @@ -1763,7 +1720,7 @@ static void VideoStreamClose(VideoStream * stream, int delhw) decoder = stream->Decoder; // FIXME: remove this lock for main stream close pthread_mutex_lock(&stream->DecoderLockMutex); - stream->Decoder = NULL; // lock read thread + stream->Decoder = NULL; // lock read thread pthread_mutex_unlock(&stream->DecoderLockMutex); CodecVideoClose(decoder); CodecVideoDelDecoder(decoder); @@ -1788,31 +1745,30 @@ static void VideoStreamClose(VideoStream * stream, int delhw) ** ** @param stream video stream ** -** @retval 1 something todo -** @retval -1 empty stream +** @retval 1 something todo +** @retval -1 empty stream */ -int VideoPollInput(VideoStream * stream) -{ - if (!stream->Decoder || !stream->HwDecoder) { // closing +int VideoPollInput(VideoStream *stream) { + if (!stream->Decoder || !stream->HwDecoder) { // closing #ifdef DEBUG fprintf(stderr, "no decoder\n"); #endif return -1; } - if (stream->Close) { // close stream request + if (stream->Close) { // close stream request VideoStreamClose(stream, 1); stream->Close = 0; return 1; } - if (stream->ClearBuffers) { // clear buffer request + if (stream->ClearBuffers) { // clear buffer request atomic_set(&stream->PacketsFilled, 0); stream->PacketRead = stream->PacketWrite; // FIXME: ->Decoder already checked Debug(3, "Clear buffer request in Poll\n"); if (stream->Decoder && stream->HwDecoder) { CodecVideoFlushBuffers(stream->Decoder); -// VideoResetStart(stream->HwDecoder); + // VideoResetStart(stream->HwDecoder); } stream->ClearBuffers = 0; return 1; @@ -1828,32 +1784,31 @@ int VideoPollInput(VideoStream * stream) ** ** @param stream video stream ** -** @retval 0 packet decoded -** @retval 1 stream paused -** @retval -1 empty stream +** @retval 0 packet decoded +** @retval 1 stream paused +** @retval -1 empty stream */ -int VideoDecodeInput(VideoStream * stream, int trick) -{ +int VideoDecodeInput(VideoStream *stream, int trick) { int filled; AVPacket *avpkt; int saved_size; - if (!stream->Decoder) { // closing + if (!stream->Decoder) { // closing #ifdef DEBUG fprintf(stderr, "no decoder\n"); #endif return -1; } - if (stream->Close) { // close stream request + if (stream->Close) { // close stream request VideoStreamClose(stream, 1); stream->Close = 0; return 1; } - if (stream->ClearBuffers && trick) - stream->ClearBuffers = 0; - - if (stream->ClearBuffers) { // clear buffer request + if (stream->ClearBuffers && trick) + stream->ClearBuffers = 0; + + if (stream->ClearBuffers) { // clear buffer request atomic_set(&stream->PacketsFilled, 0); stream->PacketRead = stream->PacketWrite; // FIXME: ->Decoder already checked @@ -1865,7 +1820,7 @@ int VideoDecodeInput(VideoStream * stream, int trick) stream->ClearBuffers = 0; return 1; } - if (stream->Freezed) { // stream freezed + if (stream->Freezed) { // stream freezed // clear is called during freezed return 1; } @@ -1878,28 +1833,28 @@ int VideoDecodeInput(VideoStream * stream, int trick) #if 0 // clearing for normal channel switch has no advantage if (stream->ClearClose || stream->ClosingStream) { - int f; + int f; - // FIXME: during replay all packets are always checked + // FIXME: during replay all packets are always checked - // flush buffers, if close is in the queue - for (f = 0; f < filled; ++f) { - if (stream->CodecIDRb[(stream->PacketRead + f) % VIDEO_PACKET_MAX] == 
AV_CODEC_ID_NONE) { - if (f) { - Debug(3, "video: cleared upto close\n"); - atomic_sub(f, &stream->PacketsFilled); - stream->PacketRead = (stream->PacketRead + f) % VIDEO_PACKET_MAX; - stream->ClearClose = 0; - } - break; - } - } - stream->ClosingStream = 0; + // flush buffers, if close is in the queue + for (f = 0; f < filled; ++f) { + if (stream->CodecIDRb[(stream->PacketRead + f) % VIDEO_PACKET_MAX] == AV_CODEC_ID_NONE) { + if (f) { + Debug(3, "video: cleared upto close\n"); + atomic_sub(f, &stream->PacketsFilled); + stream->PacketRead = (stream->PacketRead + f) % VIDEO_PACKET_MAX; + stream->ClearClose = 0; + } + break; + } + } + stream->ClosingStream = 0; } #endif // - // handle queued commands + // handle queued commands // avpkt = &stream->PacketRb[stream->PacketRead]; switch (stream->CodecIDRb[stream->PacketRead]) { @@ -1969,7 +1924,7 @@ int VideoDecodeInput(VideoStream * stream, int trick) avpkt->size = saved_size; - skip: +skip: // advance packet read stream->PacketRead = (stream->PacketRead + 1) % VIDEO_PACKET_MAX; atomic_dec(&stream->PacketsFilled); @@ -1982,18 +1937,14 @@ int VideoDecodeInput(VideoStream * stream, int trick) ** ** @param stream video stream */ -int VideoGetBuffers(const VideoStream * stream) -{ - return atomic_read(&stream->PacketsFilled); -} +int VideoGetBuffers(const VideoStream *stream) { return atomic_read(&stream->PacketsFilled); } /** ** Try video start. ** ** NOT TRUE: Could be called, when already started. */ -static void StartVideo(void) -{ +static void StartVideo(void) { VideoInit(X11DisplayName); if (ConfigFullscreen) { @@ -2010,8 +1961,7 @@ static void StartVideo(void) /** ** Stop video. */ -static void StopVideo(void) -{ +static void StopVideo(void) { VideoOsdExit(); VideoExit(); AudioSyncStream = NULL; @@ -2025,7 +1975,7 @@ static void StopVideo(void) decoder = MyVideoStream->Decoder; pthread_mutex_lock(&MyVideoStream->DecoderLockMutex); - MyVideoStream->Decoder = NULL; // lock read thread + MyVideoStream->Decoder = NULL; // lock read thread pthread_mutex_unlock(&MyVideoStream->DecoderLockMutex); // FIXME: this can crash, hw decoder released by video exit Debug(3, "in Stop Video"); @@ -2050,8 +2000,7 @@ static void StopVideo(void) ** ** Function to dump a mpeg packet, not needed. */ -static void DumpMpeg(const uint8_t * data, int size) -{ +static void DumpMpeg(const uint8_t *data, int size) { fprintf(stderr, "%8d: ", size); // b3 b4 b8 00 b5 ... 00 b5 ... @@ -2074,8 +2023,7 @@ static void DumpMpeg(const uint8_t * data, int size) ** ** Function to Dump a h264 packet, not needed. */ -static int DumpH264(const uint8_t * data, int size) -{ +static int DumpH264(const uint8_t *data, int size) { printf("H264:"); do { if (size < 4) { @@ -2098,8 +2046,7 @@ static int DumpH264(const uint8_t * data, int size) ** ** Function to validate a mpeg packet, not needed. */ -static int ValidateMpeg(const uint8_t * data, int size) -{ +static int ValidateMpeg(const uint8_t *data, int size) { int pes_l; do { @@ -2112,7 +2059,7 @@ static int ValidateMpeg(const uint8_t * data, int size) } pes_l = (data[4] << 8) | data[5]; - if (!pes_l) { // contains unknown length + if (!pes_l) { // contains unknown length return 1; } @@ -2138,24 +2085,23 @@ static int ValidateMpeg(const uint8_t * data, int size) ** @return number of bytes used, 0 if internal buffer are full. 
** */ -int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) -{ +int PlayVideo3(VideoStream *stream, const uint8_t *data, int size) { const uint8_t *check; int64_t pts, dts; int n; int z; int l; - if (!stream->Decoder) { // no x11 video started + if (!stream->Decoder) { // no x11 video started return size; } - if (stream->SkipStream) { // skip video stream + if (stream->SkipStream) { // skip video stream return size; } - if (stream->Freezed) { // stream freezed + if (stream->Freezed) { // stream freezed return 0; } - if (stream->NewStream) { // channel switched + if (stream->NewStream) { // channel switched Debug(3, "video: new stream %dms\n", GetMsTicks() - VideoSwitch); if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 1) { Debug(3, "video: new video stream lost\n"); @@ -2181,12 +2127,12 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) stream->InvalidPesCounter = 0; } // 0xBE, filler, padding stream - if (data[3] == PES_PADDING_STREAM) { // from DVD plugin + if (data[3] == PES_PADDING_STREAM) { // from DVD plugin return size; } - n = data[8]; // header size - if (size <= 9 + n) { // wrong size + n = data[8]; // header size + if (size <= 9 + n) { // wrong size if (size == 9 + n) { Warning(_("[softhddev] empty video packet\n")); } else { @@ -2201,8 +2147,8 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) } #ifdef USE_SOFTLIMIT // soft limit buffer full - if (AudioSyncStream == stream && atomic_read(&stream->PacketsFilled) > 3 - && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { + if (AudioSyncStream == stream && atomic_read(&stream->PacketsFilled) > 3 && + AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) { return 0; } #endif @@ -2210,23 +2156,20 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) pts = AV_NOPTS_VALUE; dts = AV_NOPTS_VALUE; if ((data[7] & 0xc0) == 0x80) { - pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] & - 0xFE) >> 1; + pts = (int64_t)(data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | + (data[13] & 0xFE) >> 1; } if ((data[7] & 0xC0) == 0xc0) { - pts = - (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] & - 0xFE) >> 1; - dts = - (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] << 7 | (data[18] & - 0xFE) >> 1; + pts = (int64_t)(data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | + (data[13] & 0xFE) >> 1; + dts = (int64_t)(data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] << 7 | + (data[18] & 0xFE) >> 1; } check = data + 9 + n; l = size - 9 - n; z = 0; - while (!*check) { // count leading zeros + while (!*check) { // count leading zeros if (l < 3) { // Warning(_("[softhddev] empty video packet %d bytes\n"), size); z = 0; @@ -2260,9 +2203,9 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size) #ifdef H264_EOS_TRICKSPEED // this should improve ffwd+frew, but produce crash in ffmpeg // with some streams - if (stream->TrickSpeed && pts != (int64_t) AV_NOPTS_VALUE) { + if (stream->TrickSpeed && pts != (int64_t)AV_NOPTS_VALUE) { // H264 NAL End of Sequence - static uint8_t seq_end_h264[] = { 0x00, 0x00, 0x00, 0x01, 0x0A }; + static uint8_t seq_end_h264[] = {0x00, 0x00, 0x00, 0x01, 0x0A}; // 1-5=SLICE 6=SEI 7=SPS 8=PPS // NAL SPS sequence parameter set @@ -2370,10 +2313,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, 
int size) ** ** @todo FIXME: combine the 5 ifs at start of the function */ -int PlayVideo(const uint8_t * data, int size) -{ - return PlayVideo3(MyVideoStream, data, size); -} +int PlayVideo(const uint8_t *data, int size) { return PlayVideo3(MyVideoStream, data, size); } /// call VDR support function extern uint8_t *CreateJpeg(uint8_t *, int *, int, int, int); @@ -2383,17 +2323,16 @@ extern uint8_t *CreateJpeg(uint8_t *, int *, int, int, int); /** ** Create a jpeg image in memory. ** -** @param image raw RGB image -** @param raw_size size of raw image -** @param size[out] size of jpeg image -** @param quality jpeg quality -** @param width number of horizontal pixels in image -** @param height number of vertical pixels in image +** @param image raw RGB image +** @param raw_size size of raw image +** @param size[out] size of jpeg image +** @param quality jpeg quality +** @param width number of horizontal pixels in image +** @param height number of vertical pixels in image ** ** @returns allocated jpeg image. */ -uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, int width, int height) -{ +uint8_t *CreateJpeg(uint8_t *image, int raw_size, int *size, int quality, int width, int height) { struct jpeg_compress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPROW row_ptr[1]; @@ -2440,15 +2379,14 @@ uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, int w ** @param width number of horizontal pixels in the frame ** @param height number of vertical pixels in the frame */ -uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height) -{ +uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height) { if (jpeg) { uint8_t *image; int raw_size; raw_size = 0; image = VideoGrab(&raw_size, &width, &height, 0); - if (image) { // can fail, suspended, ... + if (image) { // can fail, suspended, ... uint8_t *jpg_image; jpg_image = CreateJpeg(image, size, quality, width, height); @@ -2466,18 +2404,17 @@ uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height) /** ** Set play mode, called on channel switch. ** -** @param play_mode play mode (none, video+audio, audio-only, ...) +** @param play_mode play mode (none, video+audio, audio-only, ...) 
*/ -int SetPlayMode(int play_mode) -{ +int SetPlayMode(int play_mode) { Debug(3, "Set Playmode %d\n", play_mode); switch (play_mode) { - case 0: // audio/video from decoder + case 0: // audio/video from decoder // tell video parser we get new stream if (MyVideoStream->Decoder && !MyVideoStream->SkipStream) { // clear buffers on close configured always or replay only if (ConfigVideoClearOnSwitch || MyVideoStream->ClearClose) { - Clear(); // flush all buffers + Clear(); // flush all buffers MyVideoStream->ClearClose = 0; } if (MyVideoStream->CodecID != AV_CODEC_ID_NONE) { @@ -2492,23 +2429,23 @@ int SetPlayMode(int play_mode) #endif } } - if (MyAudioDecoder) { // tell audio parser we have new stream + if (MyAudioDecoder) { // tell audio parser we have new stream if (AudioCodecID != AV_CODEC_ID_NONE) { NewAudioStream = 1; } } break; - case 1: // audio/video from player + case 1: // audio/video from player VideoDisplayWakeup(); Play(); break; - case 2: // audio only from player, video from decoder - case 3: // audio only from player, no video (black screen) + case 2: // audio only from player, video from decoder + case 3: // audio only from player, no video (black screen) Debug(3, "softhddev: FIXME: audio only, silence video errors\n"); VideoDisplayWakeup(); Play(); break; - case 4: // video only from player, audio from decoder + case 4: // video only from player, audio from decoder VideoDisplayWakeup(); Play(); break; @@ -2520,8 +2457,7 @@ int SetPlayMode(int play_mode) ** Gets the current System Time Counter, which can be used to ** synchronize audio, video and subtitles. */ -int64_t GetSTC(void) -{ +int64_t GetSTC(void) { if (MyVideoStream->HwDecoder) { return VideoGetClock(MyVideoStream->HwDecoder); } @@ -2533,12 +2469,11 @@ int64_t GetSTC(void) /** ** Get video stream size and aspect. ** -** @param width[OUT] width of video stream -** @param height[OUT] height of video stream -** @param aspect[OUT] aspect ratio (4/3, 16/9, ...) of video stream +** @param width[OUT] width of video stream +** @param height[OUT] height of video stream +** @param aspect[OUT] aspect ratio (4/3, 16/9, ...) of video stream */ -void GetVideoSize(int *width, int *height, double *aspect) -{ +void GetVideoSize(int *width, int *height, double *aspect) { #ifdef DEBUG static int done_width; static int done_height; @@ -2552,7 +2487,7 @@ void GetVideoSize(int *width, int *height, double *aspect) } else { *width = 0; *height = 0; - *aspect = 1.0; // like default cDevice::GetVideoSize + *aspect = 1.0; // like default cDevice::GetVideoSize } #ifdef DEBUG @@ -2572,8 +2507,7 @@ void GetVideoSize(int *width, int *height, double *aspect) ** ** @param speed trick speed */ -void TrickSpeed(int speed) -{ +void TrickSpeed(int speed) { MyVideoStream->TrickSpeed = speed; if (MyVideoStream->HwDecoder) { VideoSetTrickSpeed(MyVideoStream->HwDecoder, speed); @@ -2588,15 +2522,14 @@ void TrickSpeed(int speed) /** ** Clears all video and audio data from the device. */ -void Clear(void) -{ +void Clear(void) { int i; - VideoResetPacket(MyVideoStream); // terminate work + VideoResetPacket(MyVideoStream); // terminate work MyVideoStream->ClearBuffers = 1; if (!SkipAudio) { AudioFlushBuffers(); - //NewAudioStream = 1; + // NewAudioStream = 1; } // FIXME: audio avcodec_flush_buffers, video is done by VideoClearBuffers @@ -2611,9 +2544,8 @@ void Clear(void) /** ** Sets the device into play mode. 
*/ -void Play(void) -{ - TrickSpeed(0); // normal play +void Play(void) { + TrickSpeed(0); // normal play SkipAudio = 0; AudioPlay(); } @@ -2621,8 +2553,7 @@ void Play(void) /** ** Sets the device into "freeze frame" mode. */ -void Freeze(void) -{ +void Freeze(void) { StreamFreezed = 1; MyVideoStream->Freezed = 1; AudioPause(); @@ -2631,11 +2562,10 @@ void Freeze(void) /** ** Turns off audio while replaying. */ -void Mute(void) -{ +void Mute(void) { SkipAudio = 1; AudioFlushBuffers(); - //AudioSetVolume(0); + // AudioSetVolume(0); } /** @@ -2644,13 +2574,12 @@ void Mute(void) ** @param data pes frame data ** @param size number of bytes in frame */ -void StillPicture(const uint8_t * data, int size) -{ - static uint8_t seq_end_mpeg[] = { 0x00, 0x00, 0x01, 0xB7 }; +void StillPicture(const uint8_t *data, int size) { + static uint8_t seq_end_mpeg[] = {0x00, 0x00, 0x01, 0xB7}; // H264 NAL End of Sequence - static uint8_t seq_end_h264[] = { 0x00, 0x00, 0x00, 0x01, 0x0A }; + static uint8_t seq_end_h264[] = {0x00, 0x00, 0x00, 0x01, 0x0A}; // H265 NAL End of Sequence - static uint8_t seq_end_h265[] = { 0x00, 0x00, 0x00, 0x01, 0x48, 0x01 }; //0x48 = end of seq 0x4a = end of stream + static uint8_t seq_end_h265[] = {0x00, 0x00, 0x00, 0x01, 0x48, 0x01}; // 0x48 = end of seq 0x4a = end of stream int i; // might be called in Suspended Mode @@ -2668,7 +2597,7 @@ void StillPicture(const uint8_t * data, int size) VideoSetTrickSpeed(MyVideoStream->HwDecoder, 1); VideoResetPacket(MyVideoStream); - VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream + VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream if (MyVideoStream->CodecID == AV_CODEC_ID_NONE) { // FIXME: should detect codec, see PlayVideo @@ -2704,24 +2633,25 @@ void StillPicture(const uint8_t * data, int size) if (!len || len + 6 > n) { if ((split[3] & 0xF0) == 0xE0) { // video only - while (!PlayVideo3(MyVideoStream, split, n)) { // feed remaining bytes + while (!PlayVideo3(MyVideoStream, split, n)) { // feed remaining bytes } } break; } if ((split[3] & 0xF0) == 0xE0) { // video only - while (!PlayVideo3(MyVideoStream, split, len + 6)) { // feed it + while (!PlayVideo3(MyVideoStream, split, len + 6)) { // feed it } } split += 6 + len; n -= 6 + len; } while (n > 6); - VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet - } else { // ES packet + VideoNextPacket(MyVideoStream, + MyVideoStream->CodecID); // terminate last packet + } else { // ES packet if (MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) { - VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream + VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream MyVideoStream->CodecID = AV_CODEC_ID_MPEG2VIDEO; } VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, data, size); @@ -2733,7 +2663,8 @@ void StillPicture(const uint8_t * data, int size) } else { VideoEnqueue(MyVideoStream, AV_NOPTS_VALUE, AV_NOPTS_VALUE, seq_end_mpeg, sizeof(seq_end_mpeg)); } - VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet + VideoNextPacket(MyVideoStream, + MyVideoStream->CodecID); // terminate last packet } // wait for empty buffers @@ -2745,7 +2676,7 @@ void StillPicture(const uint8_t * data, int size) InStillPicture = 0; #endif - VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream + VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream VideoSetTrickSpeed(MyVideoStream->HwDecoder, 0); } @@ -2763,8 +2694,7 @@ void StillPicture(const uint8_t * 
data, int size) ** @retval true if ready ** @retval false if busy */ -int Poll(int timeout) -{ +int Poll(int timeout) { // poll is only called during replay, flush buffers after replay MyVideoStream->ClearClose = 1; for (;;) { @@ -2777,8 +2707,8 @@ int Poll(int timeout) // FIXME: no video! filled = atomic_read(&MyVideoStream->PacketsFilled); // soft limit + hard limit - full = (used > AUDIO_MIN_BUFFER_FREE && filled > 3) - || AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE || filled >= VIDEO_PACKET_MAX - 10; + full = (used > AUDIO_MIN_BUFFER_FREE && filled > 3) || AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE || + filled >= VIDEO_PACKET_MAX - 10; if (!full || !timeout) { return !full; @@ -2788,7 +2718,7 @@ int Poll(int timeout) if (timeout < t) { t = timeout; } - usleep(t * 1000); // let display thread work + usleep(t * 1000); // let display thread work timeout -= t; } } @@ -2798,10 +2728,9 @@ int Poll(int timeout) ** ** @param timeout timeout to flush in ms */ -int Flush(int timeout) -{ +int Flush(int timeout) { if (atomic_read(&MyVideoStream->PacketsFilled)) { - if (timeout) { // let display thread work + if (timeout) { // let display thread work usleep(timeout * 1000); } return !atomic_read(&MyVideoStream->PacketsFilled); @@ -2816,12 +2745,11 @@ int Flush(int timeout) /** ** Get OSD size and aspect. ** -** @param width[OUT] width of OSD -** @param height[OUT] height of OSD -** @param aspect[OUT] aspect ratio (4/3, 16/9, ...) of OSD +** @param width[OUT] width of OSD +** @param height[OUT] height of OSD +** @param aspect[OUT] aspect ratio (4/3, 16/9, ...) of OSD */ -void GetOsdSize(int *width, int *height, double *aspect) -{ +void GetOsdSize(int *width, int *height, double *aspect) { #ifdef DEBUG static int done_width; static int done_height; @@ -2842,25 +2770,21 @@ void GetOsdSize(int *width, int *height, double *aspect) /** ** Close OSD. */ -void OsdClose(void) -{ - VideoOsdClear(); -} +void OsdClose(void) { VideoOsdClear(); } /** ** Draw an OSD pixmap. ** -** @param xi x-coordinate in argb image -** @param yi y-coordinate in argb image +** @param xi x-coordinate in argb image +** @param yi y-coordinate in argb image ** @paran height height in pixel in argb image ** @paran width width in pixel in argb image ** @param pitch pitch of argb image ** @param argb 32bit ARGB image data -** @param x x-coordinate on screen of argb image -** @param y y-coordinate on screen of argb image +** @param x x-coordinate on screen of argb image +** @param y y-coordinate on screen of argb image */ -void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t * argb, int x, int y) -{ +void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t *argb, int x, int y) { // wakeup display for showing remote learning dialog VideoDisplayWakeup(); VideoOsdDrawARGB(xi, yi, height, width, pitch, argb, x, y); @@ -2871,27 +2795,33 @@ void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t /** ** Return command line help string. */ -const char *CommandLineHelp(void) -{ +const char *CommandLineHelp(void) { return " -a device\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n" - " -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n" - " -c channel\taudio mixer channel name (fe. PCM)\n" " -d display\tdisplay of x11 server (fe. 
:0.0)\n" - " -f\t\tstart with fullscreen window (only with window manager)\n" - " -g geometry\tx11 window geometry wxh+x+y\n" " -r Refresh\tRefreshrate for DRM (default is 50 Hz)\n" - " -C Connector\tConnector for DRM (default is current Connector)\n" - " -S shader\tShader to use.\n\t\tOnly with placebo. Can be repeated for more shaders\n" - " -v device\tvideo driver device (cuvid)\n" " -s\t\tstart in suspended mode\n" - " -x\t\tstart x11 server, with -xx try to connect, if this fails\n" - " -X args\tX11 server arguments (f.e. -nocursor)\n" " -w workaround\tenable/disable workarounds\n" - "\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n" - "\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n" - "\tstill-hw-decoder\tenable hardware decoder for still-pictures\n" - "\tstill-h264-hw-decoder\tenable h264 hw decoder for still-pictures\n" - "\talsa-driver-broken\tdisable broken alsa driver message\n" - "\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n" - "\talsa-close-open-delay\tenable close open delay to fix no sound bug\n" - "\tignore-repeat-pict\tdisable repeat pict message\n" - "\tuse-possible-defect-frames prefer faster channel switch\n" " -D\t\tstart in detached mode\n"; + " -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n" + " -c channel\taudio mixer channel name (fe. PCM)\n" + " -d display\tdisplay of x11 server (fe. :0.0)\n" + " -f\t\tstart with fullscreen window (only with window manager)\n" + " -g geometry\tx11 window geometry wxh+x+y\n" + " -r Refresh\tRefreshrate for DRM (default is 50 Hz)\n" + " -C Connector\tConnector for DRM (default is current Connector)\n" + " -S shader\tShader to use.\n\t\tOnly with placebo. Can be repeated " + "for more shaders\n" + " -v device\tvideo driver device (cuvid)\n" + " -s\t\tstart in suspended mode\n" + " -x\t\tstart x11 server, with -xx try to connect, if this fails\n" + " -X args\tX11 server arguments (f.e. -nocursor)\n" + " -w workaround\tenable/disable workarounds\n" + "\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n" + "\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n" + "\tstill-hw-decoder\tenable hardware decoder for still-pictures\n" + "\tstill-h264-hw-decoder\tenable h264 hw decoder for still-pictures\n" + "\talsa-driver-broken\tdisable broken alsa driver message\n" + "\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n" + "\talsa-close-open-delay\tenable close open delay to fix no sound " + "bug\n" + "\tignore-repeat-pict\tdisable repeat pict message\n" + "\tuse-possible-defect-frames prefer faster channel switch\n" + " -D\t\tstart in detached mode\n"; } /** @@ -2900,10 +2830,9 @@ const char *CommandLineHelp(void) ** @param argc number of arguments ** @param argv arguments vector */ -int ProcessArgs(int argc, char *const argv[]) -{ +int ProcessArgs(int argc, char *const argv[]) { // - // Parse arguments. + // Parse arguments. 
// #ifdef __FreeBSD__ if (!strcmp(*argv, "softhddevice")) { @@ -2914,56 +2843,56 @@ int ProcessArgs(int argc, char *const argv[]) for (;;) { switch (getopt(argc, argv, "-a:c:C:r:d:fg:p:S:sv:w:xDX:")) { - case 'a': // audio device for pcm + case 'a': // audio device for pcm AudioSetDevice(optarg); continue; - case 'c': // channel of audio mixer + case 'c': // channel of audio mixer AudioSetChannel(optarg); continue; - case 'C': // Connector for DRM + case 'C': // Connector for DRM VideoSetConnector(optarg); continue; - case 'r': // Connector for DRM + case 'r': // Connector for DRM VideoSetRefresh(optarg); continue; - case 'S': // Shader + case 'S': // Shader if (VideoSetShader(optarg) < 0) { - fprintf(stderr,_("Too much shaders definded\n")); + fprintf(stderr, _("Too many shaders defined\n")); return 0; } continue; - case 'p': // pass-through audio device + case 'p': // pass-through audio device AudioSetPassthroughDevice(optarg); continue; - case 'd': // x11 display name + case 'd': // x11 display name X11DisplayName = optarg; continue; - case 'f': // fullscreen mode + case 'f': // fullscreen mode ConfigFullscreen = 1; continue; - case 'g': // geometry + case 'g': // geometry if (VideoSetGeometry(optarg) < 0) { - fprintf(stderr, - _("Bad formated geometry please use: [=][<width>{xX}<height>][{+-}<xoffset>{+-}<yoffset>]\n")); + fprintf(stderr, _("Badly formatted geometry, please use: " "[=][<width>{xX}<height>][{+-}<xoffset>{+-}<yoffset>]\n")); return 0; } continue; - case 'v': // video driver + case 'v': // video driver VideoSetDevice(optarg); continue; - case 'x': // x11 server + case 'x': // x11 server ConfigStartX11Server++; continue; - case 'X': // x11 server arguments + case 'X': // x11 server arguments X11ServerArguments = optarg; continue; - case 's': // start in suspend mode + case 's': // start in suspend mode ConfigStartSuspended = 1; continue; - case 'D': // start in detached mode + case 'D': // start in detached mode ConfigStartSuspended = -1; continue; - case 'w': // workarounds + case 'w': // workarounds if (!strcasecmp("no-hw-decoder", optarg)) { VideoHardwareDecoder = 0; } else if (!strcasecmp("no-mpeg-hw-decoder", optarg)) { @@ -3018,22 +2947,21 @@ int ProcessArgs(int argc, char *const argv[]) #include #include -#define XSERVER_MAX_ARGS 512 ///< how many arguments support +#define XSERVER_MAX_ARGS 512 ///< how many arguments support #ifndef __FreeBSD__ -static const char *X11Server = "/usr/bin/X"; ///< default x11 server +static const char *X11Server = "/usr/bin/X"; ///< default x11 server #else -static const char *X11Server = LOCALBASE "/bin/X"; ///< default x11 server +static const char *X11Server = LOCALBASE "/bin/X"; ///< default x11 server #endif -static pid_t X11ServerPid; ///< x11 server pid +static pid_t X11ServerPid; ///< x11 server pid /** ** USR1 signal handler. 
** -** @param sig signal number +** @param sig signal number */ -static void Usr1Handler(int __attribute__((unused)) sig) -{ +static void Usr1Handler(int __attribute__((unused)) sig) { ++Usr1Signal; Debug(3, "x-setup: got signal usr1\n"); @@ -3042,8 +2970,7 @@ static void Usr1Handler(int __attribute__((unused)) sig) /** ** Start the X server */ -static void StartXServer(void) -{ +static void StartXServer(void) { struct sigaction usr1; pid_t pid; const char *sval; @@ -3062,7 +2989,7 @@ static void StartXServer(void) } argn = 1; - if (X11DisplayName) { // append display name + if (X11DisplayName) { // append display name args[argn++] = X11DisplayName; // export display for childs setenv("DISPLAY", X11DisplayName, 1); @@ -3098,7 +3025,7 @@ static void StartXServer(void) Debug(3, "x-setup: Starting X server '%s' '%s'\n", args[0], X11ServerArguments); // fork - if ((pid = fork())) { // parent + if ((pid = fork())) { // parent X11ServerPid = pid; Debug(3, "x-setup: Started x-server pid=%d\n", X11ServerPid); @@ -3106,14 +3033,14 @@ static void StartXServer(void) return; } // child - signal(SIGUSR1, SIG_IGN); // ignore to force answer - //setpgid(0,getpid()); + signal(SIGUSR1, SIG_IGN); // ignore to force answer + // setpgid(0,getpid()); setpgid(pid, 0); // close all open file-handles maxfd = sysconf(_SC_OPEN_MAX); - for (fd = 3; fd < maxfd; fd++) { // keep stdin, stdout, stderr - close(fd); // vdr should open with O_CLOEXEC + for (fd = 3; fd < maxfd; fd++) { // keep stdin, stdout, stderr + close(fd); // vdr should open with O_CLOEXEC } // start the X server @@ -3126,8 +3053,7 @@ static void StartXServer(void) /** ** Exit + cleanup. */ -void SoftHdDeviceExit(void) -{ +void SoftHdDeviceExit(void) { // lets hope that vdr does a good thread cleanup AudioExit(); @@ -3154,7 +3080,7 @@ void SoftHdDeviceExit(void) kill(X11ServerPid, SIGTERM); waittime = 0; - timeout = 500; // 0.5s + timeout = 500; // 0.5s // wait for x11 finishing, with timeout do { wpid = waitpid(X11ServerPid, &status, WNOHANG); @@ -3186,12 +3112,11 @@ void SoftHdDeviceExit(void) /** ** Prepare plugin. ** -** @retval 0 normal start -** @retval 1 suspended start -** @retval -1 detached start +** @retval 0 normal start +** @retval 1 suspended start +** @retval -1 detached start */ -int Start(void) -{ +int Start(void) { if (ConfigStartX11Server) { StartXServer(); } @@ -3224,7 +3149,7 @@ int Start(void) PesInit(PesDemuxAudio); #endif Info(_("[softhddev] ready%s\n"), - ConfigStartSuspended ? ConfigStartSuspended == -1 ? " detached" : " suspended" : ""); + ConfigStartSuspended ? ConfigStartSuspended == -1 ? " detached" : " suspended" : ""); return ConfigStartSuspended; } @@ -3234,8 +3159,7 @@ int Start(void) ** ** @note stop everything, but don't cleanup, module is still called. */ -void Stop(void) -{ +void Stop(void) { #ifdef DEBUG Debug(3, "video: max used PES packet size: %d\n", VideoMaxPacketSize); #endif @@ -3244,13 +3168,12 @@ void Stop(void) /** ** Perform any cleanup or other regular tasks. */ -void Housekeeping(void) -{ +void Housekeeping(void) { // - // when starting an own X11 server fails, try to connect to a already - // running X11 server. This can take some time. + // when starting an own X11 server fails, try to connect to a already + // running X11 server. This can take some time. 
// - if (X11ServerPid) { // check if X11 server still running + if (X11ServerPid) { // check if X11 server still running pid_t wpid; int status; @@ -3274,11 +3197,10 @@ void Housekeeping(void) /** ** Main thread hook, periodic called from main thread. */ -void MainThreadHook(void) -{ - if (Usr1Signal) { // x11 server ready +void MainThreadHook(void) { + if (Usr1Signal) { // x11 server ready // FIYME: x11 server keeps sending sigusr1 signals - signal(SIGUSR1, SIG_IGN); // ignore further signals + signal(SIGUSR1, SIG_IGN); // ignore further signals Usr1Signal = 0; StartVideo(); VideoDisplayWakeup(); @@ -3299,10 +3221,9 @@ extern void DelPip(void); ** @param audio suspend closes audio ** @param dox11 suspend closes x11 server */ -void Suspend(int video, int audio, int dox11) -{ +void Suspend(int video, int audio, int dox11) { pthread_mutex_lock(&SuspendLockMutex); - if (MyVideoStream->SkipStream && SkipAudio) { // already suspended + if (MyVideoStream->SkipStream && SkipAudio) { // already suspended pthread_mutex_unlock(&SuspendLockMutex); return; } @@ -3310,7 +3231,7 @@ void Suspend(int video, int audio, int dox11) Debug(3, "[softhddev]%s:\n", __FUNCTION__); #ifdef USE_PIP - DelPip(); // must stop PIP + DelPip(); // must stop PIP #endif // FIXME: should not be correct, if not both are suspended! @@ -3342,8 +3263,7 @@ void Suspend(int video, int audio, int dox11) /** ** Resume plugin. */ -void Resume(void) -{ +void Resume(void) { if (!MyVideoStream->SkipStream && !SkipAudio) { // we are not suspended return; } @@ -3353,10 +3273,10 @@ void Resume(void) pthread_mutex_lock(&SuspendLockMutex); // FIXME: start x11 - if (!MyVideoStream->HwDecoder) { // video not running + if (!MyVideoStream->HwDecoder) { // video not running StartVideo(); } - if (!MyAudioDecoder) { // audio not running + if (!MyAudioDecoder) { // audio not running // StartAudio(); AudioInit(); av_new_packet(AudioAvPkt, AUDIO_BUFFER_SIZE); @@ -3376,14 +3296,13 @@ void Resume(void) /* ** Get decoder statistics. ** -** @param[out] missed missed frames -** @param[out] duped duped frames +** @param[out] missed missed frames +** @param[out] duped duped frames ** @param[out] dropped dropped frames -** @param[out] count number of decoded frames +** @param[out] count number of decoded frames */ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime, int *width, int *height, - int *color, int *eotf) -{ + int *color, int *eotf) { *missed = 0; *duped = 0; *dropped = 0; @@ -3395,24 +3314,26 @@ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *framet *eotf = 0; if (MyVideoStream->HwDecoder) { VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime, width, height, color, - eotf); + eotf); } } /** ** Scale the currently shown video. 
** -** @param x video window x coordinate OSD relative -** @param y video window y coordinate OSD relative +** @param x video window x coordinate OSD relative +** @param y video window y coordinate OSD relative ** @param width video window width OSD relative ** @param height video window height OSD relative */ -void ScaleVideo(int x, int y, int width, int height) -{ +void ScaleVideo(int x, int y, int width, int height) { #ifdef USE_PIP if (PiPActive && !(x & y & width & height)) { Info("[softhddev]%s: fullscreen with PiP active.\n", __FUNCTION__); - x = mwx; y = mwy; width = mww; height = mwh; + x = mwx; + y = mwy; + width = mww; + height = mwh; } #endif if (MyVideoStream->HwDecoder) { @@ -3429,23 +3350,22 @@ void ScaleVideo(int x, int y, int width, int height) /** ** Set PIP position. ** -** @param x video window x coordinate OSD relative -** @param y video window y coordinate OSD relative -** @param width video window width OSD relative -** @param height video window height OSD relative -** @param pip_x pip window x coordinate OSD relative -** @param pip_y pip window y coordinate OSD relative -** @param pip_width pip window width OSD relative -** @param pip_height pip window height OSD relative +** @param x video window x coordinate OSD relative +** @param y video window y coordinate OSD relative +** @param width video window width OSD relative +** @param height video window height OSD relative +** @param pip_x pip window x coordinate OSD relative +** @param pip_y pip window y coordinate OSD relative +** @param pip_width pip window width OSD relative +** @param pip_height pip window height OSD relative */ -void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height) -{ - if (!MyVideoStream->HwDecoder) { // video not running +void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height) { + if (!MyVideoStream->HwDecoder) { // video not running return; } ScaleVideo(x, y, width, height); - if (!PipVideoStream->HwDecoder) { // pip not running + if (!PipVideoStream->HwDecoder) { // pip not running return; } VideoSetOutputPosition(PipVideoStream->HwDecoder, pip_x, pip_y, pip_width, pip_height); @@ -3454,18 +3374,17 @@ void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, i /** ** Start PIP stream. 
** -** @param x video window x coordinate OSD relative -** @param y video window y coordinate OSD relative -** @param width video window width OSD relative -** @param height video window height OSD relative -** @param pip_x pip window x coordinate OSD relative -** @param pip_y pip window y coordinate OSD relative -** @param pip_width pip window width OSD relative -** @param pip_height pip window height OSD relative +** @param x video window x coordinate OSD relative +** @param y video window y coordinate OSD relative +** @param width video window width OSD relative +** @param height video window height OSD relative +** @param pip_x pip window x coordinate OSD relative +** @param pip_y pip window y coordinate OSD relative +** @param pip_width pip window width OSD relative +** @param pip_height pip window height OSD relative */ -void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height) -{ - if (!MyVideoStream->HwDecoder) { // video not running +void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height) { + if (!MyVideoStream->HwDecoder) { // video not running return; } @@ -3473,23 +3392,28 @@ void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip VideoStreamOpen(PipVideoStream); } PipSetPosition(x, y, width, height, pip_x, pip_y, pip_width, pip_height); - mwx = x; mwy = y; mww = width; mwh = height; + mwx = x; + mwy = y; + mww = width; + mwh = height; PiPActive = 1; } /** ** Stop PIP. */ -void PipStop(void) -{ +void PipStop(void) { int i; - if (!MyVideoStream->HwDecoder) { // video not running + if (!MyVideoStream->HwDecoder) { // video not running return; } PiPActive = 0; - mwx = 0; mwy = 0; mww = 0; mwh = 0; + mwx = 0; + mwy = 0; + mww = 0; + mwh = 0; ScaleVideo(0, 0, 0, 0); PipVideoStream->Close = 1; @@ -3507,9 +3431,6 @@ void PipStop(void) ** ** @return number of bytes used, 0 if internal buffer are full. */ -int PipPlayVideo(const uint8_t * data, int size) -{ - return PlayVideo3(PipVideoStream, data, size); -} +int PipPlayVideo(const uint8_t *data, int size) { return PlayVideo3(PipVideoStream, data, size); } #endif diff --git a/softhddev.h b/softhddev.h index 154212a..e6aea21 100644 --- a/softhddev.h +++ b/softhddev.h @@ -1,7 +1,7 @@ /// -/// @file softhddev.h @brief software HD device plugin header file. +/// @file softhddev.h @brief software HD device plugin header file. /// -/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2011 - 2015 by Johns. All Rights Reserved. 
/// /// Contributor(s): /// @@ -21,95 +21,94 @@ ////////////////////////////////////////////////////////////////////////////// #ifdef __cplusplus -extern "C" -{ +extern "C" { #endif - /// C callback feed key press - extern void FeedKeyPress(const char *, const char *, int, int, const char *); +/// C callback feed key press +extern void FeedKeyPress(const char *, const char *, int, int, const char *); - /// C plugin get osd size and ascpect - extern void GetOsdSize(int *, int *, double *); +/// C plugin get osd size and ascpect +extern void GetOsdSize(int *, int *, double *); - /// C plugin close osd - extern void OsdClose(void); - /// C plugin draw osd pixmap - extern void OsdDrawARGB(int, int, int, int, int, const uint8_t *, int, int); +/// C plugin close osd +extern void OsdClose(void); +/// C plugin draw osd pixmap +extern void OsdDrawARGB(int, int, int, int, int, const uint8_t *, int, int); - /// C plugin play audio packet - extern int PlayAudio(const uint8_t *, int, uint8_t); - /// C plugin play TS audio packet - extern int PlayTsAudio(const uint8_t *, int); - /// C plugin set audio volume - extern void SetVolumeDevice(int); - /// C plugin reset channel id (restarts audio) - extern void ResetChannelId(void); +/// C plugin play audio packet +extern int PlayAudio(const uint8_t *, int, uint8_t); +/// C plugin play TS audio packet +extern int PlayTsAudio(const uint8_t *, int); +/// C plugin set audio volume +extern void SetVolumeDevice(int); +/// C plugin reset channel id (restarts audio) +extern void ResetChannelId(void); - /// C plugin play video packet - extern int PlayVideo(const uint8_t *, int); - /// C plugin play TS video packet - extern void PlayTsVideo(const uint8_t *, int); - /// C plugin grab an image - extern uint8_t *GrabImage(int *, int, int, int, int); +/// C plugin play video packet +extern int PlayVideo(const uint8_t *, int); +/// C plugin play TS video packet +extern void PlayTsVideo(const uint8_t *, int); +/// C plugin grab an image +extern uint8_t *GrabImage(int *, int, int, int, int); - /// C plugin set play mode - extern int SetPlayMode(int); - /// C plugin get current system time counter - extern int64_t GetSTC(void); - /// C plugin get video stream size and aspect - extern void GetVideoSize(int *, int *, double *); - /// C plugin set trick speed - extern void TrickSpeed(int); - /// C plugin clears all video and audio data from the device - extern void Clear(void); - /// C plugin sets the device into play mode - extern void Play(void); - /// C plugin sets the device into "freeze frame" mode - extern void Freeze(void); - /// C plugin mute audio - extern void Mute(void); - /// C plugin display I-frame as a still picture. - extern void StillPicture(const uint8_t *, int); - /// C plugin poll if ready - extern int Poll(int); - /// C plugin flush output buffers - extern int Flush(int); +/// C plugin set play mode +extern int SetPlayMode(int); +/// C plugin get current system time counter +extern int64_t GetSTC(void); +/// C plugin get video stream size and aspect +extern void GetVideoSize(int *, int *, double *); +/// C plugin set trick speed +extern void TrickSpeed(int); +/// C plugin clears all video and audio data from the device +extern void Clear(void); +/// C plugin sets the device into play mode +extern void Play(void); +/// C plugin sets the device into "freeze frame" mode +extern void Freeze(void); +/// C plugin mute audio +extern void Mute(void); +/// C plugin display I-frame as a still picture. 
+extern void StillPicture(const uint8_t *, int); +/// C plugin poll if ready +extern int Poll(int); +/// C plugin flush output buffers +extern int Flush(int); - /// C plugin command line help - extern const char *CommandLineHelp(void); - /// C plugin process the command line arguments - extern int ProcessArgs(int, char *const[]); +/// C plugin command line help +extern const char *CommandLineHelp(void); +/// C plugin process the command line arguments +extern int ProcessArgs(int, char *const[]); - /// C plugin exit + cleanup - extern void SoftHdDeviceExit(void); - /// C plugin start code - extern int Start(void); - /// C plugin stop code - extern void Stop(void); - /// C plugin house keeping - extern void Housekeeping(void); - /// C plugin main thread hook - extern void MainThreadHook(void); +/// C plugin exit + cleanup +extern void SoftHdDeviceExit(void); +/// C plugin start code +extern int Start(void); +/// C plugin stop code +extern void Stop(void); +/// C plugin house keeping +extern void Housekeeping(void); +/// C plugin main thread hook +extern void MainThreadHook(void); - /// Suspend plugin - extern void Suspend(int, int, int); - /// Resume plugin - extern void Resume(void); +/// Suspend plugin +extern void Suspend(int, int, int); +/// Resume plugin +extern void Resume(void); - /// Get decoder statistics - extern void GetStats(int *, int *, int *, int *, float *, int *, int *, int *, int *); - /// C plugin scale video - extern void ScaleVideo(int, int, int, int); +/// Get decoder statistics +extern void GetStats(int *, int *, int *, int *, float *, int *, int *, int *, int *); +/// C plugin scale video +extern void ScaleVideo(int, int, int, int); - /// Set Pip position - extern void PipSetPosition(int, int, int, int, int, int, int, int); - /// Pip start - extern void PipStart(int, int, int, int, int, int, int, int); - /// Pip stop - extern void PipStop(void); - /// Pip play video packet - extern int PipPlayVideo(const uint8_t *, int); +/// Set Pip position +extern void PipSetPosition(int, int, int, int, int, int, int, int); +/// Pip start +extern void PipStart(int, int, int, int, int, int, int, int); +/// Pip stop +extern void PipStop(void); +/// Pip play video packet +extern int PipPlayVideo(const uint8_t *, int); - extern const char *X11DisplayName; ///< x11 display name +extern const char *X11DisplayName; ///< x11 display name #ifdef __cplusplus } #endif diff --git a/softhddevice.h b/softhddevice.h index 45906a1..c75885c 100644 --- a/softhddevice.h +++ b/softhddevice.h @@ -1,7 +1,7 @@ /// /// @file softhddevice.h @brief software HD device plugin header file. /// -/// Copyright (c) 2011, 2014 by Johns. All Rights Reserved. +/// Copyright (c) 2011, 2014 by Johns. All Rights Reserved. /// /// Contributor(s): /// diff --git a/softhddevice_service.h b/softhddevice_service.h index 7794f83..1a25c72 100644 --- a/softhddevice_service.h +++ b/softhddevice_service.h @@ -1,7 +1,7 @@ /// /// @file softhddev_service.h @brief software HD device service header file. /// -/// Copyright (c) 2012 by durchflieger. All Rights Reserved. +/// Copyright (c) 2012 by durchflieger. All Rights Reserved. 
/// /// Contributor(s): /// @@ -22,15 +22,13 @@ #pragma once -#define ATMO_GRAB_SERVICE "SoftHDDevice-AtmoGrabService-v1.0" -#define ATMO1_GRAB_SERVICE "SoftHDDevice-AtmoGrabService-v1.1" -#define OSD_3DMODE_SERVICE "SoftHDDevice-Osd3DModeService-v1.0" +#define ATMO_GRAB_SERVICE "SoftHDDevice-AtmoGrabService-v1.0" +#define ATMO1_GRAB_SERVICE "SoftHDDevice-AtmoGrabService-v1.1" +#define OSD_3DMODE_SERVICE "SoftHDDevice-Osd3DModeService-v1.0" -enum -{ GRAB_IMG_RGBA_FORMAT_B8G8R8A8 }; +enum { GRAB_IMG_RGBA_FORMAT_B8G8R8A8 }; -typedef struct -{ +typedef struct { int structSize; // request data @@ -45,13 +43,11 @@ typedef struct void *img; } SoftHDDevice_AtmoGrabService_v1_0_t; -typedef struct -{ +typedef struct { int Mode; } SoftHDDevice_Osd3DModeService_v1_0_t; -typedef struct -{ +typedef struct { // request/reply data int width; diff --git a/video.c b/video.c index 93f9a81..6f3be7f 100644 --- a/video.c +++ b/video.c @@ -1,7 +1,7 @@ /// /// @file video.c @brief Video module /// -/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. +/// Copyright (c) 2009 - 2015 by Johns. All Rights Reserved. /// /// Contributor(s): /// @@ -38,51 +38,51 @@ /// @todo FIXME: use vaErrorStr for all VA-API errors. /// -#define USE_XLIB_XCB ///< use xlib/xcb backend -#define noUSE_SCREENSAVER ///< support disable screensaver +#define USE_XLIB_XCB ///< use xlib/xcb backend +#define noUSE_SCREENSAVER ///< support disable screensaver -#define USE_GRAB ///< experimental grab code -// #define USE_GLX ///< outdated GLX code -#define USE_DOUBLEBUFFER ///< use GLX double buffers -#define USE_CUVID ///< enable cuvid support -// #define AV_INFO ///< log a/v sync informations +#define USE_GRAB ///< experimental grab code +// #define USE_GLX ///< outdated GLX code +#define USE_DOUBLEBUFFER ///< use GLX double buffers +#define USE_CUVID ///< enable cuvid support +// #define AV_INFO ///< log a/v sync informations #ifndef AV_INFO_TIME -#define AV_INFO_TIME (50 * 60) ///< a/v info every minute +#define AV_INFO_TIME (50 * 60) ///< a/v info every minute #endif -#define USE_VIDEO_THREAD ///< run decoder in an own thread +#define USE_VIDEO_THREAD ///< run decoder in an own thread -#include -#include +#include #include #include -#include +#include +#include -#include /* File Control Definitions */ -#include /* POSIX Terminal Control Definitions */ -#include /* UNIX Standard Definitions */ -#include /* ERROR Number Definitions */ -#include /* ioctl() */ +#include /* ERROR Number Definitions */ +#include /* File Control Definitions */ +#include /* ioctl() */ +#include /* POSIX Terminal Control Definitions */ +#include /* UNIX Standard Definitions */ -#include +#include #include -#include #include +#include +#include #include #include -#include #include -#define _(str) gettext(str) ///< gettext shortcut -#define _N(str) str ///< gettext_noop shortcut +#define _(str) gettext(str) ///< gettext shortcut +#define _N(str) str ///< gettext_noop shortcut #ifdef USE_VIDEO_THREAD #ifndef __USE_GNU #define __USE_GNU #endif #include -#include #include +#include #ifndef HAVE_PTHREAD_NAME /// only available with newer glibc #define pthread_setname_np(thread, name) @@ -98,8 +98,8 @@ #include #ifdef USE_SCREENSAVER -#include #include +#include #endif // #include @@ -116,13 +116,12 @@ /** * @brief Action on the _NET_WM_STATE property */ -typedef enum -{ +typedef enum { /* Remove/unset property */ XCB_EWMH_WM_STATE_REMOVE = 0, /* Add/set property */ XCB_EWMH_WM_STATE_ADD = 1, - /* Toggle property */ + /* Toggle property */ XCB_EWMH_WM_STATE_TOGGLE 
= 2 } xcb_ewmh_wm_state_action_t; #endif @@ -134,9 +133,11 @@ typedef enum #else #include #endif +// clang-format off #include #include #include +// clang-format on #endif #include @@ -144,26 +145,28 @@ typedef enum #include #ifdef CUVID +// clang-format off #include #include #include #include "drvapi_error_string.h" +// clang-format on #define __DEVICE_TYPES_H__ #endif #ifdef VAAPI -#include #include #include +#include #ifdef RASPI #include #endif #include -#define TO_AVHW_DEVICE_CTX(x) ((AVHWDeviceContext*)x->data) -#define TO_AVHW_FRAMES_CTX(x) ((AVHWFramesContext*)x->data) -#define TO_VAAPI_DEVICE_CTX(x) ((AVVAAPIDeviceContext*)TO_AVHW_DEVICE_CTX(x)->hwctx) -#define TO_VAAPI_FRAMES_CTX(x) ((AVVAAPIFramesContext*)TO_AVHW_FRAMES_CTX(x)->hwctx) +#define TO_AVHW_DEVICE_CTX(x) ((AVHWDeviceContext *)x->data) +#define TO_AVHW_FRAMES_CTX(x) ((AVHWFramesContext *)x->data) +#define TO_VAAPI_DEVICE_CTX(x) ((AVVAAPIDeviceContext *)TO_AVHW_DEVICE_CTX(x)->hwctx) +#define TO_VAAPI_FRAMES_CTX(x) ((AVVAAPIFramesContext *)TO_AVHW_FRAMES_CTX(x)->hwctx) #endif #include @@ -196,24 +199,26 @@ typedef void *EGLImageKHR; #include #include -#if defined(YADIF) || defined (VAAPI) +#if defined(YADIF) || defined(VAAPI) #include #include #include #endif -#include "iatomic.h" // portable atomic_t +// clang-format off +#include "iatomic.h" // portable atomic_t #include "misc.h" #include "video.h" #include "audio.h" #include "codec.h" +// clang-format on #if defined(APIVERSNUM) && APIVERSNUM < 20400 #error "VDR 2.4.0 or greater is required!" #endif -#define HAS_FFMPEG_3_4_API (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57,107,100)) -#define HAS_FFMPEG_4_API (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(58,18,100)) +#define HAS_FFMPEG_3_4_API (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100)) +#define HAS_FFMPEG_4_API (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(58, 18, 100)) #if !HAS_FFMPEG_3_4_API #error "FFmpeg 3.4 or greater is required!" @@ -226,76 +231,70 @@ typedef void *EGLImageKHR; /// /// Video resolutions selector. /// -typedef enum _video_resolutions_ -{ - VideoResolution576i, ///< ...x576 interlaced - VideoResolution720p, ///< ...x720 progressive - VideoResolutionFake1080i, ///< 1280x1080 1440x1080 interlaced - VideoResolution1080i, ///< 1920x1080 interlaced - VideoResolutionUHD, /// UHD progressive - VideoResolutionMax ///< number of resolution indexs +typedef enum _video_resolutions_ { + VideoResolution576i, ///< ...x576 interlaced + VideoResolution720p, ///< ...x720 progressive + VideoResolutionFake1080i, ///< 1280x1080 1440x1080 interlaced + VideoResolution1080i, ///< 1920x1080 interlaced + VideoResolutionUHD, /// UHD progressive + VideoResolutionMax ///< number of resolution indexs } VideoResolutions; /// /// Video deinterlace modes. /// -typedef enum _video_deinterlace_modes_ -{ - VideoDeinterlaceCuda, ///< Cuda build in deinterlace - VideoDeinterlaceYadif, ///< Yadif deinterlace +typedef enum _video_deinterlace_modes_ { + VideoDeinterlaceCuda, ///< Cuda build in deinterlace + VideoDeinterlaceYadif, ///< Yadif deinterlace } VideoDeinterlaceModes; /// /// Video scaleing modes. 
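/*
 * Sketch of the mapping implied by the VideoResolutions comments above; the
 * actual grouping is done by VideoResolutionGroup() further down in this
 * file, so treat the thresholds here as an illustration, not the real code.
 */
static VideoResolutions ExampleResolutionGroup(int width, int height) {
    if (height >= 2160)
        return VideoResolutionUHD;                      // 3840x2160 progressive
    if (height > 720)                                   // 1080-line material
        return width < 1920 ? VideoResolutionFake1080i  // 1280x1080 / 1440x1080
                            : VideoResolution1080i;     // full 1920x1080
    if (height > 576)
        return VideoResolution720p;                     // ...x720 progressive
    return VideoResolution576i;                         // ...x576 SD
}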
/// -typedef enum _video_scaling_modes_ -{ - VideoScalingNormal, ///< normal scaling - VideoScalingFast, ///< fastest scaling - VideoScalingHQ, ///< high quality scaling - VideoScalingAnamorphic, ///< anamorphic scaling +typedef enum _video_scaling_modes_ { + VideoScalingNormal, ///< normal scaling + VideoScalingFast, ///< fastest scaling + VideoScalingHQ, ///< high quality scaling + VideoScalingAnamorphic, ///< anamorphic scaling } VideoScalingModes; /// /// Video zoom modes. /// -typedef enum _video_zoom_modes_ -{ - VideoNormal, ///< normal - VideoStretch, ///< stretch to all edges - VideoCenterCutOut, ///< center and cut out - VideoNone, ///< no scaling +typedef enum _video_zoom_modes_ { + VideoNormal, ///< normal + VideoStretch, ///< stretch to all edges + VideoCenterCutOut, ///< center and cut out + VideoNone, ///< no scaling } VideoZoomModes; /// /// Video color space conversions. /// -typedef enum _video_color_space_ -{ - VideoColorSpaceNone, ///< no conversion - VideoColorSpaceBt601, ///< ITU.BT-601 Y'CbCr - VideoColorSpaceBt709, ///< ITU.BT-709 HDTV Y'CbCr - VideoColorSpaceSmpte240 ///< SMPTE-240M Y'PbPr +typedef enum _video_color_space_ { + VideoColorSpaceNone, ///< no conversion + VideoColorSpaceBt601, ///< ITU.BT-601 Y'CbCr + VideoColorSpaceBt709, ///< ITU.BT-709 HDTV Y'CbCr + VideoColorSpaceSmpte240 ///< SMPTE-240M Y'PbPr } VideoColorSpace; /// /// Video output module structure and typedef. /// -typedef struct _video_module_ -{ - const char *Name; ///< video output module name - char Enabled; ///< flag output module enabled +typedef struct _video_module_ { + const char *Name; ///< video output module name + char Enabled; ///< flag output module enabled /// allocate new video hw decoder VideoHwDecoder *(*const NewHwDecoder)(VideoStream *); void (*const DelHwDecoder)(VideoHwDecoder *); unsigned (*const GetSurface)(VideoHwDecoder *, const AVCodecContext *); void (*const ReleaseSurface)(VideoHwDecoder *, unsigned); - enum AVPixelFormat (*const get_format) (VideoHwDecoder *, AVCodecContext *, const enum AVPixelFormat *); + enum AVPixelFormat (*const get_format)(VideoHwDecoder *, AVCodecContext *, const enum AVPixelFormat *); void (*const RenderFrame)(VideoHwDecoder *, const AVCodecContext *, const AVFrame *); void *(*const GetHwAccelContext)(VideoHwDecoder *); void (*const SetClock)(VideoHwDecoder *, int64_t); - int64_t(*const GetClock) (const VideoHwDecoder *); + int64_t (*const GetClock)(const VideoHwDecoder *); void (*const SetClosing)(const VideoHwDecoder *); void (*const ResetStart)(const VideoHwDecoder *); void (*const SetTrickSpeed)(const VideoHwDecoder *, int); @@ -307,18 +306,17 @@ typedef struct _video_module_ /// module display handler thread void (*const DisplayHandlerThread)(void); - void (*const OsdClear)(void); ///< clear OSD + void (*const OsdClear)(void); ///< clear OSD /// draw OSD ARGB area void (*const OsdDrawARGB)(int, int, int, int, int, const uint8_t *, int, int); - void (*const OsdInit)(int, int); ///< initialize OSD - void (*const OsdExit)(void); ///< cleanup OSD + void (*const OsdInit)(int, int); ///< initialize OSD + void (*const OsdExit)(void); ///< cleanup OSD - int (*const Init)(const char *); ///< initialize video output module - void (*const Exit)(void); ///< cleanup video output module + int (*const Init)(const char *); ///< initialize video output module + void (*const Exit)(void); ///< cleanup video output module } VideoModule; -typedef struct -{ +typedef struct { /** Left X co-ordinate. Inclusive. 
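/*
 * Sketch of how a backend is normally described with this table and how the
 * rest of the file dispatches through it: exactly one VideoModule is active
 * at a time (VideoUsedModule below) and the public Video* wrappers forward
 * into its function pointers.  The stub backend here is purely illustrative;
 * the real cuvid/noop tables are defined later in this file.
 */
static int ExampleInit(const char *display) { (void)display; return 0; } // stub
static void ExampleExit(void) {}
static void ExampleOsdClear(void) {}

static const VideoModule ExampleModule = {
    .Name = "example",
    .Enabled = 1,
    .OsdClear = ExampleOsdClear,
    .Init = ExampleInit,
    .Exit = ExampleExit,
    // a real backend also fills NewHwDecoder, get_format, RenderFrame, ...
};

// typical dispatch pattern used by the wrappers:
//     VideoUsedModule->OsdClear();
//     VideoUsedModule->Init(display_name);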
*/ uint32_t x0; @@ -337,29 +335,29 @@ typedef struct // Defines //---------------------------------------------------------------------------- -#define CODEC_SURFACES_MAX 12 // +#define CODEC_SURFACES_MAX 12 // -#define VIDEO_SURFACES_MAX 6 ///< video output surfaces for queue +#define VIDEO_SURFACES_MAX 6 ///< video output surfaces for queue -#define NUM_SHADERS 5 // Number of supported user shaders with placebo +#define NUM_SHADERS 5 // Number of supported user shaders with placebo #if defined VAAPI && !defined RASPI #define PIXEL_FORMAT AV_PIX_FMT_VAAPI -#define SWAP_BUFFER_SIZE 3 +#define SWAP_BUFFER_SIZE 3 #endif #ifdef CUVID #define PIXEL_FORMAT AV_PIX_FMT_CUDA -#define SWAP_BUFFER_SIZE 3 +#define SWAP_BUFFER_SIZE 3 #endif #if defined RASPI #define PIXEL_FORMAT AV_PIX_FMT_MMAL -#define SWAP_BUFFER_SIZE 3 +#define SWAP_BUFFER_SIZE 3 #endif //---------------------------------------------------------------------------- // Variables //---------------------------------------------------------------------------- -AVBufferRef *HwDeviceContext; ///< ffmpeg HW device context -char VideoIgnoreRepeatPict; ///< disable repeat pict warning +AVBufferRef *HwDeviceContext; ///< ffmpeg HW device context +char VideoIgnoreRepeatPict; ///< disable repeat pict warning #ifdef RASPI int Planes = 3; @@ -369,35 +367,35 @@ int Planes = 2; unsigned char *posd; -static const char *VideoDriverName = "cuvid"; ///< video output device -static Display *XlibDisplay; ///< Xlib X11 display -static xcb_connection_t *Connection; ///< xcb connection -static xcb_colormap_t VideoColormap; ///< video colormap -static xcb_window_t VideoWindow; ///< video window -static xcb_screen_t const *VideoScreen; ///< video screen -static uint32_t VideoBlankTick; ///< blank cursor timer -static xcb_pixmap_t VideoCursorPixmap; ///< blank curosr pixmap -static xcb_cursor_t VideoBlankCursor; ///< empty invisible cursor +static const char *VideoDriverName = "cuvid"; ///< video output device +static Display *XlibDisplay; ///< Xlib X11 display +static xcb_connection_t *Connection; ///< xcb connection +static xcb_colormap_t VideoColormap; ///< video colormap +static xcb_window_t VideoWindow; ///< video window +static xcb_screen_t const *VideoScreen; ///< video screen +static uint32_t VideoBlankTick; ///< blank cursor timer +static xcb_pixmap_t VideoCursorPixmap; ///< blank curosr pixmap +static xcb_cursor_t VideoBlankCursor; ///< empty invisible cursor -static int VideoWindowX; ///< video output window x coordinate -static int VideoWindowY; ///< video outout window y coordinate -static unsigned VideoWindowWidth; ///< video output window width -static unsigned VideoWindowHeight; ///< video output window height +static int VideoWindowX; ///< video output window x coordinate +static int VideoWindowY; ///< video outout window y coordinate +static unsigned VideoWindowWidth; ///< video output window width +static unsigned VideoWindowHeight; ///< video output window height -static const VideoModule NoopModule; ///< forward definition of noop module +static const VideoModule NoopModule; ///< forward definition of noop module /// selected video module static const VideoModule *VideoUsedModule = &NoopModule; -signed char VideoHardwareDecoder = -1; ///< flag use hardware decoder +signed char VideoHardwareDecoder = -1; ///< flag use hardware decoder -static char VideoSurfaceModesChanged; ///< flag surface modes changed +static char VideoSurfaceModesChanged; ///< flag surface modes changed /// flag use transparent OSD. 
static const char VideoTransparentOsd = 1; -static uint32_t VideoBackground; ///< video background color -char VideoStudioLevels; ///< flag use studio levels +static uint32_t VideoBackground; ///< video background color +char VideoStudioLevels; ///< flag use studio levels /// Default deinterlace mode. static VideoDeinterlaceModes VideoDeinterlace[VideoResolutionMax]; @@ -441,10 +439,10 @@ static char *DRMConnector = NULL; /// Default Value for DRM Refreshrate static int DRMRefresh = 50; -static char Video60HzMode; ///< handle 60hz displays -static char VideoSoftStartSync; ///< soft start sync audio/video -static const int VideoSoftStartFrames = 100; ///< soft start frames -static char VideoShowBlackPicture; ///< flag show black picture +static char Video60HzMode; ///< handle 60hz displays +static char VideoSoftStartSync; ///< soft start sync audio/video +static const int VideoSoftStartFrames = 100; ///< soft start frames +static char VideoShowBlackPicture; ///< flag show black picture static float VideoBrightness = 0.0f; static float VideoContrast = 1.0f; @@ -457,7 +455,7 @@ static int VideoScalerTest = 0; static int VideoColorBlindness = 0; static float VideoColorBlindnessFaktor = 1.0f; -static char* shadersp[NUM_SHADERS]; +static char *shadersp[NUM_SHADERS]; char MyConfigDir[200]; static int num_shaders = 0; static int LUTon = -1; @@ -468,56 +466,56 @@ static xcb_atom_t NetWmStateFullscreen; ///< fullscreen wm-state message atom static xcb_atom_t NetWmStateAbove; #ifdef DEBUG -extern uint32_t VideoSwitch; ///< ticks for channel switch +extern uint32_t VideoSwitch; ///< ticks for channel switch #endif -extern void AudioVideoReady(int64_t); ///< tell audio video is ready +extern void AudioVideoReady(int64_t); ///< tell audio video is ready #ifdef USE_VIDEO_THREAD -static pthread_t VideoThread; ///< video decode thread -static pthread_cond_t VideoWakeupCond; ///< wakeup condition variable -static pthread_mutex_t VideoMutex; ///< video condition mutex -static pthread_mutex_t VideoLockMutex; ///< video lock mutex -pthread_mutex_t OSDMutex; ///< OSD update mutex +static pthread_t VideoThread; ///< video decode thread +static pthread_cond_t VideoWakeupCond; ///< wakeup condition variable +static pthread_mutex_t VideoMutex; ///< video condition mutex +static pthread_mutex_t VideoLockMutex; ///< video lock mutex +pthread_mutex_t OSDMutex; ///< OSD update mutex #endif -static pthread_t VideoDisplayThread; ///< video display thread +static pthread_t VideoDisplayThread; ///< video display thread // static pthread_cond_t VideoDisplayWakeupCond; ///< wakeup condition variable // static pthread_mutex_t VideoDisplayMutex; ///< video condition mutex // static pthread_mutex_t VideoDisplayLockMutex; ///< video lock mutex -static int OsdConfigWidth; ///< osd configured width -static int OsdConfigHeight; ///< osd configured height -static char OsdShown; ///< flag show osd -static char Osd3DMode; ///< 3D OSD mode -static int OsdWidth; ///< osd width -static int OsdHeight; ///< osd height -static int OsdDirtyX; ///< osd dirty area x -static int OsdDirtyY; ///< osd dirty area y -static int OsdDirtyWidth; ///< osd dirty area width -static int OsdDirtyHeight; ///< osd dirty area height +static int OsdConfigWidth; ///< osd configured width +static int OsdConfigHeight; ///< osd configured height +static char OsdShown; ///< flag show osd +static char Osd3DMode; ///< 3D OSD mode +static int OsdWidth; ///< osd width +static int OsdHeight; ///< osd height +static int OsdDirtyX; ///< osd dirty area x +static int 
OsdDirtyY; ///< osd dirty area y +static int OsdDirtyWidth; ///< osd dirty area width +static int OsdDirtyHeight; ///< osd dirty area height static void (*VideoEventCallback)(void) = NULL; /// callback function to notify VDR about Video Events -static int64_t VideoDeltaPTS; ///< FIXME: fix pts +static int64_t VideoDeltaPTS; ///< FIXME: fix pts #ifdef USE_SCREENSAVER -static char DPMSDisabled; ///< flag we have disabled dpms -static char EnableDPMSatBlackScreen; ///< flag we should enable dpms at black screen +static char DPMSDisabled; ///< flag we have disabled dpms +static char EnableDPMSatBlackScreen; ///< flag we should enable dpms at black screen #endif static unsigned int Count; -static int EglEnabled; ///< use EGL -static int GlxVSyncEnabled = 1; ///< enable/disable v-sync +static int EglEnabled; ///< use EGL +static int GlxVSyncEnabled = 1; ///< enable/disable v-sync #ifdef CUVID -static GLXContext glxSharedContext; ///< shared gl context -static GLXContext glxContext; ///< our gl context +static GLXContext glxSharedContext; ///< shared gl context +static GLXContext glxContext; ///< our gl context -static GLXContext glxThreadContext; ///< our gl context for the thread +static GLXContext glxThreadContext; ///< our gl context for the thread -static XVisualInfo *GlxVisualInfo; ///< our gl visual +static XVisualInfo *GlxVisualInfo; ///< our gl visual static void GlxSetupWindow(xcb_window_t window, int width, int height, GLXContext context); GLXContext OSDcontext; #else @@ -529,31 +527,31 @@ static EGLDisplay eglDisplay; static EGLSurface eglSurface, eglOSDSurface; static EGLint eglAttrs[10]; static int eglVersion = 2; -static EGLImageKHR(EGLAPIENTRY * CreateImageKHR) (EGLDisplay, EGLContext, EGLenum, EGLClientBuffer, const EGLint *); -static EGLBoolean(EGLAPIENTRY * DestroyImageKHR) (EGLDisplay, EGLImageKHR); -static void (EGLAPIENTRY * EGLImageTargetTexture2DOES) (GLenum, GLeglImageOES); +static EGLImageKHR(EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext, EGLenum, EGLClientBuffer, const EGLint *); +static EGLBoolean(EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR); +static void(EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES); PFNEGLCREATESYNCKHRPROC eglCreateSyncKHR; PFNEGLDESTROYSYNCKHRPROC eglDestroySyncKHR; PFNEGLWAITSYNCKHRPROC eglWaitSyncKHR; PFNEGLCLIENTWAITSYNCKHRPROC eglClientWaitSyncKHR; PFNEGLDUPNATIVEFENCEFDANDROIDPROC eglDupNativeFenceFDANDROID; -static EGLContext eglThreadContext; ///< our gl context for the thread +static EGLContext eglThreadContext; ///< our gl context for the thread static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContext context); EGLContext OSDcontext; #endif -static GLuint OsdGlTextures[2]; ///< gl texture for OSD -static int OsdIndex = 0; ///< index into OsdGlTextures +static GLuint OsdGlTextures[2]; ///< gl texture for OSD +static int OsdIndex = 0; ///< index into OsdGlTextures //---------------------------------------------------------------------------- // Common Functions //---------------------------------------------------------------------------- -void VideoThreadLock(void); ///< lock video thread -void VideoThreadUnlock(void); ///< unlock video thread -static void VideoThreadExit(void); ///< exit/kill video thread +void VideoThreadLock(void); ///< lock video thread +void VideoThreadUnlock(void); ///< unlock video thread +static void VideoThreadExit(void); ///< exit/kill video thread #ifdef USE_SCREENSAVER static void X11SuspendScreenSaver(xcb_connection_t *, int); @@ -562,8 +560,7 @@ static 
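/*
 * Sketch of how the _NET_WM_STATE action enum from the top of this file and
 * the NetWmStateFullscreen atom above are typically used: a client message
 * asks the window manager to add/remove/toggle the fullscreen state.  The
 * Connection, VideoWindow and VideoScreen globals are the ones declared
 * above; the _NET_WM_STATE atom itself is passed in as a parameter because
 * its global is not visible in this hunk.  The real toggle in this file may
 * differ in detail.
 */
static void ExampleSetWmFullscreen(xcb_atom_t net_wm_state, int action) {
    xcb_client_message_event_t ev = {0};

    ev.response_type = XCB_CLIENT_MESSAGE;
    ev.format = 32;
    ev.window = VideoWindow;
    ev.type = net_wm_state;                   // the _NET_WM_STATE atom
    ev.data.data32[0] = action;               // XCB_EWMH_WM_STATE_ADD/REMOVE/TOGGLE
    ev.data.data32[1] = NetWmStateFullscreen; // first property to change

    xcb_send_event(Connection, 0, VideoScreen->root,
                   XCB_EVENT_MASK_SUBSTRUCTURE_NOTIFY | XCB_EVENT_MASK_SUBSTRUCTURE_REDIRECT, (const char *)&ev);
    xcb_flush(Connection);
}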
void X11DPMSReenable(xcb_connection_t *); static void X11DPMSDisable(xcb_connection_t *); #endif -char *eglErrorString(EGLint error) -{ +char *eglErrorString(EGLint error) { switch (error) { case EGL_SUCCESS: return "No error"; @@ -602,14 +599,14 @@ char *eglErrorString(EGLint error) /// /// egl check error. /// -#define EglCheck(void) \ -{\ - EGLint err;\ -\ - if ((err = eglGetError()) != EGL_SUCCESS) {\ - Debug(3, "video/egl: %s:%d error %d %s\n", __FILE__,__LINE__,err,eglErrorString(err));\ - }\ -} +#define EglCheck(void) \ + { \ + EGLint err; \ + \ + if ((err = eglGetError()) != EGL_SUCCESS) { \ + Debug(3, "video/egl: %s:%d error %d %s\n", __FILE__, __LINE__, err, eglErrorString(err)); \ + } \ + } //---------------------------------------------------------------------------- // DRM Helper Functions @@ -623,45 +620,45 @@ char *eglErrorString(EGLint error) /// Update video pts. /// /// @param pts_p pointer to pts -/// @param interlaced interlaced flag (frame isn't right) +/// @param interlaced interlaced flag (frame isn't right) /// @param frame frame to display /// /// @note frame->interlaced_frame can't be used for interlace detection /// -static void VideoSetPts(int64_t * pts_p, int interlaced, const AVCodecContext * video_ctx, const AVFrame * frame) -{ +static void VideoSetPts(int64_t *pts_p, int interlaced, const AVCodecContext *video_ctx, const AVFrame *frame) { int64_t pts; int duration; // - // Get duration for this frame. - // FIXME: using framerate as workaround for av_frame_get_pkt_duration + // Get duration for this frame. + // FIXME: using framerate as workaround for av_frame_get_pkt_duration // // if (video_ctx->framerate.num && video_ctx->framerate.den) { // duration = 1000 * video_ctx->framerate.den / video_ctx->framerate.num; // } else { - duration = interlaced ? 40 : 20; // 50Hz -> 20ms default + duration = interlaced ? 
40 : 20; // 50Hz -> 20ms default // } - // Debug(4, "video: %d/%d %" PRIx64 " -> %d\n", video_ctx->framerate.den, video_ctx->framerate.num, av_frame_get_pkt_duration(frame), duration); + // Debug(4, "video: %d/%d %" PRIx64 " -> %d\n", video_ctx->framerate.den, + // video_ctx->framerate.num, av_frame_get_pkt_duration(frame), duration); // update video clock - if (*pts_p != (int64_t) AV_NOPTS_VALUE) { + if (*pts_p != (int64_t)AV_NOPTS_VALUE) { *pts_p += duration * 90; - //Info("video: %s +pts\n", Timestamp2String(*pts_p)); + // Info("video: %s +pts\n", Timestamp2String(*pts_p)); } // av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp"); // pts = frame->best_effort_timestamp; // pts = frame->pkt_pts; pts = frame->pts; - if (pts == (int64_t) AV_NOPTS_VALUE || !pts) { + if (pts == (int64_t)AV_NOPTS_VALUE || !pts) { // libav: 0.8pre didn't set pts pts = frame->pkt_dts; } // libav: sets only pkt_dts which can be 0 - if (pts && pts != (int64_t) AV_NOPTS_VALUE) { + if (pts && pts != (int64_t)AV_NOPTS_VALUE) { // build a monotonic pts - if (*pts_p != (int64_t) AV_NOPTS_VALUE) { + if (*pts_p != (int64_t)AV_NOPTS_VALUE) { int64_t delta; delta = pts - *pts_p; @@ -670,11 +667,11 @@ static void VideoSetPts(int64_t * pts_p, int interlaced, const AVCodecContext * if (-delta > VideoDeltaPTS) { VideoDeltaPTS = -delta; Debug(4, "video: %#012" PRIx64 "->%#012" PRIx64 " delta%+4" PRId64 " pts\n", *pts_p, pts, - pts - *pts_p); + pts - *pts_p); } return; } - } else { // first new clock value + } else { // first new clock value Debug(3, "++++++++++++++++++++++++++++++++++++starte audio\n"); AudioVideoReady(pts); } @@ -690,12 +687,12 @@ int CuvidMessage(int level, const char *format, ...); /// /// Update output for new size or aspect ratio. /// -/// @param input_aspect_ratio video stream aspect +/// @param input_aspect_ratio video stream aspect /// static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, int input_height, - VideoResolutions resolution, int video_x, int video_y, int video_width, int video_height, int *output_x, - int *output_y, int *output_width, int *output_height, int *crop_x, int *crop_y, int *crop_width, int *crop_height) -{ + VideoResolutions resolution, int video_x, int video_y, int video_width, int video_height, + int *output_x, int *output_y, int *output_width, int *output_height, int *crop_x, + int *crop_y, int *crop_width, int *crop_height) { AVRational display_aspect_ratio; AVRational tmp_ratio; @@ -712,12 +709,12 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, in display_aspect_ratio.den = VideoScreen->height_in_pixels; #endif av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den, - 1024 * 1024); + 1024 * 1024); Debug(3, "video: input %dx%d (%d:%d)\n", input_width, input_height, input_aspect_ratio.num, - input_aspect_ratio.den); + input_aspect_ratio.den); Debug(3, "video: display aspect %d:%d Resolution %d\n", display_aspect_ratio.num, display_aspect_ratio.den, - resolution); + resolution); Debug(3, "video: video %+d%+d %dx%d\n", video_x, video_y, video_width, video_height); *crop_x = VideoCutLeftRight[resolution]; @@ -751,7 +748,7 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, in goto video_none; } - normal: +normal: *output_x = video_x; *output_y = video_y; *output_height = video_height; @@ -767,7 +764,7 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, in CuvidMessage(2, "video: normal 
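/*
 * The PTS bookkeeping in VideoSetPts() above runs on the MPEG 90 kHz clock,
 * i.e. one millisecond equals 90 ticks, which is where the "duration * 90"
 * comes from.  Worked out for the two defaults used above:
 */
enum {
    PTS_STEP_PROGRESSIVE = 20 * 90, // 20 ms frame -> 1800 ticks
    PTS_STEP_INTERLACED = 40 * 90,  // 40 ms frame -> 3600 ticks
};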
aspect output %dx%d%+d%+d\n", *output_width, *output_height, *output_x, *output_y); return; - stretch: +stretch: *output_x = video_x; *output_y = video_y; *output_width = video_width; @@ -775,7 +772,7 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, in CuvidMessage(2, "video: stretch output %dx%d%+d%+d\n", *output_width, *output_height, *output_x, *output_y); return; - center_cut_out: +center_cut_out: *output_x = video_x; *output_y = video_y; *output_height = video_height; @@ -794,12 +791,13 @@ static void VideoUpdateOutput(AVRational input_aspect_ratio, int input_width, in CuvidMessage(2, "video: aspect crop %dx%d%+d%+d\n", *crop_width, *crop_height, *crop_x, *crop_y); return; - video_none: +video_none: *output_height = *crop_height; - *output_width = (*crop_width * input_aspect_ratio.num) / input_aspect_ratio.den; // normalize pixel aspect ratio + *output_width = (*crop_width * input_aspect_ratio.num) / input_aspect_ratio.den; // normalize pixel aspect ratio *output_x = video_x + (video_width - *output_width) / 2; *output_y = video_y + (video_height - *output_height) / 2; - CuvidMessage(2, "video: original aspect output %dx%d%+d%+d\n", *output_width, *output_height, *output_x, *output_y); + CuvidMessage(2, "video: original aspect output %dx%d%+d%+d\n", *output_width, *output_height, *output_x, + *output_y); return; } @@ -808,56 +806,62 @@ static uint64_t test_time = 0; /// /// Lock video thread. /// -#define VideoThreadLock(void)\ -{\ - if (VideoThread) {\ - if (pthread_mutex_lock(&VideoLockMutex)) {\ - Error(_("video: can't lock thread\n"));\ - }\ - }\ -} +#define VideoThreadLock(void) \ + { \ + if (VideoThread) { \ + if (pthread_mutex_lock(&VideoLockMutex)) { \ + Error(_("video: can't lock thread\n")); \ + } \ + } \ + } // test_time = GetusTicks(); // printf("Lock start...."); /// /// Unlock video thread. 
/// -#define VideoThreadUnlock(void)\ -{\ - if (VideoThread) {\ - if (pthread_mutex_unlock(&VideoLockMutex)) {\ - Error(_("video: can't unlock thread\n"));\ - }\ - }\ -} +#define VideoThreadUnlock(void) \ + { \ + if (VideoThread) { \ + if (pthread_mutex_unlock(&VideoLockMutex)) { \ + Error(_("video: can't unlock thread\n")); \ + } \ + } \ + } // printf("Video Locked for %d\n",(GetusTicks()-test_time)/1000); #ifdef PLACEBO_GL -#define Lock_and_SharedContext\ -{\ - VideoThreadLock();\ - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext);\ - EglCheck();\ -} -#define Unlock_and_NoContext {\ - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);\ - EglCheck();\ - VideoThreadUnlock();\ -} -#define SharedContext\ -{\ - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext);\ - EglCheck();\ -} -#define NoContext {\ - eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);\ - EglCheck();\ -} +#define Lock_and_SharedContext \ + { \ + VideoThreadLock(); \ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); \ + EglCheck(); \ + } +#define Unlock_and_NoContext \ + { \ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); \ + EglCheck(); \ + VideoThreadUnlock(); \ + } +#define SharedContext \ + { \ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, eglSharedContext); \ + EglCheck(); \ + } +#define NoContext \ + { \ + eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); \ + EglCheck(); \ + } #else #ifdef PLACEBO -#define Lock_and_SharedContext {VideoThreadLock();} -#define Unlock_and_NoContext {VideoThreadUnlock();} -#define SharedContext {} -#define NoContext {} +#define Lock_and_SharedContext \ + { VideoThreadLock(); } +#define Unlock_and_NoContext \ + { VideoThreadUnlock(); } +#define SharedContext \ + {} +#define NoContext \ + {} #endif #endif @@ -883,23 +887,22 @@ static PFNGLXSWAPINTERVALSGIPROC GlxSwapIntervalSGI; /// /// GLX check error. /// -#define GlxCheck(void)\ -{\ - GLenum err;\ -\ - if ((err = glGetError()) != GL_NO_ERROR) {\ - Debug(3, "video/glx: error %s:%d %d '%s'\n",__FILE__,__LINE__, err, gluErrorString(err));\ - }\ -} +#define GlxCheck(void) \ + { \ + GLenum err; \ + \ + if ((err = glGetError()) != GL_NO_ERROR) { \ + Debug(3, "video/glx: error %s:%d %d '%s'\n", __FILE__, __LINE__, err, gluErrorString(err)); \ + } \ + } /// /// GLX check if a GLX extension is supported. /// -/// @param ext extension to query +/// @param ext extension to query /// @returns true if supported, false otherwise /// -static int GlxIsExtensionSupported(const char *ext) -{ +static int GlxIsExtensionSupported(const char *ext) { const char *extensions; if ((extensions = glXQueryExtensionsString(XlibDisplay, DefaultScreen(XlibDisplay)))) { @@ -985,8 +988,7 @@ static void GlxSetupWindow(xcb_window_t window, int width, int height, EGLContex /// Initialize GLX. 
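/*
 * Typical bracketing for the helpers defined above in the libplacebo builds:
 * GPU work done outside the decoder thread takes the video lock and makes
 * the shared EGL context current, then drops both again.  Illustrative only;
 * the rendering paths later in this file presumably use the same pairs.
 */
static void ExampleRenderWithSharedContext(void) {
    Lock_and_SharedContext;     // pthread lock + eglMakeCurrent(shared context)

    // ... issue GL / libplacebo calls here ...

    Unlock_and_NoContext;       // release the context, then unlock the thread
}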
/// #ifdef CUVID -static void EglInit(void) -{ +static void EglInit(void) { XVisualInfo *vi = NULL; @@ -995,24 +997,32 @@ static void EglInit(void) #endif // The desired 30-bit color visual - int attributeList10[] = { - GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, - GLX_RENDER_TYPE, GLX_RGBA_BIT, - GLX_DOUBLEBUFFER, True, - GLX_RED_SIZE, 10, /*10bits for R */ - GLX_GREEN_SIZE, 10, /*10bits for G */ - GLX_BLUE_SIZE, 10, /*10bits for B */ - None - }; - int attributeList[] = { - GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, - GLX_RENDER_TYPE, GLX_RGBA_BIT, - GLX_DOUBLEBUFFER, True, - GLX_RED_SIZE, 8, /*8 bits for R */ - GLX_GREEN_SIZE, 8, /*8 bits for G */ - GLX_BLUE_SIZE, 8, /*8 bits for B */ - None - }; + int attributeList10[] = {GLX_DRAWABLE_TYPE, + GLX_WINDOW_BIT, + GLX_RENDER_TYPE, + GLX_RGBA_BIT, + GLX_DOUBLEBUFFER, + True, + GLX_RED_SIZE, + 10, /*10bits for R */ + GLX_GREEN_SIZE, + 10, /*10bits for G */ + GLX_BLUE_SIZE, + 10, /*10bits for B */ + None}; + int attributeList[] = {GLX_DRAWABLE_TYPE, + GLX_WINDOW_BIT, + GLX_RENDER_TYPE, + GLX_RGBA_BIT, + GLX_DOUBLEBUFFER, + True, + GLX_RED_SIZE, + 8, /*8 bits for R */ + GLX_GREEN_SIZE, + 8, /*8 bits for G */ + GLX_BLUE_SIZE, + 8, /*8 bits for B */ + None}; int fbcount; GLXContext context; @@ -1031,7 +1041,7 @@ static void EglInit(void) Debug(3, "video/glx: glx version %d.%d\n", major, minor); // - // check which extension are supported + // check which extension are supported // glx_GLX_EXT_swap_control = GlxIsExtensionSupported("GLX_EXT_swap_control"); glx_GLX_MESA_swap_control = GlxIsExtensionSupported("GLX_MESA_swap_control"); @@ -1040,22 +1050,19 @@ static void EglInit(void) #ifdef GLX_MESA_swap_control if (glx_GLX_MESA_swap_control) { - GlxSwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC) - glXGetProcAddress((const GLubyte *)"glXSwapIntervalMESA"); + GlxSwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC)glXGetProcAddress((const GLubyte *)"glXSwapIntervalMESA"); } Debug(3, "video/glx: GlxSwapIntervalMESA=%p\n", GlxSwapIntervalMESA); #endif #ifdef GLX_SGI_swap_control if (glx_GLX_SGI_swap_control) { - GlxSwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC) - glXGetProcAddress((const GLubyte *)"wglSwapIntervalEXT"); + GlxSwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC)glXGetProcAddress((const GLubyte *)"wglSwapIntervalEXT"); } Debug(3, "video/glx: GlxSwapIntervalSGI=%p\n", GlxSwapIntervalSGI); #endif #ifdef GLX_SGI_video_sync if (glx_GLX_SGI_video_sync) { - GlxGetVideoSyncSGI = (PFNGLXGETVIDEOSYNCSGIPROC) - glXGetProcAddress((const GLubyte *)"glXGetVideoSyncSGI"); + GlxGetVideoSyncSGI = (PFNGLXGETVIDEOSYNCSGIPROC)glXGetProcAddress((const GLubyte *)"glXGetVideoSyncSGI"); } Debug(3, "video/glx: GlxGetVideoSyncSGI=%p\n", GlxGetVideoSyncSGI); #endif @@ -1063,9 +1070,10 @@ static void EglInit(void) // create glx context glXMakeCurrent(XlibDisplay, None, NULL); - fbc = glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay), attributeList10, &fbcount); // try 10 Bit + fbc = glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay), attributeList10, &fbcount); // try 10 Bit if (fbc == NULL) { - fbc = glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay), attributeList, &fbcount); // fall back to 8 Bit + fbc = + glXChooseFBConfig(XlibDisplay, DefaultScreen(XlibDisplay), attributeList, &fbcount); // fall back to 8 Bit if (fbc == NULL) Fatal(_("did not get FBconfig")); } @@ -1095,7 +1103,7 @@ static void EglInit(void) Debug(3, "video/glx: visual %#02x depth %u\n", (unsigned)vi->visualid, vi->depth); // - // query default v-sync state + // query default v-sync state // if 
(glx_GLX_EXT_swap_control) { unsigned tmp; @@ -1110,7 +1118,7 @@ static void EglInit(void) } // - // disable wait on v-sync + // disable wait on v-sync // // FIXME: sleep before swap / busy waiting hardware // FIXME: 60hz lcd panel @@ -1126,7 +1134,7 @@ static void EglInit(void) } else #endif #ifdef GLX_MESA_swap_control - if (GlxVSyncEnabled < 0 && GlxSwapIntervalMESA) { + if (GlxVSyncEnabled < 0 && GlxSwapIntervalMESA) { if (GlxSwapIntervalMESA(0)) { GlxCheck(); Warning(_("video/glx: can't disable v-sync\n")); @@ -1137,7 +1145,7 @@ static void EglInit(void) #endif // - // enable wait on v-sync + // enable wait on v-sync // #ifdef GLX_SGI_swap_control if (GlxVSyncEnabled > 0 && GlxSwapIntervalMESA) { @@ -1150,7 +1158,7 @@ static void EglInit(void) } else #endif #ifdef GLX_MESA_swap_control - if (GlxVSyncEnabled > 0 && GlxSwapIntervalSGI) { + if (GlxVSyncEnabled > 0 && GlxSwapIntervalSGI) { if (GlxSwapIntervalSGI(1)) { GlxCheck(); Warning(_("video/glx: SGI can't enable v-sync\n")); @@ -1159,12 +1167,10 @@ static void EglInit(void) } } #endif - } #else // VAAPI -static void EglInit(void) -{ +static void EglInit(void) { int redSize, greenSize, blueSize, alphaSize; static int glewdone = 0; @@ -1174,17 +1180,17 @@ static void EglInit(void) EGLContext context; // create egl context - // setenv("MESA_GL_VERSION_OVERRIDE", "3.3", 0); - // setenv("V3D_DOUBLE_BUFFER", "1", 0); + // setenv("MESA_GL_VERSION_OVERRIDE", "3.3", 0); + // setenv("V3D_DOUBLE_BUFFER", "1", 0); make_egl(); if (!glewdone) { GLenum err = glewInit(); glewdone = 1; -// if (err != GLEW_OK) { -// Debug(3, "Error: %s\n", glewGetErrorString(err)); -// } + // if (err != GLEW_OK) { + // Debug(3, "Error: %s\n", glewGetErrorString(err)); + // } } eglGetConfigAttrib(eglDisplay, eglConfig, EGL_BLUE_SIZE, &blueSize); @@ -1196,7 +1202,7 @@ static void EglInit(void) eglSharedContext = eglContext; context = eglCreateContext(eglDisplay, eglConfig, eglSharedContext, eglAttrs); - + EglCheck(); if (!context) { Fatal(_("video/egl: can't create egl context\n")); @@ -1208,8 +1214,7 @@ static void EglInit(void) /// /// Cleanup GLX. /// -static void EglExit(void) -{ +static void EglExit(void) { Debug(3, "video/egl: %s\n", __FUNCTION__); #if defined PLACEBO && !defined PLACEBO_GL return; @@ -1278,13 +1283,11 @@ static void EglExit(void) /// /// @param width video picture raw width /// @param height video picture raw height -/// @param interlace flag interlaced video picture +/// @param interlace flag interlaced video picture /// /// @note interlace isn't used yet and probably wrong set by caller. 
/// -static VideoResolutions VideoResolutionGroup(int width, int height, __attribute__((unused)) - int interlace) -{ +static VideoResolutions VideoResolutionGroup(int width, int height, __attribute__((unused)) int interlace) { if (height == 2160) { return VideoResolutionUHD; } @@ -1310,8 +1313,7 @@ static VideoResolutions VideoResolutionGroup(int width, int height, __attribute_ #ifdef USE_CUVID #ifdef PLACEBO -struct ext_buf -{ +struct ext_buf { int fd; #ifdef CUVID CUexternalMemory mem; @@ -1324,56 +1326,55 @@ struct ext_buf #endif #ifdef VAAPI -static VADisplay *VaDisplay; ///< VA-API display +static VADisplay *VaDisplay; ///< VA-API display #endif /// /// CUVID decoder /// -typedef struct _cuvid_decoder_ -{ +typedef struct _cuvid_decoder_ { #ifdef VAAPI - VADisplay *VaDisplay; ///< VA-API display + VADisplay *VaDisplay; ///< VA-API display #endif - xcb_window_t Window; ///< output window + xcb_window_t Window; ///< output window - int VideoX; ///< video base x coordinate - int VideoY; ///< video base y coordinate - int VideoWidth; ///< video base width - int VideoHeight; ///< video base height + int VideoX; ///< video base x coordinate + int VideoY; ///< video base y coordinate + int VideoWidth; ///< video base width + int VideoHeight; ///< video base height - int OutputX; ///< real video output x coordinate - int OutputY; ///< real video output y coordinate - int OutputWidth; ///< real video output width - int OutputHeight; ///< real video output height + int OutputX; ///< real video output x coordinate + int OutputY; ///< real video output y coordinate + int OutputWidth; ///< real video output width + int OutputHeight; ///< real video output height - enum AVPixelFormat PixFmt; ///< ffmpeg frame pixfmt - enum AVColorSpace ColorSpace; /// ffmpeg ColorSpace + enum AVPixelFormat PixFmt; ///< ffmpeg frame pixfmt + enum AVColorSpace ColorSpace; /// ffmpeg ColorSpace enum AVColorTransferCharacteristic trc; // enum AVColorPrimaries color_primaries; - int WrongInterlacedWarned; ///< warning about interlace flag issued - int Interlaced; ///< ffmpeg interlaced flag - int TopFieldFirst; ///< ffmpeg top field displayed first + int WrongInterlacedWarned; ///< warning about interlace flag issued + int Interlaced; ///< ffmpeg interlaced flag + int TopFieldFirst; ///< ffmpeg top field displayed first - int InputWidth; ///< video input width - int InputHeight; ///< video input height - AVRational InputAspect; ///< video input aspect ratio - VideoResolutions Resolution; ///< resolution group + int InputWidth; ///< video input width + int InputHeight; ///< video input height + AVRational InputAspect; ///< video input aspect ratio + VideoResolutions Resolution; ///< resolution group - int CropX; ///< video crop x - int CropY; ///< video crop y - int CropWidth; ///< video crop width - int CropHeight; ///< video crop height + int CropX; ///< video crop x + int CropY; ///< video crop y + int CropWidth; ///< video crop width + int CropHeight; ///< video crop height - int grabwidth, grabheight, grab; // Grab Data + int grabwidth, grabheight, grab; // Grab Data void *grabbase; - int SurfacesNeeded; ///< number of surface to request - int SurfaceUsedN; ///< number of used video surfaces + int SurfacesNeeded; ///< number of surface to request + int SurfaceUsedN; ///< number of used video surfaces /// used video surface ids int SurfacesUsed[CODEC_SURFACES_MAX]; - int SurfaceFreeN; ///< number of free video surfaces + int SurfaceFreeN; ///< number of free video surfaces /// free video surface ids int 
SurfacesFree[CODEC_SURFACES_MAX]; /// video surface ring buffer @@ -1382,54 +1383,54 @@ typedef struct _cuvid_decoder_ // cudaStream_t stream; // make my own cuda stream // CUgraphicsResource cuResource; - int SurfaceWrite; ///< write pointer - int SurfaceRead; ///< read pointer - atomic_t SurfacesFilled; ///< how many of the buffer is used + int SurfaceWrite; ///< write pointer + int SurfaceRead; ///< read pointer + atomic_t SurfacesFilled; ///< how many of the buffer is used AVFrame *frames[CODEC_SURFACES_MAX + 1]; #ifdef CUVID CUarray cu_array[CODEC_SURFACES_MAX + 1][2]; CUgraphicsResource cu_res[CODEC_SURFACES_MAX + 1][2]; CUcontext cuda_ctx; #endif - GLuint gl_textures[(CODEC_SURFACES_MAX + 1) * 2]; // where we will copy the CUDA result + GLuint gl_textures[(CODEC_SURFACES_MAX + 1) * 2]; // where we will copy the CUDA result #ifdef VAAPI EGLImageKHR images[(CODEC_SURFACES_MAX + 1) * 2]; int fds[(CODEC_SURFACES_MAX + 1) * 2]; #endif #ifdef PLACEBO - struct pl_frame pl_frames[CODEC_SURFACES_MAX + 1]; // images for Placebo chain - struct ext_buf ebuf[CODEC_SURFACES_MAX + 1]; // for managing vk buffer + struct pl_frame pl_frames[CODEC_SURFACES_MAX + 1]; // images for Placebo chain + struct ext_buf ebuf[CODEC_SURFACES_MAX + 1]; // for managing vk buffer #endif - int SurfaceField; ///< current displayed field - int TrickSpeed; ///< current trick speed - int TrickCounter; ///< current trick speed counter - struct timespec FrameTime; ///< time of last display - VideoStream *Stream; ///< video stream - int Closing; ///< flag about closing current stream - int SyncOnAudio; ///< flag sync to audio - int64_t PTS; ///< video PTS clock + int SurfaceField; ///< current displayed field + int TrickSpeed; ///< current trick speed + int TrickCounter; ///< current trick speed counter + struct timespec FrameTime; ///< time of last display + VideoStream *Stream; ///< video stream + int Closing; ///< flag about closing current stream + int SyncOnAudio; ///< flag sync to audio + int64_t PTS; ///< video PTS clock -#if defined(YADIF) || defined (VAAPI) +#if defined(YADIF) || defined(VAAPI) AVFilterContext *buffersink_ctx; AVFilterContext *buffersrc_ctx; AVFilterGraph *filter_graph; #endif AVBufferRef *cached_hw_frames_ctx; - int LastAVDiff; ///< last audio - video difference - int SyncCounter; ///< counter to sync frames - int StartCounter; ///< counter for video start - int FramesDuped; ///< number of frames duplicated - int FramesMissed; ///< number of frames missed - int FramesDropped; ///< number of frames dropped - int FrameCounter; ///< number of frames decoded - int FramesDisplayed; ///< number of frames displayed - float Frameproc; /// Time to process frame + int LastAVDiff; ///< last audio - video difference + int SyncCounter; ///< counter to sync frames + int StartCounter; ///< counter for video start + int FramesDuped; ///< number of frames duplicated + int FramesMissed; ///< number of frames missed + int FramesDropped; ///< number of frames dropped + int FrameCounter; ///< number of frames decoded + int FramesDisplayed; ///< number of frames displayed + float Frameproc; /// Time to process frame int newchannel; } CuvidDecoder; -static CuvidDecoder *CuvidDecoders[2]; ///< open decoder streams -static int CuvidDecoderN; ///< number of decoder streams +static CuvidDecoder *CuvidDecoders[2]; ///< open decoder streams +static int CuvidDecoderN; ///< number of decoder streams #ifdef CUVID static CudaFunctions *cu; @@ -1437,14 +1438,12 @@ static CudaFunctions *cu; #ifdef PLACEBO -struct file -{ +struct 
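/*
 * Sketch of the free/used bookkeeping implied by the surface arrays above,
 * roughly what CuvidGetVideoSurface0() further down does: take the oldest
 * free surface id, close the gap in SurfacesFree[], and record it as used.
 * Simplified and without the locking/error paths of the real function.
 */
static int ExampleGetFreeSurface(CuvidDecoder *decoder) {
    int surface, i;

    if (!decoder->SurfaceFreeN)
        return -1;                          // nothing free right now
    surface = decoder->SurfacesFree[0];     // oldest free surface
    for (i = 0; i < decoder->SurfaceFreeN - 1; ++i)
        decoder->SurfacesFree[i] = decoder->SurfacesFree[i + 1];
    decoder->SurfacesFree[--decoder->SurfaceFreeN] = -1;
    decoder->SurfacesUsed[decoder->SurfaceUsedN++] = surface;
    return surface;
}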
file { void *data; size_t size; }; -typedef struct priv -{ +typedef struct priv { const struct pl_gpu *gpu; const struct pl_vulkan *vk; const struct pl_vk_inst *vk_inst; @@ -1458,7 +1457,7 @@ typedef struct priv struct pl_context_params context; // struct pl_frame r_target; // struct pl_render_params r_params; - // struct pl_tex final_fbo; + // struct pl_tex final_fbo; #ifndef PLACEBO_GL VkSurfaceKHR pSurface; #endif @@ -1481,15 +1480,15 @@ struct itimerval itimer; GLuint vao_buffer; -//GLuint vao_vao[4]; -GLuint gl_shader = 0, gl_prog = 0, gl_fbo = 0; // shader programm +// GLuint vao_vao[4]; +GLuint gl_shader = 0, gl_prog = 0, gl_fbo = 0; // shader programm GLint gl_colormatrix, gl_colormatrix_c; GLuint OSDfb = 0; GLuint OSDtexture, gl_prog_osd = 0; int OSDx, OSDy, OSDxsize, OSDysize; -static struct timespec CuvidFrameTime; ///< time of last display +static struct timespec CuvidFrameTime; ///< time of last display int window_width, window_height; @@ -1504,20 +1503,19 @@ int window_width, window_height; /// /// @param level message level (Error, Warning, Info, Debug, ...) /// @param format printf format string (NULL to flush messages) -/// @param ... printf arguments +/// @param ... printf arguments /// /// @returns true, if message shown /// -int CuvidMessage(int level, const char *format, ...) -{ +int CuvidMessage(int level, const char *format, ...) { if (SysLogLevel > level || DebugLevel > level) { static const char *last_format; static char buf[256]; va_list ap; va_start(ap, format); - if (format != last_format) { // don't repeat same message - if (buf[0]) { // print last repeated message + if (format != last_format) { // don't repeat same message + if (buf[0]) { // print last repeated message syslog(LOG_ERR, "%s", buf); buf[0] = '\0'; } @@ -1538,23 +1536,25 @@ int CuvidMessage(int level, const char *format, ...) //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions #ifdef CUVID -// This will output the proper CUDA error strings in the event that a CUDA host call returns an error -#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) +// This will output the proper CUDA error strings in the event that a CUDA host +// call returns an error +#define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) // These are the inline versions for all of the SDK helper functions -static inline void __checkCudaErrors(CUresult err, const char *file, const int line) -{ +static inline void __checkCudaErrors(CUresult err, const char *file, const int line) { if (CUDA_SUCCESS != err) { - CuvidMessage(2, "checkCudaErrors() Driver API error = %04d >%s< from file <%s>, line %i.\n", err, - getCudaDrvErrorString(err), file, line); + CuvidMessage(2, + "checkCudaErrors() Driver API error = %04d >%s< from file " + "<%s>, line %i.\n", + err, getCudaDrvErrorString(err), file, line); exit(EXIT_FAILURE); } } #endif // Surfaces ------------------------------------------------------------- -void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsigned int size_y, - enum AVPixelFormat PixFmt); +void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsigned int size_y, + enum AVPixelFormat PixFmt); /// /// Create surfaces for CUVID decoder. 
/// @@ -1562,8 +1562,7 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi /// @param width surface source/video width /// @param height surface source/video height /// -static void CuvidCreateSurfaces(CuvidDecoder * decoder, int width, int height, enum AVPixelFormat PixFmt) -{ +static void CuvidCreateSurfaces(CuvidDecoder *decoder, int width, int height, enum AVPixelFormat PixFmt) { int i; #ifdef DEBUG @@ -1591,13 +1590,11 @@ static void CuvidCreateSurfaces(CuvidDecoder * decoder, int width, int height, e /// /// @param decoder CUVID hw decoder /// -static void CuvidDestroySurfaces(CuvidDecoder * decoder) -{ +static void CuvidDestroySurfaces(CuvidDecoder *decoder) { int i, j; Debug(3, "video/cuvid: %s\n", __FUNCTION__); - #ifndef PLACEBO #ifdef CUVID glXMakeCurrent(XlibDisplay, VideoWindow, glxSharedContext); @@ -1607,14 +1604,14 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) EglCheck(); #endif #endif - -#ifdef PLACEBO + +#ifdef PLACEBO pl_gpu_finish(p->gpu); -#if API_VER >= 58 +#if API_VER >= 58 p->num_shaders = 0; #endif #endif - + for (i = 0; i < decoder->SurfacesNeeded; i++) { if (decoder->frames[i]) { av_frame_free(&decoder->frames[i]); @@ -1657,22 +1654,22 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) #ifdef PLACEBO - // pl_renderer_destroy(&p->renderer); - // p->renderer = pl_renderer_create(p->ctx, p->gpu); + // pl_renderer_destroy(&p->renderer); + // p->renderer = pl_renderer_create(p->ctx, p->gpu); #else - glDeleteTextures(CODEC_SURFACES_MAX * 2, (GLuint *) & decoder->gl_textures); + glDeleteTextures(CODEC_SURFACES_MAX * 2, (GLuint *)&decoder->gl_textures); GlxCheck(); - if (CuvidDecoderN == 1) { // only wenn last decoder closes + if (CuvidDecoderN == 1) { // only wenn last decoder closes Debug(3, "Last decoder closes\n"); - glDeleteBuffers(1, (GLuint *) & vao_buffer); + glDeleteBuffers(1, (GLuint *)&vao_buffer); if (gl_prog) glDeleteProgram(gl_prog); gl_prog = 0; } #endif - + for (i = 0; i < decoder->SurfaceFreeN; ++i) { decoder->SurfacesFree[i] = -1; } @@ -1683,7 +1680,6 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) decoder->SurfaceFreeN = 0; decoder->SurfaceUsedN = 0; - } /// @@ -1693,8 +1689,7 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder) /// /// @returns the oldest free surface /// -static int CuvidGetVideoSurface0(CuvidDecoder * decoder) -{ +static int CuvidGetVideoSurface0(CuvidDecoder *decoder) { int surface; int i; @@ -1722,8 +1717,7 @@ static int CuvidGetVideoSurface0(CuvidDecoder * decoder) /// @param decoder CUVID hw decoder /// @param surface surface no longer used /// -static void CuvidReleaseSurface(CuvidDecoder * decoder, int surface) -{ +static void CuvidReleaseSurface(CuvidDecoder *decoder, int surface) { int i; if (decoder->frames[surface]) { @@ -1755,8 +1749,8 @@ static void CuvidReleaseSurface(CuvidDecoder * decoder, int surface) if (decoder->fds[surface * Planes]) { close(decoder->fds[surface * Planes]); } - if (decoder->fds[surface * Planes + 1]) { - close(decoder->fds[surface*Planes+1]); + if (decoder->fds[surface * Planes + 1]) { + close(decoder->fds[surface * Planes + 1]); } } decoder->fds[surface * Planes] = 0; @@ -1781,17 +1775,15 @@ static void CuvidReleaseSurface(CuvidDecoder * decoder, int surface) /// /// @param decoder CUVID hw decoder /// -static void CuvidPrintFrames(const CuvidDecoder * decoder) -{ +static void CuvidPrintFrames(const CuvidDecoder *decoder) { Debug(3, "video/cuvid: %d missed, %d duped, %d dropped frames of %d,%d\n", decoder->FramesMissed, - 
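/*
 * In the VAAPI builds the dma-buf bookkeeping released above keeps one file
 * descriptor per plane, Planes (= 2 for NV12/P010: luma plus interleaved
 * chroma) entries per surface.  The index arithmetic from
 * CuvidReleaseSurface(), spelled out as a small helper:
 */
static void ExampleClosePlaneFds(CuvidDecoder *decoder, int surface) {
    if (decoder->fds[surface * Planes])          // plane 0: Y
        close(decoder->fds[surface * Planes]);
    if (decoder->fds[surface * Planes + 1])      // plane 1: interleaved CbCr
        close(decoder->fds[surface * Planes + 1]);
    decoder->fds[surface * Planes] = 0;
    decoder->fds[surface * Planes + 1] = 0;
}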
decoder->FramesDuped, decoder->FramesDropped, decoder->FrameCounter, decoder->FramesDisplayed); + decoder->FramesDuped, decoder->FramesDropped, decoder->FrameCounter, decoder->FramesDisplayed); #ifndef DEBUG (void)decoder; #endif } -int CuvidTestSurfaces() -{ +int CuvidTestSurfaces() { int i = 0; if (CuvidDecoders[0] != NULL) { @@ -1803,44 +1795,27 @@ int CuvidTestSurfaces() } #ifdef VAAPI -struct mp_egl_config_attr -{ +struct mp_egl_config_attr { int attrib; const char *name; }; -#define MPGL_VER(major, minor) (((major) * 100) + (minor) * 10) +#define MPGL_VER(major, minor) (((major)*100) + (minor)*10) #define MPGL_VER_GET_MAJOR(ver) ((unsigned)(ver) / 100) #define MPGL_VER_GET_MINOR(ver) ((unsigned)(ver) % 100 / 10) -#define MP_EGL_ATTRIB(id) {id, # id} +#define MP_EGL_ATTRIB(id) \ + { id, #id } static const struct mp_egl_config_attr mp_egl_attribs[] = { - MP_EGL_ATTRIB(EGL_CONFIG_ID), - MP_EGL_ATTRIB(EGL_RED_SIZE), - MP_EGL_ATTRIB(EGL_GREEN_SIZE), - MP_EGL_ATTRIB(EGL_BLUE_SIZE), - MP_EGL_ATTRIB(EGL_ALPHA_SIZE), - MP_EGL_ATTRIB(EGL_COLOR_BUFFER_TYPE), - MP_EGL_ATTRIB(EGL_CONFIG_CAVEAT), - MP_EGL_ATTRIB(EGL_CONFORMANT), + MP_EGL_ATTRIB(EGL_CONFIG_ID), MP_EGL_ATTRIB(EGL_RED_SIZE), MP_EGL_ATTRIB(EGL_GREEN_SIZE), + MP_EGL_ATTRIB(EGL_BLUE_SIZE), MP_EGL_ATTRIB(EGL_ALPHA_SIZE), MP_EGL_ATTRIB(EGL_COLOR_BUFFER_TYPE), + MP_EGL_ATTRIB(EGL_CONFIG_CAVEAT), MP_EGL_ATTRIB(EGL_CONFORMANT), }; -const int mpgl_preferred_gl_versions[] = { - 460, - 440, - 430, - 400, - 330, - 320, - 310, - 300, - 210, - 0 -}; +const int mpgl_preferred_gl_versions[] = {460, 440, 430, 400, 330, 320, 310, 300, 210, 0}; -static bool create_context_cb(EGLDisplay display, int es_version, EGLContext * out_context, EGLConfig * out_config, - int *bpp) -{ +static bool create_context_cb(EGLDisplay display, int es_version, EGLContext *out_context, EGLConfig *out_config, + int *bpp) { EGLenum api; EGLint rend, *attribs; @@ -1863,58 +1838,60 @@ static bool create_context_cb(EGLDisplay display, int es_version, EGLContext * o name = "GLES 3.x"; break; default: - Fatal(_("Wrong ES version \n"));; + Fatal(_("Wrong ES version \n")); + ; } if (!eglBindAPI(api)) { Fatal(_(" Could not bind API!\n")); } - + Debug(3, "Trying to create %s context \n", name); - + EGLint attributes8[] = { - EGL_SURFACE_TYPE, EGL_WINDOW_BIT, - EGL_RED_SIZE, 8, - EGL_GREEN_SIZE, 8, - EGL_BLUE_SIZE, 8, - EGL_ALPHA_SIZE, 8, - EGL_RENDERABLE_TYPE, rend, - EGL_NONE - }; - EGLint attributes10[] = { - EGL_SURFACE_TYPE, EGL_WINDOW_BIT, - EGL_RED_SIZE, 10, - EGL_GREEN_SIZE, 10, - EGL_BLUE_SIZE, 10, - EGL_ALPHA_SIZE, 2, - EGL_RENDERABLE_TYPE, rend, - EGL_NONE - }; + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8, + EGL_RENDERABLE_TYPE, rend, EGL_NONE}; + EGLint attributes10[] = {EGL_SURFACE_TYPE, + EGL_WINDOW_BIT, + EGL_RED_SIZE, + 10, + EGL_GREEN_SIZE, + 10, + EGL_BLUE_SIZE, + 10, + EGL_ALPHA_SIZE, + 2, + EGL_RENDERABLE_TYPE, + rend, + EGL_NONE}; EGLint num_configs = 0; #ifndef RASPI attribs = attributes10; *bpp = 10; - if (!eglChooseConfig(display, attributes10, NULL, 0, &num_configs)) { // try 10 Bit + if (!eglChooseConfig(display, attributes10, NULL, 0, + &num_configs)) { // try 10 Bit EglCheck(); Debug(3, " 10 Bit egl Failed\n"); attribs = attributes8; *bpp = 8; - if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit + if (!eglChooseConfig(display, attributes8, NULL, 0, + &num_configs)) { // try 8 Bit num_configs = 0; } } else #endif - if (num_configs == 0) { + if (num_configs == 0) { 
EglCheck(); Debug(3, " 10 Bit egl Failed\n"); attribs = attributes8; *bpp = 8; - if (!eglChooseConfig(display, attributes8, NULL, 0, &num_configs)) { // try 8 Bit + if (!eglChooseConfig(display, attributes8, NULL, 0, + &num_configs)) { // try 8 Bit num_configs = 0; } } - + EGLConfig *configs = malloc(sizeof(EGLConfig) * num_configs); if (!eglChooseConfig(display, attribs, configs, num_configs, &num_configs)) @@ -1968,8 +1945,7 @@ static bool create_context_cb(EGLDisplay display, int es_version, EGLContext * o return true; } -make_egl() -{ +make_egl() { int bpp; CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"); @@ -2000,7 +1976,7 @@ make_egl() #ifdef USE_DRM InitBo(bpp); #else - eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, (EGLNativeWindowType) VideoWindow, NULL); + eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, (EGLNativeWindowType)VideoWindow, NULL); if (eglSurface == EGL_NO_SURFACE) { Fatal(_("Could not create EGL surface!\n")); @@ -2020,8 +1996,7 @@ make_egl() /// /// @returns a new prepared cuvid hardware decoder. /// -static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) -{ +static CuvidDecoder *CuvidNewHwDecoder(VideoStream *stream) { CuvidDecoder *decoder; @@ -2039,8 +2014,9 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) Fatal("codec: can't allocate HW video codec context err %04x", i); } #endif -#if defined (VAAPI) && !defined (RASPI) - // if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, ":0.0" , NULL, 0)) != 0 ) { +#if defined(VAAPI) && !defined(RASPI) + // if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, + // ":0.0" , NULL, 0)) != 0 ) { if ((i = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128", NULL, 0)) != 0) { Fatal("codec: can't allocate HW video codec context err %04x", i); } @@ -2053,7 +2029,7 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) Error(_("video/cuvid: out of memory\n")); return NULL; } -#if defined (VAAPI) && !defined (RASPI) +#if defined(VAAPI) && !defined(RASPI) VaDisplay = TO_VAAPI_DEVICE_CTX(HwDeviceContext)->display; decoder->VaDisplay = VaDisplay; #endif @@ -2082,7 +2058,7 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) decoder->PixFmt = AV_PIX_FMT_NONE; decoder->Stream = stream; - if (!CuvidDecoderN) { // FIXME: hack sync on audio + if (!CuvidDecoderN) { // FIXME: hack sync on audio decoder->SyncOnAudio = 1; } decoder->Closing = -300 - 1; @@ -2098,8 +2074,7 @@ static CuvidDecoder *CuvidNewHwDecoder(VideoStream * stream) /// /// @param decoder CUVID hw decoder /// -static void CuvidCleanup(CuvidDecoder * decoder) -{ +static void CuvidCleanup(CuvidDecoder *decoder) { int i; Debug(3, "Cuvid Clean up\n"); @@ -2133,8 +2108,7 @@ static void CuvidCleanup(CuvidDecoder * decoder) /// /// @param decoder CUVID hw decoder /// -static void CuvidDelHwDecoder(CuvidDecoder * decoder) -{ +static void CuvidDelHwDecoder(CuvidDecoder *decoder) { int i; Debug(3, "cuvid del hw decoder \n"); @@ -2179,9 +2153,7 @@ static void CuvidDelHwDecoder(CuvidDecoder * decoder) Error(_("video/cuvid: decoder not in decoder list.\n")); } -static int CuvidGlxInit( __attribute__((unused)) - const char *display_name) -{ +static int CuvidGlxInit(__attribute__((unused)) const char *display_name) { #if !defined PLACEBO || defined PLACEBO_GL @@ -2207,8 +2179,7 @@ static int CuvidGlxInit( __attribute__((unused)) /// /// CUVID cleanup. 
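/*
 * The hardware device context created in CuvidNewHwDecoder() above is what
 * eventually lets FFmpeg decode straight into VAAPI/CUDA surfaces.  A
 * minimal sketch of how such a context is handed to an AVCodecContext via
 * av_buffer_ref(); the actual hook-up happens outside this excerpt on the
 * codec side, so the function name here is illustrative.
 */
static int ExampleAttachHwDevice(AVCodecContext *video_ctx) {
    // HwDeviceContext is the global AVBufferRef declared near the top of this file
    video_ctx->hw_device_ctx = av_buffer_ref(HwDeviceContext);
    return video_ctx->hw_device_ctx ? 0 : -1;
}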
/// -static void CuvidExit(void) -{ +static void CuvidExit(void) { int i; for (i = 0; i < CuvidDecoderN; ++i) { @@ -2220,7 +2191,6 @@ static void CuvidExit(void) CuvidDecoderN = 0; Debug(3, "CuvidExit\n"); - } /// @@ -2228,17 +2198,14 @@ static void CuvidExit(void) /// /// @param decoder CUVID hw decoder /// -static void CuvidUpdateOutput(CuvidDecoder * decoder) -{ +static void CuvidUpdateOutput(CuvidDecoder *decoder) { VideoUpdateOutput(decoder->InputAspect, decoder->InputWidth, decoder->InputHeight, decoder->Resolution, - decoder->VideoX, decoder->VideoY, decoder->VideoWidth, decoder->VideoHeight, &decoder->OutputX, - &decoder->OutputY, &decoder->OutputWidth, &decoder->OutputHeight, &decoder->CropX, &decoder->CropY, - &decoder->CropWidth, &decoder->CropHeight); - + decoder->VideoX, decoder->VideoY, decoder->VideoWidth, decoder->VideoHeight, &decoder->OutputX, + &decoder->OutputY, &decoder->OutputWidth, &decoder->OutputHeight, &decoder->CropX, + &decoder->CropY, &decoder->CropWidth, &decoder->CropHeight); } -void SDK_CHECK_ERROR_GL() -{ +void SDK_CHECK_ERROR_GL() { GLenum gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { @@ -2248,9 +2215,8 @@ void SDK_CHECK_ERROR_GL() #ifdef CUVID // copy image and process using CUDA -void generateCUDAImage(CuvidDecoder * decoder, int index, const AVFrame * frame, int image_width, int image_height, - int bytes) -{ +void generateCUDAImage(CuvidDecoder *decoder, int index, const AVFrame *frame, int image_width, int image_height, + int bytes) { int n; for (n = 0; n < 2; n++) { @@ -2259,7 +2225,7 @@ void generateCUDAImage(CuvidDecoder * decoder, int index, const AVFrame * frame, CUDA_MEMCPY2D cpy = { .srcMemoryType = CU_MEMORYTYPE_DEVICE, .dstMemoryType = CU_MEMORYTYPE_ARRAY, - .srcDevice = (CUdeviceptr) frame->data[n], + .srcDevice = (CUdeviceptr)frame->data[n], .srcPitch = frame->linesize[n], .srcY = 0, .dstArray = decoder->cu_array[index][n], @@ -2273,9 +2239,8 @@ void generateCUDAImage(CuvidDecoder * decoder, int index, const AVFrame * frame, #endif #ifdef PLACEBO -void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsigned int size_y, - enum AVPixelFormat PixFmt) -{ +void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsigned int size_y, + enum AVPixelFormat PixFmt) { int n, i, size = 1, fd; const struct pl_fmt *fmt; struct pl_tex *tex; @@ -2285,51 +2250,47 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi SharedContext; // printf("Create textures and planes %d %d\n",size_x,size_y); Debug(3, "video/vulkan: create %d Textures Format %s w %d h %d \n", anz, - PixFmt == AV_PIX_FMT_NV12 ? "NV12" : "P010", size_x, size_y); + PixFmt == AV_PIX_FMT_NV12 ? "NV12" : "P010", size_x, size_y); - for (i = 0; i < anz; i++) { // number of texture + for (i = 0; i < anz; i++) { // number of texture if (decoder->frames[i]) { av_frame_free(&decoder->frames[i]); decoder->frames[i] = NULL; } - for (n = 0; n < 2; n++) { // number of planes + for (n = 0; n < 2; n++) { // number of planes bool ok = true; if (PixFmt == AV_PIX_FMT_NV12) { fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r8" : "rg8"); // 8 Bit YUV size = 1; } else { - fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r16" : "rg16"); // 10 Bit YUV + fmt = pl_find_named_fmt(p->gpu, n == 0 ? 
"r16" : "rg16"); // 10 Bit YUV size = 2; } if (decoder->pl_frames[i].planes[n].texture) { -// #ifdef VAAPI + // #ifdef VAAPI if (decoder->pl_frames[i].planes[n].texture->params.shared_mem.handle.fd) { close(decoder->pl_frames[i].planes[n].texture->params.shared_mem.handle.fd); } -// #endif - pl_tex_destroy(p->gpu, &decoder->pl_frames[i].planes[n].texture); // delete old texture + // #endif + pl_tex_destroy(p->gpu, + &decoder->pl_frames[i].planes[n].texture); // delete old texture } if (p->has_dma_buf == 0) { - decoder->pl_frames[i].planes[n].texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { - .w = n == 0 ? size_x : size_x / 2, - .h = n == 0 ? size_y : size_y / 2, - .d = 0, - .format = fmt, - .sampleable = true, - .host_writable = true, - .blit_dst = true, + decoder->pl_frames[i].planes[n].texture = pl_tex_create( + p->gpu, &(struct pl_tex_params) { + .w = n == 0 ? size_x : size_x / 2, .h = n == 0 ? size_y : size_y / 2, .d = 0, .format = fmt, + .sampleable = true, .host_writable = true, .blit_dst = true, #if PL_API_VER < 159 - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .address_mode = PL_TEX_ADDRESS_CLAMP, + .sample_mode = PL_TEX_SAMPLE_LINEAR, .address_mode = PL_TEX_ADDRESS_CLAMP, #endif #if !defined PLACEBO_GL .export_handle = PL_HANDLE_FD, #endif }); } - + // make planes for image pl = &decoder->pl_frames[i].planes[n]; pl->components = n == 0 ? 1 : 2; @@ -2341,7 +2302,7 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi pl->component_mapping[2] = -1; pl->component_mapping[3] = -1; } else { - pl->shift_x = -0.5f; // PL_CHROMA_LEFT + pl->shift_x = -0.5f; // PL_CHROMA_LEFT pl->component_mapping[0] = PL_CHANNEL_U; pl->component_mapping[1] = PL_CHANNEL_V; pl->component_mapping[2] = -1; @@ -2355,13 +2316,16 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi CUDA_EXTERNAL_MEMORY_HANDLE_DESC ext_desc = { .type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, .handle.fd = fd, - .size = decoder->pl_frames[i].planes[n].texture->shared_mem.size, // image_width * image_height * bytes, + .size = + decoder->pl_frames[i].planes[n].texture->shared_mem.size, // image_width * image_height * bytes, .flags = 0, }; - checkCudaErrors(cu->cuImportExternalMemory(&decoder->ebuf[i * 2 + n].mem, &ext_desc)); // Import Memory segment + checkCudaErrors( + cu->cuImportExternalMemory(&decoder->ebuf[i * 2 + n].mem, &ext_desc)); // Import Memory segment CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC tex_desc = { .offset = decoder->pl_frames[i].planes[n].texture->shared_mem.offset, - .arrayDesc = { + .arrayDesc = + { .Width = n == 0 ? size_x : size_x / 2, .Height = n == 0 ? 
size_y : size_y / 2, .Depth = 0, @@ -2372,7 +2336,7 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi .numLevels = 1, }; checkCudaErrors(cu->cuExternalMemoryGetMappedMipmappedArray(&decoder->ebuf[i * 2 + n].mma, - decoder->ebuf[i * 2 + n].mem, &tex_desc)); + decoder->ebuf[i * 2 + n].mem, &tex_desc)); checkCudaErrors(cu->cuMipmappedArrayGetLevel(&decoder->cu_array[i][n], decoder->ebuf[i * 2 + n].mma, 0)); #endif } @@ -2387,40 +2351,38 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi img->repr.sys = PL_COLOR_SYSTEM_BT_709; // overwritten later img->repr.levels = PL_COLOR_LEVELS_TV; img->repr.alpha = PL_ALPHA_UNKNOWN; - img->color.primaries = pl_color_primaries_guess(size_x, size_y); // Gammut overwritten later - img->color.transfer = PL_COLOR_TRC_BT_1886; // overwritten later - img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; // needs config ??? - img->color.sig_peak = 0.0f; // needs config ???? + img->color.primaries = pl_color_primaries_guess(size_x, size_y); // Gammut overwritten later + img->color.transfer = PL_COLOR_TRC_BT_1886; // overwritten later + img->color.light = PL_COLOR_LIGHT_SCENE_709_1886; // needs config ??? + img->color.sig_peak = 0.0f; // needs config ???? img->color.sig_avg = 0.0f; img->num_overlays = 0; } NoContext; } - #ifdef VAAPI // copy image and process using CUDA -void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame, int image_width, int image_height) -{ +void generateVAAPIImage(CuvidDecoder *decoder, int index, const AVFrame *frame, int image_width, int image_height) { int n; VAStatus status; int toggle = 0; uint64_t first_time; VADRMPRIMESurfaceDescriptor desc; - + vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]); status = vaExportSurfaceHandle(decoder->VaDisplay, (unsigned int)frame->data[3], VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, - VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &desc); + VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &desc); if (status != VA_STATUS_SUCCESS) { printf("Fehler beim export VAAPI Handle\n"); return; } - //vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]); + // vaSyncSurface(decoder->VaDisplay, (unsigned int)frame->data[3]); Lock_and_SharedContext; - for (n = 0; n < 2; n++) { // Set DMA_BUF from VAAPI decoder to Textures + for (n = 0; n < 2; n++) { // Set DMA_BUF from VAAPI decoder to Textures int id = desc.layers[n].object_index[0]; int fd = desc.objects[id].fd; uint32_t size = desc.objects[id].size; @@ -2432,27 +2394,27 @@ void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame return; } - if (!size) { - size = n==0 ? desc.width * desc.height: desc.width * desc.height /2 ; - } - - -// fmt = pl_find_fourcc(p->gpu,desc.layers[n].drm_format); -#if 1 + if (!size) { + size = n == 0 ? desc.width * desc.height : desc.width * desc.height / 2; + } + + // fmt = pl_find_fourcc(p->gpu,desc.layers[n].drm_format); +#if 1 if (decoder->PixFmt == AV_PIX_FMT_NV12) { fmt = pl_find_named_fmt(p->gpu, n == 0 ? "r8" : "rg8"); // 8 Bit YUV } else { - fmt = pl_find_fourcc(p->gpu, n == 0 ? 0x20363152 : 0x32335247); // 10 Bit YUV + fmt = pl_find_fourcc(p->gpu, + n == 0 ? 0x20363152 : 0x32335247); // 10 Bit YUV } -#endif - +#endif + assert(fmt != NULL); #ifdef PLACEBO_GL fmt->fourcc = desc.layers[n].drm_format; #endif - + struct pl_tex_params tex_params = { - .w = n == 0 ? image_width : image_width / 2 , + .w = n == 0 ? image_width : image_width / 2, .h = n == 0 ? 
image_height : image_height / 2, .d = 0, .format = fmt, @@ -2461,12 +2423,14 @@ void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame .blit_dst = true, .renderable = true, #if PL_API_VER < 159 - .address_mode = PL_TEX_ADDRESS_CLAMP , + .address_mode = PL_TEX_ADDRESS_CLAMP, .sample_mode = PL_TEX_SAMPLE_LINEAR, #endif .import_handle = PL_HANDLE_DMA_BUF, - .shared_mem = (struct pl_shared_mem) { - .handle = { + .shared_mem = + (struct pl_shared_mem){ + .handle = + { .fd = fd, }, .size = size, @@ -2474,18 +2438,17 @@ void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame .stride_h = n == 0 ? image_height : image_height / 2, .stride_w = desc.layers[n].pitch[0], .drm_format_mod = desc.objects[id].drm_format_modifier, - }, + }, }; - // printf("vor create Object %d with fd %d import size %u offset %d %dx%d\n",id,fd,size,offset, tex_params.w,tex_params.h); + // printf("vor create Object %d with fd %d import size %u offset %d + // %dx%d\n",id,fd,size,offset, tex_params.w,tex_params.h); if (decoder->pl_frames[index].planes[n].texture) { pl_tex_destroy(p->gpu, &decoder->pl_frames[index].planes[n].texture); - } decoder->pl_frames[index].planes[n].texture = pl_tex_create(p->gpu, &tex_params); - } Unlock_and_NoContext; } @@ -2493,17 +2456,17 @@ void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame #else // no PLACEBO -void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsigned int size_y, - enum AVPixelFormat PixFmt) -{ +void createTextureDst(CuvidDecoder *decoder, int anz, unsigned int size_x, unsigned int size_y, + enum AVPixelFormat PixFmt) { int n, i; Debug(3, "video: create %d Textures Format %s w %d h %d \n", anz, PixFmt == AV_PIX_FMT_NV12 ? "NV12" : "P010", - size_x, size_y); + size_x, size_y); #ifdef USE_DRM - //set_video_mode(size_x,size_y); // switch Mode here (highly experimental) + // set_video_mode(size_x,size_y); // switch Mode here (highly + // experimental) #endif #ifdef CUVID @@ -2523,7 +2486,7 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi GlxCheck(); for (i = 0; i < anz; i++) { - for (n = 0; n < Planes; n++) { // number of planes + for (n = 0; n < Planes; n++) { // number of planes glBindTexture(GL_TEXTURE_2D, decoder->gl_textures[i * Planes + n]); GlxCheck(); @@ -2535,26 +2498,26 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi #ifdef RASPI if (PixFmt == AV_PIX_FMT_NV12) glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, n == 0 ? size_x : size_x / 2, n == 0 ? size_y : size_y / 2, 0, - GL_RED, GL_UNSIGNED_BYTE, NULL); + GL_RED, GL_UNSIGNED_BYTE, NULL); else glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, n == 0 ? size_x : size_x / 2, n == 0 ? size_y : size_y / 2, 0, - GL_RED, GL_UNSIGNED_SHORT, NULL); + GL_RED, GL_UNSIGNED_SHORT, NULL); #else if (PixFmt == AV_PIX_FMT_NV12) glTexImage2D(GL_TEXTURE_2D, 0, n == 0 ? GL_R8 : GL_RG8, n == 0 ? size_x : size_x / 2, - n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_BYTE, NULL); + n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_BYTE, NULL); else glTexImage2D(GL_TEXTURE_2D, 0, n == 0 ? GL_R16 : GL_RG16, n == 0 ? size_x : size_x / 2, - n == 0 ? size_y : size_y / 2, 0, n == 0 ? GL_RED : GL_RG, GL_UNSIGNED_SHORT, NULL); + n == 0 ? size_y : size_y / 2, 0, n == 0 ? 
GL_RED : GL_RG, GL_UNSIGNED_SHORT, NULL); #endif SDK_CHECK_ERROR_GL(); // register this texture with CUDA #ifdef CUVID checkCudaErrors(cu->cuGraphicsGLRegisterImage(&decoder->cu_res[i][n], decoder->gl_textures[i * Planes + n], - GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD)); + GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD)); checkCudaErrors(cu->cuGraphicsMapResources(1, &decoder->cu_res[i][n], 0)); - checkCudaErrors(cu->cuGraphicsSubResourceGetMappedArray(&decoder->cu_array[i][n], decoder->cu_res[i][n], 0, - 0)); + checkCudaErrors( + cu->cuGraphicsSubResourceGetMappedArray(&decoder->cu_array[i][n], decoder->cu_res[i][n], 0, 0)); checkCudaErrors(cu->cuGraphicsUnmapResources(1, &decoder->cu_res[i][n], 0)); #endif } @@ -2571,44 +2534,41 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi #ifdef VAAPI #define MP_ARRAY_SIZE(s) (sizeof(s) / sizeof((s)[0])) -#define ADD_ATTRIB(name, value) \ - do { \ - assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \ - attribs[num_attribs++] = (name); \ - attribs[num_attribs++] = (value); \ - attribs[num_attribs] = EGL_NONE; \ - } while(0) - -#define ADD_PLANE_ATTRIBS(plane) do { \ - ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \ - desc.objects[desc.layers[n].object_index[plane]].fd); \ - ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \ - desc.layers[n].offset[plane]); \ - ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \ - desc.layers[n].pitch[plane]); \ +#define ADD_ATTRIB(name, value) \ + do { \ + assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \ + attribs[num_attribs++] = (name); \ + attribs[num_attribs++] = (value); \ + attribs[num_attribs] = EGL_NONE; \ } while (0) -void generateVAAPIImage(CuvidDecoder * decoder, VASurfaceID index, const AVFrame * frame, int image_width, - int image_height) -{ +#define ADD_PLANE_ATTRIBS(plane) \ + do { \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE##plane##_FD_EXT, desc.objects[desc.layers[n].object_index[plane]].fd); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE##plane##_OFFSET_EXT, desc.layers[n].offset[plane]); \ + ADD_ATTRIB(EGL_DMA_BUF_PLANE##plane##_PITCH_EXT, desc.layers[n].pitch[plane]); \ + } while (0) + +void generateVAAPIImage(CuvidDecoder *decoder, VASurfaceID index, const AVFrame *frame, int image_width, + int image_height) { VAStatus status; uint64_t first_time; -#if defined (VAAPI) && !defined (RASPI) +#if defined(VAAPI) && !defined(RASPI) VADRMPRIMESurfaceDescriptor desc; - vaSyncSurface(decoder->VaDisplay, (VASurfaceID) (uintptr_t) frame->data[3]); - status = - vaExportSurfaceHandle(decoder->VaDisplay, (VASurfaceID) (uintptr_t) frame->data[3], - VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, - &desc); + vaSyncSurface(decoder->VaDisplay, (VASurfaceID)(uintptr_t)frame->data[3]); + status = vaExportSurfaceHandle(decoder->VaDisplay, (VASurfaceID)(uintptr_t)frame->data[3], + VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, + VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &desc); if (status != VA_STATUS_SUCCESS) { printf("Fehler beim export VAAPI Handle\n"); return; } - //vaSyncSurface(decoder->VaDisplay, (VASurfaceID) (uintptr_t) frame->data[3]); + // vaSyncSurface(decoder->VaDisplay, (VASurfaceID) (uintptr_t) + // frame->data[3]); #endif #ifdef RASPI AVDRMFrameDescriptor desc; @@ -2621,11 +2581,11 @@ void generateVAAPIImage(CuvidDecoder * decoder, VASurfaceID index, const AVFrame EglCheck(); for (int n = 0; n < Planes; n++) { - int attribs[20] = { EGL_NONE }; + int attribs[20] = {EGL_NONE}; uint 
num_attribs = 0; int fd; -#if defined (VAAPI) && !defined (RASPI) +#if defined(VAAPI) && !defined(RASPI) ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, desc.layers[n].drm_format); ADD_ATTRIB(EGL_WIDTH, n == 0 ? image_width : image_width / 2); ADD_ATTRIB(EGL_HEIGHT, n == 0 ? image_height : image_height / 2); @@ -2642,13 +2602,13 @@ void generateVAAPIImage(CuvidDecoder * decoder, VASurfaceID index, const AVFrame EGLImageTargetTexture2DOES(GL_TEXTURE_2D, decoder->images[index * Planes + n]); decoder->fds[index * Planes + n] = desc.objects[n].fd; } - + glBindTexture(GL_TEXTURE_2D, 0); eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); EglCheck(); return; - esh_failed: +esh_failed: Debug(3, "Failure in generateVAAPIImage\n"); for (int n = 0; n < Planes; n++) close(desc.objects[n].fd); @@ -2663,40 +2623,37 @@ void generateVAAPIImage(CuvidDecoder * decoder, VASurfaceID index, const AVFrame /// /// @param decoder CUVID hw decoder /// -static void CuvidSetupOutput(CuvidDecoder * decoder) -{ +static void CuvidSetupOutput(CuvidDecoder *decoder) { // FIXME: need only to create and destroy surfaces for size changes - // or when number of needed surfaces changed! + // or when number of needed surfaces changed! decoder->Resolution = VideoResolutionGroup(decoder->InputWidth, decoder->InputHeight, decoder->Interlaced); CuvidCreateSurfaces(decoder, decoder->InputWidth, decoder->InputHeight, decoder->PixFmt); - CuvidUpdateOutput(decoder); // update aspect/scaling + CuvidUpdateOutput(decoder); // update aspect/scaling window_width = decoder->OutputWidth; window_height = decoder->OutputHeight; } /// -/// Get a free surface. Called from ffmpeg. +/// Get a free surface. Called from ffmpeg. /// /// @param decoder CUVID hw decoder -/// @param video_ctx ffmpeg video codec context +/// @param video_ctx ffmpeg video codec context /// /// @returns the oldest free surface /// -static unsigned CuvidGetVideoSurface(CuvidDecoder * decoder, const AVCodecContext * video_ctx) -{ +static unsigned CuvidGetVideoSurface(CuvidDecoder *decoder, const AVCodecContext *video_ctx) { (void)video_ctx; return CuvidGetVideoSurface0(decoder); } -#if defined (VAAPI) || defined (YADIF) -static void CuvidSyncRenderFrame(CuvidDecoder * decoder, const AVCodecContext * video_ctx, AVFrame * frame); +#if defined(VAAPI) || defined(YADIF) +static void CuvidSyncRenderFrame(CuvidDecoder *decoder, const AVCodecContext *video_ctx, AVFrame *frame); -int push_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * frame) -{ +int push_filters(AVCodecContext *dec_ctx, CuvidDecoder *decoder, AVFrame *frame) { int ret; AVFrame *filt_frame = av_frame_alloc(); @@ -2706,23 +2663,23 @@ int push_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * fra av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); } - // printf("Interlaced %d tff %d\n",frame->interlaced_frame,frame->top_field_first); + // printf("Interlaced %d tff + // %d\n",frame->interlaced_frame,frame->top_field_first); /* pull filtered frames from the filtergraph */ while ((ret = av_buffersink_get_frame(decoder->buffersink_ctx, filt_frame)) >= 0) { filt_frame->pts /= 2; decoder->Interlaced = 0; - // printf("vaapideint video:new %#012" PRIx64 " old %#012" PRIx64 "\n",filt_frame->pts,frame->pts); + // printf("vaapideint video:new %#012" PRIx64 " old %#012" PRIx64 + // "\n",filt_frame->pts,frame->pts); CuvidSyncRenderFrame(decoder, dec_ctx, filt_frame); - filt_frame = av_frame_alloc(); // get new frame - + filt_frame = av_frame_alloc(); // get new 
frame } av_frame_free(&filt_frame); av_frame_free(&frame); return ret; } -int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * frame) -{ +int init_filters(AVCodecContext *dec_ctx, CuvidDecoder *decoder, AVFrame *frame) { enum AVPixelFormat format = PIXEL_FORMAT; #ifdef VAAPI @@ -2730,7 +2687,7 @@ int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * fra #endif #ifdef YADIF const char *filters_descr = "yadif_cuda=1:0:1"; // mode=send_field,parity=tff,deint=interlaced"; - enum AVPixelFormat pix_fmts[] = { format, AV_PIX_FMT_NONE }; + enum AVPixelFormat pix_fmts[] = {format, AV_PIX_FMT_NONE}; #endif char args[512]; @@ -2750,9 +2707,10 @@ int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * fra goto end; } - /* buffer video source: the decoded frames from the decoder will be inserted here. */ + /* buffer video source: the decoded frames from the decoder will be inserted + * here. */ snprintf(args, sizeof(args), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", dec_ctx->width, - dec_ctx->height, format, 1, 90000, dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); + dec_ctx->height, format, 1, 90000, dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); ret = avfilter_graph_create_filter(&decoder->buffersrc_ctx, buffersrc, "in", args, NULL, decoder->filter_graph); if (ret < 0) { @@ -2770,7 +2728,8 @@ int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * fra src_params->frame_rate.den = 1; src_params->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; - // printf("width %d height %d hw_frames_ctx %p\n",dec_ctx->width,dec_ctx->height ,frame->hw_frames_ctx); + // printf("width %d height %d hw_frames_ctx + // %p\n",dec_ctx->width,dec_ctx->height ,frame->hw_frames_ctx); ret = av_buffersrc_parameters_set(decoder->buffersrc_ctx, src_params); if (ret < 0) { Debug(3, "Cannot set hw_frames_ctx to src\n"); @@ -2826,7 +2785,7 @@ int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * fra goto end; } - end: +end: avfilter_inout_free(&inputs); avfilter_inout_free(&outputs); @@ -2835,8 +2794,7 @@ int init_filters(AVCodecContext * dec_ctx, CuvidDecoder * decoder, AVFrame * fra #endif #ifdef VAAPI -static int init_generic_hwaccel(CuvidDecoder * decoder, enum AVPixelFormat hw_fmt, AVCodecContext * video_ctx) -{ +static int init_generic_hwaccel(CuvidDecoder *decoder, enum AVPixelFormat hw_fmt, AVCodecContext *video_ctx) { AVBufferRef *new_frames_ctx = NULL; @@ -2857,11 +2815,11 @@ static int init_generic_hwaccel(CuvidDecoder * decoder, enum AVPixelFormat hw_fm AVHWFramesContext *old_fctx = (void *)decoder->cached_hw_frames_ctx->data; Debug(3, "CMP %d:%d %d:%d %d:%d %d:%d %d:%d\,", new_fctx->format, old_fctx->format, new_fctx->sw_format, - old_fctx->sw_format, new_fctx->width, old_fctx->width, new_fctx->height, old_fctx->height, - new_fctx->initial_pool_size, old_fctx->initial_pool_size); - if (new_fctx->format != old_fctx->format || new_fctx->sw_format != old_fctx->sw_format - || new_fctx->width != old_fctx->width || new_fctx->height != old_fctx->height - || new_fctx->initial_pool_size != old_fctx->initial_pool_size) { + old_fctx->sw_format, new_fctx->width, old_fctx->width, new_fctx->height, old_fctx->height, + new_fctx->initial_pool_size, old_fctx->initial_pool_size); + if (new_fctx->format != old_fctx->format || new_fctx->sw_format != old_fctx->sw_format || + new_fctx->width != old_fctx->width || new_fctx->height != old_fctx->height || + 
new_fctx->initial_pool_size != old_fctx->initial_pool_size) { Debug(3, "delete old cache"); if (decoder->filter_graph) avfilter_graph_free(&decoder->filter_graph); @@ -2887,7 +2845,7 @@ static int init_generic_hwaccel(CuvidDecoder * decoder, enum AVPixelFormat hw_fm av_buffer_unref(&new_frames_ctx); return 0; - error: +error: Debug(3, "Error with hwframes\n"); av_buffer_unref(&new_frames_ctx); av_buffer_unref(&decoder->cached_hw_frames_ctx); @@ -2897,21 +2855,20 @@ static int init_generic_hwaccel(CuvidDecoder * decoder, enum AVPixelFormat hw_fm /// /// Callback to negotiate the PixelFormat. /// -/// @param fmt is the list of formats which are supported by the codec, -/// it is terminated by -1 as 0 is a valid format, the -/// formats are ordered by quality. +/// @param fmt is the list of formats which are supported by the codec, +/// it is terminated by -1 as 0 is a valid format, the +/// formats are ordered by quality. /// -static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContext * video_ctx, - const enum AVPixelFormat *fmt) -{ +static enum AVPixelFormat Cuvid_get_format(CuvidDecoder *decoder, AVCodecContext *video_ctx, + const enum AVPixelFormat *fmt) { const enum AVPixelFormat *fmt_idx; int bitformat16 = 0, deint = 0; VideoDecoder *ist = video_ctx->opaque; // - // look through formats + // look through formats // Debug(3, "%s: codec %d fmts:\n", __FUNCTION__, video_ctx->codec_id); for (fmt_idx = fmt; *fmt_idx != AV_PIX_FMT_NONE; fmt_idx++) { @@ -2946,7 +2903,7 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex Fatal(_("video: no valid profile found\n")); } - // decoder->newchannel = 1; + // decoder->newchannel = 1; #ifdef VAAPI init_generic_hwaccel(decoder, PIXEL_FORMAT, video_ctx); #endif @@ -2956,25 +2913,25 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex ist->GetFormatDone = 1; - Debug(3, "video: create decoder 16bit?=%d %dx%d old %d %d\n", bitformat16, video_ctx->width, video_ctx->height, - decoder->InputWidth, decoder->InputHeight); + Debug(3, "video: create decoder 16bit?=%d %dx%d old %d %d\n", bitformat16, video_ctx->width, video_ctx->height, + decoder->InputWidth, decoder->InputHeight); - if (*fmt_idx == PIXEL_FORMAT) { // HWACCEL used + if (*fmt_idx == PIXEL_FORMAT) { // HWACCEL used // Check image, format, size // if (bitformat16) { - decoder->PixFmt = AV_PIX_FMT_YUV420P; // 10 Bit Planar + decoder->PixFmt = AV_PIX_FMT_YUV420P; // 10 Bit Planar ist->hwaccel_output_format = AV_PIX_FMT_YUV420P; } else { - decoder->PixFmt = AV_PIX_FMT_NV12; // 8 Bit Planar + decoder->PixFmt = AV_PIX_FMT_NV12; // 8 Bit Planar ist->hwaccel_output_format = AV_PIX_FMT_NV12; } - if ((video_ctx->width != decoder->InputWidth || video_ctx->height != decoder->InputHeight) - && decoder->TrickSpeed == 0) { + if ((video_ctx->width != decoder->InputWidth || video_ctx->height != decoder->InputHeight) && + decoder->TrickSpeed == 0) { - // if (decoder->TrickSpeed == 0) { + // if (decoder->TrickSpeed == 0) { #ifdef PLACEBO VideoThreadLock(); #endif @@ -2993,13 +2950,13 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex #ifdef YADIF if (VideoDeinterlace[decoder->Resolution] == VideoDeinterlaceYadif) { deint = 0; - ist->filter = 1; // init yadif_cuda + ist->filter = 1; // init yadif_cuda } else { deint = 2; ist->filter = 0; } CuvidMessage(2, "deint = %s\n", deint == 0 ? 
"Yadif" : "Cuda"); - if (av_opt_set_int(video_ctx->priv_data, "deint", deint, 0) < 0) { // adaptive + if (av_opt_set_int(video_ctx->priv_data, "deint", deint, 0) < 0) { // adaptive Fatal(_("codec: can't set option deint to video codec!\n")); } #endif @@ -3012,7 +2969,7 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex decoder->PTS = AV_NOPTS_VALUE; VideoDeltaPTS = 0; decoder->InputAspect = video_ctx->sample_aspect_ratio; - CuvidUpdateOutput(decoder); // update aspect/scaling + CuvidUpdateOutput(decoder); // update aspect/scaling } CuvidMessage(2, "GetFormat Init ok %dx%d\n", video_ctx->width, video_ctx->height); @@ -3020,8 +2977,8 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex #ifdef CUVID ist->active_hwaccel_id = HWACCEL_CUVID; #else - if (VideoDeinterlace[decoder->Resolution]) // need deinterlace - ist->filter = 1; // init deint vaapi + if (VideoDeinterlace[decoder->Resolution]) // need deinterlace + ist->filter = 1; // init deint vaapi else ist->filter = 0; @@ -3035,23 +2992,22 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex } #ifdef USE_GRAB -void swapc(unsigned char *x, unsigned char *y) -{ +void swapc(unsigned char *x, unsigned char *y) { unsigned char temp = *x; + *x = *y; *y = temp; } + #ifdef PLACEBO -int get_RGB(CuvidDecoder * decoder, struct pl_overlay *ovl) -{ +int get_RGB(CuvidDecoder *decoder, struct pl_overlay *ovl) { #else -int get_RGB(CuvidDecoder * decoder) -{ +int get_RGB(CuvidDecoder *decoder) { #endif #ifdef PLACEBO struct pl_render_params render_params = pl_render_default_params; - struct pl_frame target = { 0 }; + struct pl_frame target = {0}; const struct pl_fmt *fmt; int offset, x1, y1, x0, y0; @@ -3097,7 +3053,7 @@ int get_RGB(CuvidDecoder * decoder) GlxCheck(); if (gl_prog == 0) - gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm + gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm glUseProgram(gl_prog); texLoc = glGetUniformLocation(gl_prog, "texture0"); @@ -3129,7 +3085,7 @@ int get_RGB(CuvidDecoder * decoder) if (OsdShown == 1) { if (OSDtexture) glDeleteTextures(1, &OSDtexture); -// pthread_mutex_lock(&OSDMutex); + // pthread_mutex_lock(&OSDMutex); glGenTextures(1, &OSDtexture); glBindTexture(GL_TEXTURE_2D, OSDtexture); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, OSDxsize, OSDysize, 0, GL_RGBA, GL_UNSIGNED_BYTE, posd); @@ -3137,7 +3093,7 @@ int get_RGB(CuvidDecoder * decoder) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); -// pthread_mutex_unlock(&OSDMutex); + // pthread_mutex_unlock(&OSDMutex); OsdShown = 2; } @@ -3158,15 +3114,14 @@ int get_RGB(CuvidDecoder * decoder) glActiveTexture(GL_TEXTURE0); -// pthread_mutex_lock(&OSDMutex); + // pthread_mutex_lock(&OSDMutex); glBindTexture(GL_TEXTURE_2D, OSDtexture); glBindFramebuffer(GL_FRAMEBUFFER, fb); render_pass_quad(0, 0.0, 0.0); -// pthread_mutex_unlock(&OSDMutex); + // pthread_mutex_unlock(&OSDMutex); glUseProgram(0); glActiveTexture(GL_TEXTURE0); - } glFlush(); Debug(3, "Read pixels %d %d\n", width, height); @@ -3185,12 +3140,13 @@ int get_RGB(CuvidDecoder * decoder) faktorx = (float)width / (float)VideoWindowWidth; faktory = (float)height / (float)VideoWindowHeight; #ifdef PLACEBO_GL - fmt = pl_find_named_fmt(p->gpu, "rgba8"); // bgra8 not supported + fmt = pl_find_named_fmt(p->gpu, 
"rgba8"); // bgra8 not supported #else - fmt = pl_find_named_fmt(p->gpu, "bgra8"); + fmt = pl_find_named_fmt(p->gpu, "bgra8"); #endif #if PL_API_VER < 159 - target.fbo = pl_tex_create(p->gpu, &(struct pl_tex_params) { + target.fbo = pl_tex_create( + p->gpu, &(struct pl_tex_params) { #else target.num_planes = 1; target.planes[0].components = 4; @@ -3198,23 +3154,17 @@ int get_RGB(CuvidDecoder * decoder) target.planes[0].component_mapping[1] = PL_CHANNEL_G; target.planes[0].component_mapping[2] = PL_CHANNEL_B; target.planes[0].component_mapping[3] = PL_CHANNEL_A; - target.planes[0].texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { + target.planes[0].texture = pl_tex_create( + p->gpu, &(struct pl_tex_params) { #endif - .w = width, - .h = height, - .d = 0, - .format = fmt, - .sampleable = true, - .renderable = true, - .blit_dst = true, + .w = width, .h = height, .d = 0, .format = fmt, .sampleable = true, .renderable = true, .blit_dst = true, .host_readable = true, -#if PL_API_VER < 159 - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .address_mode = PL_TEX_ADDRESS_CLAMP, +#if PL_API_VER < 159 + .sample_mode = PL_TEX_SAMPLE_LINEAR, .address_mode = PL_TEX_ADDRESS_CLAMP, #endif }); -#if PL_API_VER >= 100 +#if PL_API_VER >= 100 target.crop.x0 = (float)decoder->OutputX * faktorx; target.crop.y0 = (float)decoder->OutputY * faktory; target.crop.x1 = (float)(decoder->OutputX + decoder->OutputWidth) * faktorx; @@ -3245,19 +3195,19 @@ int get_RGB(CuvidDecoder * decoder) y1 = ovl->rect.y0; x1 = ovl->rect.x1; y0 = ovl->rect.y1; - ovl->rect.x0 = (float)x0 *faktorx; - ovl->rect.y0 = (float)y0 *faktory; - ovl->rect.x1 = (float)x1 *faktorx; - ovl->rect.y1 = (float)y1 *faktory; + ovl->rect.x0 = (float)x0 * faktorx; + ovl->rect.y0 = (float)y0 * faktory; + ovl->rect.x1 = (float)x1 * faktorx; + ovl->rect.y1 = (float)y1 * faktory; #else - x0 = ovl->rect.x0; + x0 = ovl->rect.x0; y0 = ovl->rect.y0; x1 = ovl->rect.x1; y1 = ovl->rect.y1; - ovl->rect.x0 = (float)x0 *faktorx; - ovl->rect.y0 = (float)y0 *faktory; - ovl->rect.x1 = (float)x1 *faktorx; - ovl->rect.y1 = (float)y1 *faktory; + ovl->rect.x0 = (float)x0 * faktorx; + ovl->rect.y0 = (float)y0 * faktory; + ovl->rect.x1 = (float)x1 * faktorx; + ovl->rect.y1 = (float)y1 * faktory; #endif } else { @@ -3278,18 +3228,19 @@ int get_RGB(CuvidDecoder * decoder) ovl->rect.x1 = x1; ovl->rect.y1 = y0; #else - ovl->rect.x0 = x0; + ovl->rect.x0 = x0; ovl->rect.y0 = y0; ovl->rect.x1 = x1; ovl->rect.y1 = y1; #endif } - pl_tex_download(p->gpu, &(struct pl_tex_transfer_params) { // download Data + pl_tex_download( + p->gpu, &(struct pl_tex_transfer_params) { // download Data #if PL_API_VER < 159 .tex = target.fbo, #else - .tex = target.planes[0].texture, + .tex = target.planes[0].texture, #endif .ptr = base, }); @@ -3299,9 +3250,10 @@ int get_RGB(CuvidDecoder * decoder) pl_tex_destroy(p->gpu, &target.planes[0].texture); #endif #ifdef PLACEBO_GL - unsigned char *b = base; - for (int i = 0 ; i < width * height * 4; i+=4) - swapc(&b[i+0],&b[i+2]); + unsigned char *b = base; + + for (int i = 0; i < width * height * 4; i += 4) + swapc(&b[i + 0], &b[i + 2]); #endif #endif return 0; @@ -3311,11 +3263,10 @@ int get_RGB(CuvidDecoder * decoder) /// Grab output surface already locked. 
/// /// @param ret_size[out] size of allocated surface copy -/// @param ret_width[in,out] width of output -/// @param ret_height[in,out] height of output +/// @param ret_width[in,out] width of output +/// @param ret_height[in,out] height of output /// -static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int *ret_height, int mitosd) -{ +static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int *ret_height, int mitosd) { uint32_t size; uint32_t width; uint32_t height; @@ -3324,12 +3275,12 @@ static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int CuvidDecoder *decoder; decoder = CuvidDecoders[0]; - if (decoder == NULL) // no video aktiv + if (decoder == NULL) // no video active return NULL; - // surface = CuvidSurfacesRb[CuvidOutputSurfaceIndex]; + // surface = CuvidSurfacesRb[CuvidOutputSurfaceIndex]; - // get real surface size + // get real surface size #ifdef PLACEBO width = decoder->VideoWidth; height = decoder->VideoHeight; @@ -3346,7 +3297,7 @@ static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int source_rect.y1 = height; if (ret_width && ret_height) { - if (*ret_width <= -64) { // this is an Atmo grab service request + if (*ret_width <= -64) { // this is an Atmo grab service request int overscan; // calculate aspect correct size of analyze image @@ -3390,7 +3341,7 @@ static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int decoder->grab = 1; while (decoder->grab) { - usleep(1000); // wait for data + usleep(1000); // wait for data } // Debug(3,"got grab data\n"); @@ -3413,11 +3364,10 @@ static uint8_t *CuvidGrabOutputSurfaceLocked(int *ret_size, int *ret_width, int /// Grab output surface. /// /// @param ret_size[out] size of allocated surface copy -/// @param ret_width[in,out] width of output -/// @param ret_height[in,out] height of output +/// @param ret_width[in,out] width of output +/// @param ret_height[in,out] height of output /// -static uint8_t *CuvidGrabOutputSurface(int *ret_size, int *ret_width, int *ret_height, int mitosd) -{ +static uint8_t *CuvidGrabOutputSurface(int *ret_size, int *ret_width, int *ret_height, int mitosd) { uint8_t *img; img = CuvidGrabOutputSurfaceLocked(ret_size, ret_width, ret_height, mitosd); @@ -3435,8 +3385,7 @@ static uint8_t *CuvidGrabOutputSurface(int *ret_size, int *ret_width, int *ret_h /// /// @note we can't mix software and hardware decoder surfaces /// -static void CuvidQueueVideoSurface(CuvidDecoder * decoder, int surface, int softdec) -{ +static void CuvidQueueVideoSurface(CuvidDecoder *decoder, int surface, int softdec) { int old; ++decoder->FrameCounter; @@ -3444,7 +3393,7 @@ static void CuvidQueueVideoSurface(CuvidDecoder * decoder, int surface, int soft // can't wait for output queue empty if (atomic_read(&decoder->SurfacesFilled) >= VIDEO_SURFACES_MAX) { Warning(_("video/cuvid: output buffer full, dropping frame (%d/%d)\n"), ++decoder->FramesDropped, - decoder->FrameCounter); + decoder->FrameCounter); if (!(decoder->FramesDisplayed % 300)) { CuvidPrintFrames(decoder); } @@ -3488,11 +3437,10 @@ extern void cudaLaunchNV12toARGBDrv(uint32_t * d_srcNV12, size_t nSourcePitch, u /// Render a ffmpeg frame.
/// /// @param decoder CUVID hw decoder -/// @param video_ctx ffmpeg video codec context +/// @param video_ctx ffmpeg video codec context /// @param frame frame to display /// -static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * video_ctx, AVFrame * frame) -{ +static void CuvidRenderFrame(CuvidDecoder *decoder, const AVCodecContext *video_ctx, AVFrame *frame) { uint64_t first_time; int surface; enum AVColorSpace color; @@ -3511,64 +3459,68 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * vide Debug(3, "video/cuvid: aspect ratio changed\n"); decoder->InputAspect = frame->sample_aspect_ratio; - // printf("new aspect %d:%d\n",frame->sample_aspect_ratio.num,frame->sample_aspect_ratio.den); + // printf("new aspect + // %d:%d\n",frame->sample_aspect_ratio.num,frame->sample_aspect_ratio.den); CuvidUpdateOutput(decoder); } - -// printf("Orig colorspace %d Primaries %d TRC %d ------- ",frame->colorspace,frame->color_primaries,frame->color_trc); - - // Fix libav colorspace failure - color = frame->colorspace; - if (color == AVCOL_SPC_UNSPECIFIED) // failure with RTL HD and all SD channels with vaapi - if (frame->width > 720) - color = AVCOL_SPC_BT709; - else - color = AVCOL_SPC_BT470BG; - if (color == AVCOL_SPC_RGB) // Cuvid decoder failure with SD channels - color = AVCOL_SPC_BT470BG; + // printf("Orig colorspace %d Primaries %d TRC %d ------- + //",frame->colorspace,frame->color_primaries,frame->color_trc); + + // Fix libav colorspace failure + color = frame->colorspace; + if (color == AVCOL_SPC_UNSPECIFIED) // failure with RTL HD and all SD channels + // with vaapi + if (frame->width > 720) + color = AVCOL_SPC_BT709; + else + color = AVCOL_SPC_BT470BG; + if (color == AVCOL_SPC_RGB) // Cuvid decoder failure with SD channels + color = AVCOL_SPC_BT470BG; frame->colorspace = color; - - // Fix libav Color primaries failures - if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED) // failure with RTL HD and all SD channels with vaapi - if (frame->width > 720) - frame->color_primaries = AVCOL_PRI_BT709; - else - frame->color_primaries = AVCOL_PRI_BT470BG; - if (frame->color_primaries == AVCOL_PRI_RESERVED0) // cuvid decoder failure with SD channels - frame->color_primaries = AVCOL_PRI_BT470BG; - - // Fix libav Color TRC failures - if (frame->color_trc == AVCOL_TRC_UNSPECIFIED) // failure with RTL HD and all SD channels with vaapi - if (frame->width > 720) - frame->color_trc = AVCOL_TRC_BT709; - else - frame->color_trc = AVCOL_TRC_SMPTE170M; - if (frame->color_trc == AVCOL_TRC_RESERVED0) // cuvid decoder failure with SD channels - frame->color_trc = AVCOL_TRC_SMPTE170M; - -// printf("Patched colorspace %d Primaries %d TRC %d\n",frame->colorspace,frame->color_primaries,frame->color_trc); + + // Fix libav Color primaries failures + if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED) // failure with RTL HD and all SD channels with + // vaapi + if (frame->width > 720) + frame->color_primaries = AVCOL_PRI_BT709; + else + frame->color_primaries = AVCOL_PRI_BT470BG; + if (frame->color_primaries == AVCOL_PRI_RESERVED0) // cuvid decoder failure with SD channels + frame->color_primaries = AVCOL_PRI_BT470BG; + + // Fix libav Color TRC failures + if (frame->color_trc == AVCOL_TRC_UNSPECIFIED) // failure with RTL HD and all + // SD channels with vaapi + if (frame->width > 720) + frame->color_trc = AVCOL_TRC_BT709; + else + frame->color_trc = AVCOL_TRC_SMPTE170M; + if (frame->color_trc == AVCOL_TRC_RESERVED0) // cuvid decoder failure with SD channels + 
frame->color_trc = AVCOL_TRC_SMPTE170M; + +// printf("Patched colorspace %d Primaries %d TRC +// %d\n",frame->colorspace,frame->color_primaries,frame->color_trc); #ifdef RASPI // - // Check image, format, size + // Check image, format, size // - if ( // decoder->PixFmt != video_ctx->pix_fmt + if ( // decoder->PixFmt != video_ctx->pix_fmt video_ctx->width != decoder->InputWidth -// || decoder->ColorSpace != color + // || decoder->ColorSpace != color || video_ctx->height != decoder->InputHeight) { - Debug(3, "fmt %02d:%02d width %d:%d hight %d:%d\n", decoder->ColorSpace, frame->colorspace, video_ctx->width, - decoder->InputWidth, video_ctx->height, decoder->InputHeight); + Debug(3, "fmt %02d:%02d width %d:%d height %d:%d\n", decoder->ColorSpace, frame->colorspace, video_ctx->width, + decoder->InputWidth, video_ctx->height, decoder->InputHeight); decoder->PixFmt = AV_PIX_FMT_NV12; decoder->InputWidth = video_ctx->width; decoder->InputHeight = video_ctx->height; CuvidCleanup(decoder); decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1; CuvidSetupOutput(decoder); - } #endif // - // Copy data from frame to image + // Copy data from frame to image // #ifdef RASPI if (video_ctx->pix_fmt == 0) { @@ -3578,22 +3530,22 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * vide int w = decoder->InputWidth; int h = decoder->InputHeight; - decoder->ColorSpace = color; // save colorspace + decoder->ColorSpace = color; // save colorspace decoder->trc = frame->color_trc; decoder->color_primaries = frame->color_primaries; surface = CuvidGetVideoSurface0(decoder); - if (surface == -1) { // no free surfaces + if (surface == -1) { // no free surfaces Debug(3, "no more surfaces\n"); av_frame_free(&frame); return; } -#if defined (VAAPI) && defined (PLACEBO) - if (p->has_dma_buf) { // Vulkan supports DMA_BUF no copy required +#if defined(VAAPI) && defined(PLACEBO) + if (p->has_dma_buf) { // Vulkan supports DMA_BUF no copy required generateVAAPIImage(decoder, surface, frame, w, h); - } else { // we need to Copy the frame via RAM + } else { // we need to copy the frame via RAM AVFrame *output; VideoThreadLock(); @@ -3601,25 +3553,26 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * vide output = av_frame_alloc(); av_hwframe_transfer_data(output, frame, 0); av_frame_copy_props(output, frame); - // printf("Save Surface ID %d %p %p\n",surface,decoder->pl_frames[surface].planes[0].texture,decoder->pl_frames[surface].planes[1].texture); - bool ok = pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) { - .tex = decoder->pl_frames[surface].planes[0].texture, - .stride_w = output->linesize[0], - .stride_h = h, - .ptr = output->data[0], - .rc.x1 = w, - .rc.y1 = h, - .rc.z1 = 0, - }); - ok &= pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) { - .tex = decoder->pl_frames[surface].planes[1].texture, - .stride_w = output->linesize[0] / 2, - .stride_h = h / 2, - .ptr = output->data[1], - .rc.x1 = w / 2, - .rc.y1 = h / 2, - .rc.z1 = 0, - }); + // printf("Save Surface ID %d %p + // %p\n",surface,decoder->pl_frames[surface].planes[0].texture,decoder->pl_frames[surface].planes[1].texture); + bool ok = pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params){ + .tex = decoder->pl_frames[surface].planes[0].texture, + .stride_w = output->linesize[0], + .stride_h = h, + .ptr = output->data[0], + .rc.x1 = w, + .rc.y1 = h, + .rc.z1 = 0, + }); + ok &= pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params){ + .tex = decoder->pl_frames[surface].planes[1].texture, + .stride_w = 
output->linesize[0] / 2, + .stride_h = h / 2, + .ptr = output->data[1], + .rc.x1 = w / 2, + .rc.y1 = h / 2, + .rc.z1 = 0, + }); av_frame_free(&output); VideoThreadUnlock(); } @@ -3636,10 +3589,10 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * vide CuvidQueueVideoSurface(decoder, surface, 1); decoder->frames[surface] = frame; return; - } - // Debug(3,"video/cuvid: pixel format %d not supported\n", video_ctx->pix_fmt); + // Debug(3,"video/cuvid: pixel format %d not supported\n", + // video_ctx->pix_fmt); av_frame_free(&frame); return; } @@ -3649,8 +3602,7 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * vide /// /// @param decoder CUVID hw decoder /// -static void *CuvidGetHwAccelContext(CuvidDecoder * decoder) -{ +static void *CuvidGetHwAccelContext(CuvidDecoder *decoder) { unsigned int version, ret; Debug(3, "Initializing cuvid hwaccel thread ID:%ld\n", (long int)syscall(186)); @@ -3670,7 +3622,7 @@ static void *CuvidGetHwAccelContext(CuvidDecoder * decoder) } checkCudaErrors(cu->cuInit(0)); - checkCudaErrors(cu->cuCtxCreate(&decoder->cuda_ctx, (unsigned int)CU_CTX_SCHED_BLOCKING_SYNC, (CUdevice) 0)); + checkCudaErrors(cu->cuCtxCreate(&decoder->cuda_ctx, (unsigned int)CU_CTX_SCHED_BLOCKING_SYNC, (CUdevice)0)); if (decoder->cuda_ctx == NULL) Fatal(_("Kein Cuda device gefunden")); @@ -3679,7 +3631,6 @@ static void *CuvidGetHwAccelContext(CuvidDecoder * decoder) // Debug(3, "***********CUDA API Version %d\n", version); #endif return NULL; - } /// @@ -3690,13 +3641,11 @@ static void *CuvidGetHwAccelContext(CuvidDecoder * decoder) /// @FIXME: render only video area, not fullscreen! /// decoder->Output.. isn't correct setup for radio stations /// -static void CuvidBlackSurface( __attribute__((unused)) CuvidDecoder * decoder) -{ +static void CuvidBlackSurface(__attribute__((unused)) CuvidDecoder *decoder) { #ifndef PLACEBO glClear(GL_COLOR_BUFFER_BIT); #endif return; - } /// @@ -3704,8 +3653,7 @@ static void CuvidBlackSurface( __attribute__((unused)) CuvidDecoder * decoder) /// /// @param decoder CUVID hw decoder /// -static void CuvidAdvanceDecoderFrame(CuvidDecoder * decoder) -{ +static void CuvidAdvanceDecoderFrame(CuvidDecoder *decoder) { // next surface, if complete frame is displayed (1 -> 0) if (decoder->SurfaceField) { int filled; @@ -3733,22 +3681,21 @@ static void CuvidAdvanceDecoderFrame(CuvidDecoder * decoder) // next field decoder->SurfaceField = 1; } - + #if defined PLACEBO && PL_API_VER >= 58 -static const struct pl_hook * -parse_user_shader(char *shader) -{ +static const struct pl_hook *parse_user_shader(char *shader) { char tmp[200]; - if (!shader ) + + if (!shader) return NULL; const struct pl_hook *hook = NULL; char *str = NULL; - -// Debug(3,"Parse user shader %s/%s\n",MyConfigDir,shader); - sprintf(tmp,"%s/%s",MyConfigDir,shader); + // Debug(3,"Parse user shader %s/%s\n",MyConfigDir,shader); + + sprintf(tmp, "%s/%s", MyConfigDir, shader); FILE *f = fopen(tmp, "rb"); if (!f) { @@ -3757,9 +3704,11 @@ parse_user_shader(char *shader) } int ret = fseek(f, 0, SEEK_END); + if (ret == -1) goto error; long length = ftell(f); + if (length == -1) goto error; rewind(f); @@ -3773,7 +3722,7 @@ parse_user_shader(char *shader) hook = pl_mpv_user_shader_parse(p->gpu, str, length); // fall through - Debug(3,"User shader %p\n",hook); + Debug(3, "User shader %p\n", hook); error: if (f) fclose(f); @@ -3782,7 +3731,6 @@ error: } #endif - /// /// Render video surface to output surface. 
/// @@ -3790,10 +3738,9 @@ error: /// @param level video surface level 0 = bottom /// #ifdef PLACEBO -static void CuvidMixVideo(CuvidDecoder * decoder, int level, struct pl_frame *target, struct pl_overlay *ovl) +static void CuvidMixVideo(CuvidDecoder *decoder, int level, struct pl_frame *target, struct pl_overlay *ovl) #else -static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) - int level) +static void CuvidMixVideo(CuvidDecoder *decoder, __attribute__((unused)) int level) #endif { #ifdef PLACEBO @@ -3804,7 +3751,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) struct pl_tex_vk *vkp; struct pl_plane *pl; const struct pl_fmt *fmt; - struct pl_tex *tex0,*tex1; + struct pl_tex *tex0, *tex1; struct pl_frame *img; bool ok; @@ -3818,16 +3765,16 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) float xcropf, ycropf; GLint texLoc; AVFrame *frame; - AVFrameSideData *sd,*sd1=NULL,*sd2=NULL; + AVFrameSideData *sd, *sd1 = NULL, *sd2 = NULL; #ifdef PLACEBO if (level) { - dst_rect.x0 = decoder->VideoX; // video window output (clip) + dst_rect.x0 = decoder->VideoX; // video window output (clip) dst_rect.y0 = decoder->VideoY; dst_rect.x1 = decoder->VideoX + decoder->VideoWidth; dst_rect.y1 = decoder->VideoY + decoder->VideoHeight; } else { - dst_rect.x0 = 0; // complete window (clip) + dst_rect.x0 = 0; // complete window (clip) dst_rect.y0 = 0; dst_rect.x1 = VideoWindowWidth; dst_rect.y1 = VideoWindowHeight; @@ -3838,7 +3785,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) video_src_rect.x1 = decoder->CropX + decoder->CropWidth; video_src_rect.y1 = decoder->CropY + decoder->CropHeight; - dst_video_rect.x0 = decoder->OutputX; // video output (scale) + dst_video_rect.x0 = decoder->OutputX; // video output (scale) dst_video_rect.y0 = decoder->OutputY; dst_video_rect.x1 = decoder->OutputX + decoder->OutputWidth; dst_video_rect.y1 = decoder->OutputY + decoder->OutputHeight; @@ -3867,7 +3814,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) glViewport(decoder->OutputX, y, decoder->OutputWidth, decoder->OutputHeight); if (gl_prog == 0) - gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader programm + gl_prog = sc_generate(gl_prog, decoder->ColorSpace); // generate shader program glUseProgram(gl_prog); texLoc = glGetUniformLocation(gl_prog, "texture0"); @@ -3900,14 +3847,13 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) memcpy(&deband, &pl_deband_default_params, sizeof(deband)); memcpy(&render_params, &pl_render_default_params, sizeof(render_params)); render_params.deband_params = &deband; - frame = decoder->frames[current]; - // Fix Color Parameters + // Fix Color Parameters switch (decoder->ColorSpace) { - case AVCOL_SPC_RGB: // BT 601 is reportet as RGB + case AVCOL_SPC_RGB: // BT 601 is reported as RGB case AVCOL_SPC_BT470BG: memcpy(&img->repr, &pl_color_repr_sdtv, sizeof(struct pl_color_repr)); img->color.primaries = PL_COLOR_PRIM_BT_601_625; @@ -3916,7 +3862,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) pl->shift_x = 0.0f; break; case AVCOL_SPC_BT709: - case AVCOL_SPC_UNSPECIFIED: // comes with UHD + case AVCOL_SPC_UNSPECIFIED: // comes with UHD memcpy(&img->repr, &pl_color_repr_hdtv, sizeof(struct pl_color_repr)); memcpy(&img->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); pl->shift_x = -0.5f; @@ -3925,12 +3871,12 @@ static void CuvidMixVideo(CuvidDecoder * decoder, 
__attribute__((unused)) case AVCOL_SPC_BT2020_NCL: memcpy(&img->repr, &pl_color_repr_uhdtv, sizeof(struct pl_color_repr)); memcpy(&img->color, &pl_color_space_bt2020_hlg, sizeof(struct pl_color_space)); - deband.grain = 0.0f; // no grain in HDR + deband.grain = 0.0f; // no grain in HDR img->color.sig_scale = 1.0f; pl->shift_x = -0.5f; - + if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE))) { - img->profile = (struct pl_icc_profile) { + img->profile = (struct pl_icc_profile){ .data = sd->data, .len = sd->size, }; @@ -3940,14 +3886,16 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) } if ((sd1 = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL))) { - const AVContentLightMetadata *clm = (AVContentLightMetadata *) sd->data; + const AVContentLightMetadata *clm = (AVContentLightMetadata *)sd->data; + img->color.sig_peak = clm->MaxCLL / PL_COLOR_SDR_WHITE; img->color.sig_avg = clm->MaxFALL / PL_COLOR_SDR_WHITE; } // This overrides the CLL values above, if both are present if ((sd2 = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA))) { - const AVMasteringDisplayMetadata *mdm = (AVMasteringDisplayMetadata *) sd->data; + const AVMasteringDisplayMetadata *mdm = (AVMasteringDisplayMetadata *)sd->data; + if (mdm->has_luminance) img->color.sig_peak = av_q2d(mdm->max_luminance) / PL_COLOR_SDR_WHITE; } @@ -3956,8 +3904,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) if (img->color.sig_peak < 1.0 || img->color.sig_peak > 50.0) img->color.sig_peak = 0.0; - -#if defined VAAPI || defined USE_DRM +#if defined VAAPI || defined USE_DRM render_params.peak_detect_params = NULL; render_params.deband_params = NULL; render_params.dither_params = NULL; @@ -3965,14 +3912,14 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) #endif break; - default: // fallback + default: // fallback memcpy(&img->repr, &pl_color_repr_hdtv, sizeof(struct pl_color_repr)); memcpy(&img->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); pl->shift_x = -0.5f; break; } - - target->repr.sys = PL_COLOR_SYSTEM_RGB; + + target->repr.sys = PL_COLOR_SYSTEM_RGB; if (VideoStudioLevels) target->repr.levels = PL_COLOR_LEVELS_PC; else @@ -3982,79 +3929,81 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) // target.repr.bits.sample_depth = 16; // target.repr.bits.color_depth = 16; // target.repr.bits.bit_shift =0; - + #if USE_DRM - switch (VulkanTargetColorSpace) { - case 0: // Monitor - memcpy(&target->color, &pl_color_space_monitor, sizeof(struct pl_color_space)); - break; - case 1: // sRGB - memcpy(&target->color, &pl_color_space_srgb, sizeof(struct pl_color_space)); - break; - case 2: // HD TV - set_hdr_metadata(frame->color_primaries, frame->color_trc, sd1, sd2); - if (decoder->ColorSpace == AVCOL_SPC_BT470BG) { - target->color.primaries = PL_COLOR_PRIM_BT_601_625; - target->color.transfer = PL_COLOR_TRC_BT_1886; - target->color.light = PL_COLOR_LIGHT_DISPLAY; - } else { - memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); - } - break; - case 3: // HDR TV - set_hdr_metadata(frame->color_primaries, frame->color_trc, sd1, sd2); - if (decoder->ColorSpace == AVCOL_SPC_BT2020_NCL) { - memcpy(&target->color, &pl_color_space_bt2020_hlg, sizeof(struct pl_color_space)); - } else if (decoder->ColorSpace == AVCOL_SPC_BT470BG) { - target->color.primaries = PL_COLOR_PRIM_BT_601_625; - target->color.transfer = PL_COLOR_TRC_BT_1886; - target->color.light = 
PL_COLOR_LIGHT_DISPLAY;; - } else { - memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); - } - break; - default: - memcpy(&target->color, &pl_color_space_monitor, sizeof(struct pl_color_space)); - break; - } -#else switch (VulkanTargetColorSpace) { - case 0: // Monitor + case 0: // Monitor memcpy(&target->color, &pl_color_space_monitor, sizeof(struct pl_color_space)); break; - case 1: // sRGB + case 1: // sRGB memcpy(&target->color, &pl_color_space_srgb, sizeof(struct pl_color_space)); break; - case 2: // HD TV - case 3: // UHD HDR TV - memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); + case 2: // HD TV + set_hdr_metadata(frame->color_primaries, frame->color_trc, sd1, sd2); + if (decoder->ColorSpace == AVCOL_SPC_BT470BG) { + target->color.primaries = PL_COLOR_PRIM_BT_601_625; + target->color.transfer = PL_COLOR_TRC_BT_1886; + target->color.light = PL_COLOR_LIGHT_DISPLAY; + } else { + memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); + } + break; + case 3: // HDR TV + set_hdr_metadata(frame->color_primaries, frame->color_trc, sd1, sd2); + if (decoder->ColorSpace == AVCOL_SPC_BT2020_NCL) { + memcpy(&target->color, &pl_color_space_bt2020_hlg, sizeof(struct pl_color_space)); + } else if (decoder->ColorSpace == AVCOL_SPC_BT470BG) { + target->color.primaries = PL_COLOR_PRIM_BT_601_625; + target->color.transfer = PL_COLOR_TRC_BT_1886; + target->color.light = PL_COLOR_LIGHT_DISPLAY; + ; + } else { + memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); + } + break; + default: + memcpy(&target->color, &pl_color_space_monitor, sizeof(struct pl_color_space)); + break; + } +#else + switch (VulkanTargetColorSpace) { + case 0: // Monitor + memcpy(&target->color, &pl_color_space_monitor, sizeof(struct pl_color_space)); + break; + case 1: // sRGB + memcpy(&target->color, &pl_color_space_srgb, sizeof(struct pl_color_space)); + break; + case 2: // HD TV + case 3: // UHD HDR TV + memcpy(&target->color, &pl_color_space_bt709, sizeof(struct pl_color_space)); break; default: memcpy(&target->color, &pl_color_space_monitor, sizeof(struct pl_color_space)); break; } #endif - -//printf("sys %d prim %d trc %d light %d\n",img->repr.sys,img->color.primaries,img->color.transfer,img->color.light); - // Source crop - if (VideoScalerTest) { // right side defined scaler - //Input crop + // printf("sys %d prim %d trc %d light + // %d\n",img->repr.sys,img->color.primaries,img->color.transfer,img->color.light); + // Source crop + if (VideoScalerTest) { // right side defined scaler + + // Input crop img->crop.x0 = video_src_rect.x1 / 2 + 1; img->crop.y0 = video_src_rect.y0; img->crop.x1 = video_src_rect.x1; img->crop.y1 = video_src_rect.y1; // Output scale -#ifdef PLACEBO_GL +#ifdef PLACEBO_GL target->crop.x0 = dst_video_rect.x1 / 2 + dst_video_rect.x0 / 2 + 1; target->crop.y1 = dst_video_rect.y0; target->crop.x1 = dst_video_rect.x1; - target->crop.y0 = dst_video_rect.y1; + target->crop.y0 = dst_video_rect.y1; #else target->crop.x0 = dst_video_rect.x1 / 2 + dst_video_rect.x0 / 2 + 1; target->crop.y0 = dst_video_rect.y0; target->crop.x1 = dst_video_rect.x1; - target->crop.y1 = dst_video_rect.y1; + target->crop.y1 = dst_video_rect.y1; #endif } else { @@ -4063,27 +4012,26 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) img->crop.y0 = video_src_rect.y0; img->crop.x1 = video_src_rect.x1; img->crop.y1 = video_src_rect.y1; - -#ifdef PLACEBO_GL + +#ifdef PLACEBO_GL target->crop.x0 = dst_video_rect.x0; 
target->crop.y1 = dst_video_rect.y0; target->crop.x1 = dst_video_rect.x1; - target->crop.y0 = dst_video_rect.y1; + target->crop.y0 = dst_video_rect.y1; #else target->crop.x0 = dst_video_rect.x0; target->crop.y0 = dst_video_rect.y0; target->crop.x1 = dst_video_rect.x1; - target->crop.y1 = dst_video_rect.y1; + target->crop.y1 = dst_video_rect.y1; #endif - } #if PL_API_VER < 100 if (level == 0) - pl_tex_clear(p->gpu, target->fbo, (float[4]) { 0 }); + pl_tex_clear(p->gpu, target->fbo, (float[4]){0}); #else if (!level && pl_frame_is_cropped(target)) - pl_frame_clear(p->gpu, target, (float[3]) {0} ); + pl_frame_clear(p->gpu, target, (float[3]){0}); #endif if (VideoColorBlindness) { switch (VideoColorBlindness) { @@ -4115,14 +4063,14 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) render_params.downscaler = pl_named_filters[VideoScaling[decoder->Resolution]].filter; render_params.color_adjustment = &colors; - + colors.brightness = VideoBrightness; colors.contrast = VideoContrast; colors.saturation = VideoSaturation; colors.hue = VideoHue; colors.gamma = VideoGamma; #if PL_API_VER >= 119 - colors.temperature = VideoTemperature; + colors.temperature = VideoTemperature; #endif if (ovl) { @@ -4132,33 +4080,32 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) target->overlays = 0; target->num_overlays = 0; } - + #if PL_API_VER >= 58 - if (decoder->newchannel == 1 && !level) { // got new textures + if (decoder->newchannel == 1 && !level) { // got new textures p->num_shaders = 0; - for (int i=NUM_SHADERS-1;i>=0;i--) { // Remove shaders in invers order + for (int i = NUM_SHADERS - 1; i >= 0; i--) { // Remove shaders in inverse order if (p->hook[i]) { pl_mpv_user_shader_destroy(&p->hook[i]); p->hook[i] = NULL; - Debug(3,"remove shader %d\n",i); + Debug(3, "remove shader %d\n", i); } } - for (int i = 0;i<NUM_SHADERS;i++) { + for (int i = 0; i < NUM_SHADERS; i++) { if (p->hook[i] == NULL && shadersp[i]) { p->hook[i] = parse_user_shader(shadersp[i]); if (!p->hook[i]) - shadersp[i]= 0; + shadersp[i] = 0; else p->num_shaders++; } } - } - render_params.hooks = &p->hook; - if (level || ovl || (video_src_rect.x1 > dst_video_rect.x1) || (video_src_rect.y1 > dst_video_rect.y1) ) { - render_params.num_hooks = 0; // no user shaders when OSD activ or downward scaling or PIP } - else { - render_params.num_hooks = p->num_shaders; + render_params.hooks = &p->hook; + if (level || ovl || (video_src_rect.x1 > dst_video_rect.x1) || (video_src_rect.y1 > dst_video_rect.y1)) { + render_params.num_hooks = 0; // no user shaders when OSD active or downward scaling or PIP + } else { + render_params.num_hooks = p->num_shaders; } #endif #if PL_API_VER >= 113 @@ -4166,9 +4113,9 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) if (LUTon) render_params.lut = p->lut; else - render_params.lut = NULL; + render_params.lut = NULL; #endif - + if (decoder->newchannel && current == 0) { colors.brightness = -1.0f; colors.contrast = 0.0f; @@ -4180,25 +4127,25 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) } decoder->newchannel = 0; -// uint64_t tt = GetusTicks(); + // uint64_t tt = GetusTicks(); if (!pl_render_image(p->renderer, &decoder->pl_frames[current], target, &render_params)) { Debug(4, "Failed rendering frame!\n"); } -// pl_gpu_finish(p->gpu); -//printf("Rendertime %ld -- \n,",GetusTicks() - tt); + // pl_gpu_finish(p->gpu); + // printf("Rendertime %ld -- \n,",GetusTicks() - tt); - if (VideoScalerTest) { // left side test scaler + if (VideoScalerTest) { // left side test scaler // Source crop 
img->crop.x0 = video_src_rect.x0; img->crop.y0 = video_src_rect.y0; img->crop.x1 = video_src_rect.x1 / 2; img->crop.y1 = video_src_rect.y1; -#ifdef PLACEBO_GL +#ifdef PLACEBO_GL target->crop.x0 = dst_video_rect.x0; target->crop.y1 = dst_video_rect.y0; target->crop.x1 = dst_video_rect.x1 / 2 + dst_video_rect.x0 / 2; - target->crop.y0 = dst_video_rect.y1; + target->crop.y0 = dst_video_rect.y1; #else // Video aspect ratio target->crop.x0 = dst_video_rect.x0; @@ -4209,10 +4156,10 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) render_params.upscaler = pl_named_filters[VideoScalerTest - 1].filter; render_params.downscaler = pl_named_filters[VideoScalerTest - 1].filter; - -// render_params.lut = NULL; + + // render_params.lut = NULL; render_params.num_hooks = 0; - + if (!p->renderertest) p->renderertest = pl_renderer_create(p->ctx, p->gpu); @@ -4228,40 +4175,34 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused)) } #ifdef PLACEBO -void make_osd_overlay(int x, int y, int width, int height) -{ +void make_osd_overlay(int x, int y, int width, int height) { const struct pl_fmt *fmt; struct pl_overlay *pl; int offset = VideoWindowHeight - (VideoWindowHeight - height - y) - (VideoWindowHeight - y); - fmt = pl_find_named_fmt(p->gpu, "rgba8"); // 8 Bit RGB + fmt = pl_find_named_fmt(p->gpu, "rgba8"); // 8-bit RGBA pl = &osdoverlay; if (pl->plane.texture && (pl->plane.texture->params.w != width || pl->plane.texture->params.h != height)) { -// pl_tex_clear(p->gpu, pl->plane.texture, (float[4]) { 0 }); + // pl_tex_clear(p->gpu, pl->plane.texture, (float[4]) { 0 }); pl_tex_destroy(p->gpu, &pl->plane.texture); } // make texture for OSD if (pl->plane.texture == NULL) { - pl->plane.texture = pl_tex_create(p->gpu, &(struct pl_tex_params) { - .w = width, - .h = height, - .d = 0, - .format = fmt, - .sampleable = true, - .host_writable = true, + pl->plane.texture = pl_tex_create( + p->gpu, &(struct pl_tex_params) { .w = width, .h = height, .d = 0, .format = fmt, .sampleable = true, .host_writable = true, .blit_dst = true, #if PL_API_VER < 159 - .sample_mode = PL_TEX_SAMPLE_LINEAR, - .address_mode = PL_TEX_ADDRESS_CLAMP, + .sample_mode = PL_TEX_SAMPLE_LINEAR, .address_mode = PL_TEX_ADDRESS_CLAMP, #endif }); } // make overlay - pl_tex_clear(p->gpu, pl->plane.texture, (float[4]) { 0 }); + pl_tex_clear(p->gpu, pl->plane.texture, (float[4]){0}); pl->plane.components = 4; pl->plane.shift_x = 0.0f; pl->plane.shift_y = 0.0f; @@ -4275,15 +4216,15 @@ void make_osd_overlay(int x, int y, int width, int height) pl->repr.alpha = PL_ALPHA_INDEPENDENT; memcpy(&osdoverlay.color, &pl_color_space_srgb, sizeof(struct pl_color_space)); -#ifdef PLACEBO_GL +#ifdef PLACEBO_GL pl->rect.x0 = x; - pl->rect.y1 = VideoWindowHeight - y ; // Boden von oben + pl->rect.y1 = VideoWindowHeight - y; // bottom, measured from the top pl->rect.x1 = x + width; - pl->rect.y0 = VideoWindowHeight - height - y; + pl->rect.y0 = VideoWindowHeight - height - y; #else pl->rect.x0 = x; - pl->rect.y0 = VideoWindowHeight - y + offset; // Boden von oben + pl->rect.y0 = VideoWindowHeight - y + offset; // bottom, measured from the top pl->rect.x1 = x + width; pl->rect.y1 = VideoWindowHeight - height - y + offset; #endif @@ -4293,13 +4234,12 @@ void make_osd_overlay(int x, int y, int width, int height) /// Display a video frame. 
/// -static void CuvidDisplayFrame(void) -{ +static void CuvidDisplayFrame(void) { static uint64_t first_time = 0, round_time = 0; static uint64_t last_time = 0; int i; - + int filled; CuvidDecoder *decoder; int RTS_flag; @@ -4314,9 +4254,8 @@ static void CuvidDisplayFrame(void) struct pl_frame target; bool ok; - const struct pl_fmt *fmt; - const float black[4] = { 0.0f, 0.0f, 0.0f, 1.0f }; + const float black[4] = {0.0f, 0.0f, 0.0f, 1.0f}; #endif #ifndef PLACEBO @@ -4326,7 +4265,8 @@ static void CuvidDisplayFrame(void) #ifdef CUVID glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); - glXWaitVideoSyncSGI(2, (Count + 1) % 2, &Count); // wait for previous frame to swap + glXWaitVideoSyncSGI(2, (Count + 1) % 2, + &Count); // wait for previous frame to swap last_time = GetusTicks(); #else eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); @@ -4339,7 +4279,8 @@ static void CuvidDisplayFrame(void) #ifdef PLACEBO_GL #ifdef CUVID glXMakeCurrent(XlibDisplay, VideoWindow, glxThreadContext); - glXWaitVideoSyncSGI(2, (Count + 1) % 2, &Count); // wait for previous frame to swap + glXWaitVideoSyncSGI(2, (Count + 1) % 2, + &Count); // wait for previous frame to swap last_time = GetusTicks(); #else eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); @@ -4347,7 +4288,7 @@ static void CuvidDisplayFrame(void) #endif glClear(GL_COLOR_BUFFER_BIT); #endif - + if (CuvidDecoderN) { ldiff = (float)(GetusTicks() - round_time) / 1000000.0; if (ldiff < 100.0 && ldiff > 0.0) @@ -4365,7 +4306,7 @@ static void CuvidDisplayFrame(void) last_time = GetusTicks(); - while (!pl_swapchain_start_frame(p->swapchain, &frame)) { // get new frame wait for previous to swap + while (!pl_swapchain_start_frame(p->swapchain, &frame)) { // get new frame wait for previous to swap usleep(5); } @@ -4378,9 +4319,9 @@ static void CuvidDisplayFrame(void) #ifdef VAAPI VideoThreadLock(); #endif - - pl_frame_from_swapchain(&target, &frame); // make target frame - + + pl_frame_from_swapchain(&target, &frame); // make target frame + if (VideoSurfaceModesChanged) { pl_renderer_destroy(&p->renderer); p->renderer = pl_renderer_create(p->ctx, p->gpu); @@ -4391,15 +4332,12 @@ static void CuvidDisplayFrame(void) VideoSurfaceModesChanged = 0; } - - - #ifdef GAMMA // target.color.transfer = PL_COLOR_TRC_LINEAR; #endif #endif // - // Render videos into output + // Render videos into output // /// @@ -4410,11 +4348,12 @@ static void CuvidDisplayFrame(void) decoder->StartCounter++; filled = atomic_read(&decoder->SurfacesFilled); -//printf("Filled %d\n",filled); - // need 1 frame for progressive, 3 frames for interlaced + // printf("Filled %d\n",filled); + // need 1 frame for progressive, 3 frames for interlaced if (filled < 1 + 2 * decoder->Interlaced) { // FIXME: rewrite MixVideo to support less surfaces - if ((VideoShowBlackPicture && !decoder->TrickSpeed) || (VideoShowBlackPicture && decoder->Closing < -300)) { + if ((VideoShowBlackPicture && !decoder->TrickSpeed) || + (VideoShowBlackPicture && decoder->Closing < -300)) { CuvidBlackSurface(decoder); CuvidMessage(4, "video/cuvid: black surface displayed\n"); } @@ -4422,20 +4361,20 @@ static void CuvidDisplayFrame(void) } valid_frame = 1; #ifdef PLACEBO - if (OsdShown == 1) { // New OSD opened + if (OsdShown == 1) { // New OSD opened pthread_mutex_lock(&OSDMutex); make_osd_overlay(OSDx, OSDy, OSDxsize, OSDysize); if (posd) { - pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) { // upload OSD - .tex = osdoverlay.plane.texture, - .ptr = posd, - }); + 
pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params){ + // upload OSD + .tex = osdoverlay.plane.texture, + .ptr = posd, + }); } OsdShown = 2; pthread_mutex_unlock(&OSDMutex); - } - + if (OsdShown == 2) { CuvidMixVideo(decoder, i, &target, &osdoverlay); } else { @@ -4445,7 +4384,7 @@ static void CuvidDisplayFrame(void) #else CuvidMixVideo(decoder, i); #endif - if (i == 0 && decoder->grab) { // Grab frame + if (i == 0 && decoder->grab) { // Grab frame #ifdef PLACEBO if (decoder->grab == 2 && OsdShown == 2) { get_RGB(decoder, &osdoverlay); @@ -4460,7 +4399,7 @@ static void CuvidDisplayFrame(void) } #ifndef PLACEBO - // add osd to surface + // add osd to surface if (OsdShown && valid_frame) { GLint texLoc; @@ -4509,28 +4448,27 @@ static void CuvidDisplayFrame(void) glActiveTexture(GL_TEXTURE0); -// pthread_mutex_lock(&OSDMutex); + // pthread_mutex_lock(&OSDMutex); glBindTexture(GL_TEXTURE_2D, OSDtexture); render_pass_quad(1, 0, 0); -// pthread_mutex_unlock(&OSDMutex); + // pthread_mutex_unlock(&OSDMutex); glUseProgram(0); glActiveTexture(GL_TEXTURE0); -// eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); + // eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglThreadContext); } #endif - #if defined PLACEBO // && !defined PLACEBO_GL // first_time = GetusTicks(); if (!pl_swapchain_submit_frame(p->swapchain)) Fatal(_("Failed to submit swapchain buffer\n")); - pl_swapchain_swap_buffers(p->swapchain); // swap buffers + pl_swapchain_swap_buffers(p->swapchain); // swap buffers NoContext; VideoThreadUnlock(); #else // not PLACEBO #ifdef CUVID - glXGetVideoSyncSGI(&Count); // get current frame + glXGetVideoSyncSGI(&Count); // get current frame glXSwapBuffers(XlibDisplay, VideoWindow); glXMakeCurrent(XlibDisplay, None, NULL); #else @@ -4551,27 +4489,25 @@ static void CuvidDisplayFrame(void) } } -#ifdef PLACEBO_GL -CuvidSwapBuffer() { +#ifdef PLACEBO_GL +CuvidSwapBuffer() { #ifndef USE_DRM eglSwapBuffers(eglDisplay, eglSurface); -// eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); +// eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, +// EGL_NO_CONTEXT); #else drm_swap_buffers(); -#endif +#endif } #endif - + /// /// Set CUVID decoder video clock. /// /// @param decoder CUVID hardware decoder -/// @param pts audio presentation timestamp +/// @param pts audio presentation timestamp /// -void CuvidSetClock(CuvidDecoder * decoder, int64_t pts) -{ - decoder->PTS = pts; -} +void CuvidSetClock(CuvidDecoder *decoder, int64_t pts) { decoder->PTS = pts; } /// /// Get CUVID decoder video clock. 
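For orientation between these two hunks: CuvidSetClock() above only stores the PTS of the most recently decoded frame, and CuvidGetClock() in the next hunk estimates the clock of the frame currently on screen by stepping back over the decoded-but-not-yet-displayed fields. A minimal sketch of that calculation, assuming a 90 kHz PTS clock and 20 ms per field; the helper name and the buffered_fields parameter are illustrative, not the plugin's actual fields:

#include <libavutil/avutil.h> // for AV_NOPTS_VALUE
#include <stdint.h>

// Step the latest decoded PTS back by one field duration (20 ms at 90 kHz)
// for every field that is decoded but not yet displayed.
static int64_t ClockSketch(int64_t latest_pts, int buffered_fields)
{
    if (latest_pts == (int64_t)AV_NOPTS_VALUE) {
        return AV_NOPTS_VALUE; // nothing decoded yet
    }
    return latest_pts - 20 * 90 * (int64_t)buffered_fields;
}

This is also why the FIXME in CuvidGetClock notes that the hard-coded 20 is wrong for 60 Hz DVB streams, where a field lasts roughly 16.7 ms instead.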
@@ -4580,10 +4516,9 @@ void CuvidSetClock(CuvidDecoder * decoder, int64_t pts) /// /// FIXME: 20 wrong for 60hz dvb streams /// -static int64_t CuvidGetClock(const CuvidDecoder * decoder) -{ +static int64_t CuvidGetClock(const CuvidDecoder *decoder) { // pts is the timestamp of the latest decoded frame - if (decoder->PTS == (int64_t) AV_NOPTS_VALUE) { + if (decoder->PTS == (int64_t)AV_NOPTS_VALUE) { return AV_NOPTS_VALUE; } // subtract buffered decoded frames @@ -4606,8 +4541,7 @@ static int64_t CuvidGetClock(const CuvidDecoder * decoder) /// /// @param decoder CUVID decoder /// -static void CuvidSetClosing(CuvidDecoder * decoder) -{ +static void CuvidSetClosing(CuvidDecoder *decoder) { decoder->Closing = 1; OsdShown = 0; } @@ -4617,10 +4551,7 @@ static void CuvidSetClosing(CuvidDecoder * decoder) /// /// @param decoder CUVID decoder /// -static void CuvidResetStart(CuvidDecoder * decoder) -{ - decoder->StartCounter = 0; -} +static void CuvidResetStart(CuvidDecoder *decoder) { decoder->StartCounter = 0; } /// /// Set trick play speed. @@ -4628,8 +4559,7 @@ static void CuvidResetStart(CuvidDecoder * decoder) /// @param decoder CUVID decoder /// @param speed trick speed (0 = normal) /// -static void CuvidSetTrickSpeed(CuvidDecoder * decoder, int speed) -{ +static void CuvidSetTrickSpeed(CuvidDecoder *decoder, int speed) { decoder->TrickSpeed = speed; decoder->TrickCounter = speed; if (speed) { @@ -4641,14 +4571,13 @@ static void CuvidSetTrickSpeed(CuvidDecoder * decoder, int speed) /// Get CUVID decoder statistics. /// /// @param decoder CUVID decoder -/// @param[out] missed missed frames -/// @param[out] duped duped frames +/// @param[out] missed missed frames +/// @param[out] duped duped frames /// @param[out] dropped dropped frames -/// @param[out] count number of decoded frames +/// @param[out] count number of decoded frames /// -void CuvidGetStats(CuvidDecoder * decoder, int *missed, int *duped, int *dropped, int *counter, float *frametime, - int *width, int *height, int *color, int *eotf) -{ +void CuvidGetStats(CuvidDecoder *decoder, int *missed, int *duped, int *dropped, int *counter, float *frametime, + int *width, int *height, int *color, int *eotf) { *missed = decoder->FramesMissed; *duped = decoder->FramesDuped; *dropped = decoder->FramesDropped; @@ -4665,16 +4594,15 @@ void CuvidGetStats(CuvidDecoder * decoder, int *missed, int *duped, int *dropped /// /// trick-speed show frame times /// still-picture show frame until new frame arrives -/// 60hz-mode repeat every 5th picture +/// 60hz-mode repeat every 5th picture /// video>audio slow down video by duplicating frames /// video