158 Commits

Author SHA1 Message Date
jojo61
d553a8108d Fix AC3 downmix 2021-01-26 09:17:39 +01:00
jojo61
cb4515f6b7 Minor updates 2021-01-14 10:40:29 +01:00
jojo61
277d7fbd86 More shader samples 2021-01-11 17:22:47 +01:00
jojo61
9347f2a502 Fix CUVID without placebo 2021-01-11 16:13:04 +01:00
jojo61
6dfd2d96aa Remove local cuda dependency 2021-01-11 08:28:44 +01:00
jojo61
a7471e8800 Support for libplacebo API Version 106
Support for LIBPLACEBO with opengl -> needs API >= 106
2021-01-10 13:55:09 +01:00
jojo61
184cc1aa05 Fix playback for old PES recordings with vaapi 2020-08-18 11:35:11 +02:00
jojo61
072e1d6847 Fix vaapi for libplacebo > 87 2020-08-14 12:38:27 +02:00
jojo61
6c13195fda Remove hbbtv changes - not needed 2020-06-23 11:22:24 +02:00
jojo61
6a31404aa0 Optimize for hbbtv plugin 2020-06-22 16:35:37 +02:00
jojo61
a424a57036 Mangle with C and C++ 2020-06-16 08:49:03 +02:00
jojo61
05c2585238 Fix shaders for vaapi Version 2020-06-16 07:46:27 +02:00
jojo61
a28e368c1b Fix jumps between recording marks 2020-06-09 09:23:38 +02:00
jojo61
e4115f348b Update README 2020-06-07 12:56:51 +02:00
jojo61
03b770ce47 Support for mpv user shaders with libplacebo 2020-06-07 12:31:18 +02:00
jojo61
c7c4cb06a6 Fix aspect change within SD channels
Should fix #48
2020-06-01 17:15:07 +02:00
jojo61
a41f6b22fd Update for latest libplacebo 2020-05-15 16:37:06 +02:00
jojo61
628bad5006 Fix Build without PIP 2020-05-07 13:21:26 +02:00
jojo61
cb466dd894 No root for softhddrm needed anymore 2020-05-07 10:53:51 +02:00
jojo61
3578e3212d Merge pull request #54 from REELcoder/master
Fix PiP window size handling
2020-05-04 08:37:09 +02:00
jojo61
78337f5933 Update Minor Version 2020-05-01 12:08:12 +02:00
jojo61
2fea2ee69f Fix default Gamma in Initial Setup
Prepare for opengl placebo (not working yet)
2020-05-01 12:06:04 +02:00
jojo61
c1c345dd4d Merge pull request #53 from dnehring7/master
Remove unsupported auto-crop feature from source
2020-05-01 11:20:04 +02:00
Dirk Nehring
a3eedbff0c Remove unsupported auto-crop feature from source 2020-04-16 21:15:19 +02:00
jojo61
309ad1c90e Merge pull request #45 from dnehring7/master
Reindent all sources to common coding style. Reworked aspect function.
2020-04-14 08:18:20 +00:00
Dirk Nehring
23651104f2 Reworked the aspect function. Now we calculate the display aspect from the pixel width and
pixel height (and not from the physical size, which was the root cause for rounding errors).
Cropping calculation is reworked; now the rounding is correct. I introduce a new aspect mode
"original" which displays the output at the original size (but after correcting the
pixel aspect ratio). Tested with vaapi(X11) and vaapi(DRM) with and without libplacebo.
2020-04-13 18:04:57 +02:00
Dirk Nehring
36c208967e - Reindent all sources to common coding style again.
- Fix compile bug that occurred with gcc10 (-fno-common is now default)
2020-04-10 16:17:23 +02:00
REELcoder
d1a1329beb Fix PiP window size handling 2020-04-06 20:45:01 +02:00
jojo61
5875e10479 Honor Softstartflag 2020-04-01 12:45:33 +02:00
jojo61
a6e65d953e Fix issue #42 and changes for faster a/v sync 2020-03-31 13:57:43 +02:00
jojo61
fecb81486d Revert Fix for tvguide 2020-03-29 18:36:54 +02:00
jojo61
f756334187 Fix epggrid scolling with tvguide 2020-03-26 12:36:00 +01:00
jojo61
14ba527a45 Fix issue #40 2020-03-16 16:31:47 +01:00
jojo61
ec09dbfb25 Fix issue #41 2020-03-13 08:19:58 +01:00
jojo61
34b1fccb28 Fix issues #36 #37 #38 2020-03-12 08:05:39 +01:00
jojo61
5586618c6e More fixes 2020-03-09 22:59:27 +01:00
jojo61
4dbf2dcc84 Fixes for Maintainer 2020-03-09 16:15:01 +01:00
jojo61
74a2285af0 Fixed typo 2020-03-06 13:58:56 +01:00
jojo61
b2bff4ebd0 Minor fix for vaapi compile 2020-03-06 13:03:21 +01:00
jojo61
84501d314e Switch to ffnvcodec headers 2020-03-06 12:42:14 +01:00
jojo61
04e1b8732d correct conversion BT709 colors to sRGB 2020-03-06 11:51:20 +01:00
jojo61
3e649a5cea Rework shaders - Better HLG Colors 2020-03-06 09:11:08 +01:00
jojo61
feb7479ff8 Update Readme 2020-03-06 09:02:27 +01:00
jojo61
3590eadbaa Update Readme 2020-03-06 08:59:24 +01:00
jojo61
6c0f80979f Try Fix for tvguide 2020-03-04 16:34:40 +01:00
jojo61
189d8cfa53 Fix issue #34 2020-03-04 15:55:57 +01:00
jojo61
73b355c52d Fixed issue #29 2020-03-02 16:20:34 +01:00
jojo61
3d23288bdc Fixed issue #32 2020-03-02 16:11:33 +01:00
jojo61
b1a642e64a Fixed issue #31 2020-03-02 16:06:27 +01:00
jojo61
1b7bfd2087 More openglosd fixes 2020-02-27 16:24:07 +01:00
jojo61
03e69b5e26 Use FT_ULong instead of uint 2020-02-26 16:52:48 +01:00
jojo61
a5b81f8de1 Set cache to zero for broken vaapi 2020-02-20 16:00:16 +01:00
jojo61
2485929c2b more OSD stuff 2020-02-19 15:54:09 +01:00
jojo61
b9fed82109 OSD fixes 2020-02-18 22:02:53 +01:00
jojo61
49db402de6 Fix in makefile 2020-02-02 13:28:36 +01:00
jojo61
7ce842b989 More info in Menue 2020-02-02 13:26:51 +01:00
jojo61
9d0417045e Fixed vaapi SD streams 2020-02-02 11:15:17 +01:00
jojo61
f7449c8d3a Prepare Makefile for batch processing 2020-02-01 11:47:11 +01:00
jojo61
5ef6597340 Fix cuvid compile 2020-01-31 14:15:56 +01:00
jojo61
c06b891c2b Final fix for memory leak 2020-01-31 12:28:18 +01:00
jojo61
26945ef9d2 One more try to fix memleak 2020-01-27 18:00:45 +01:00
jojo61
e0239a549e One more fix for memleak 2020-01-21 16:59:03 +01:00
jojo61
431e37e93f try to fix memleak 2020-01-19 16:58:28 +01:00
jojo61
4b5b27382a Fixed skindesigner shady in drm Version 2020-01-18 16:20:08 +01:00
jojo61
2ec7a250a3 Fix drm Filepointer 2020-01-18 12:58:02 +01:00
jojo61
1674600882 Fixes for drm Aspectratio and UHD HDR 2020-01-15 18:01:27 +01:00
jojo61
269c396a2c more drm DETA/ATTA fixes 2020-01-13 18:17:07 +01:00
jojo61
ce3813a9e9 Minor cleanups 2020-01-08 17:04:18 +01:00
jojo61
53314a17f5 Remove deprecated call 2020-01-08 17:00:53 +01:00
jojo61
e4e6a81f54 Add patch for skinnopacity 2020-01-08 16:44:37 +01:00
jojo61
70b67f4466 Fix DETA/ATTA with DRM (hopefully) 2020-01-07 22:22:07 +01:00
jojo61
2f0b1d0df9 Reverse DETA fix 2020-01-03 13:58:24 +01:00
jojo61
dfe70f4f96 More OpenGL and DRM fixes 2020-01-03 12:22:47 +01:00
jojo61
fe3681f6eb Fix UHD crash with DRM 2019-12-28 11:16:01 +01:00
jojo61
50299f178f Provide environment Setting for OpenGL
- no .drirc needed anymore
2019-12-28 10:57:47 +01:00
jojo61
6ea4f5076b Reverse commit need more testing 2019-12-28 07:55:51 +01:00
jojo61
3cb66dd3de make .drirc obsolete 2019-12-27 14:26:12 +01:00
jojo61
68fa4fe4dc Some refactoring of shaders 2019-12-23 14:53:30 +01:00
jojo61
463109fcb6 Prepare for v4l2m2m for Raspi 4 2019-12-23 11:14:38 +01:00
jojo61
d1bc51edb8 Fixed missing lib 2019-12-20 12:12:53 +01:00
jojo61
67832ac333 Enabled -C Option for DRM 2019-12-17 12:15:35 +01:00
jojo61
1274e673ec More cuvid fixes 2019-12-17 11:12:00 +01:00
jojo61
4334894515 Fix cuvid without placebo 2019-12-17 10:22:22 +01:00
jojo61
d2dedb40dd New Parameter -r for Refreshrate with DRM
Fixed aspectratio with DRM
2019-12-12 11:31:40 +01:00
jojo61
3bed988b14 Initial Support for DRM with HDR10 and HDR-HLG 2019-12-10 10:45:22 +01:00
jojo61
f17e58c7c5 Provide Patches for HDR with Intel NUC and LSPCON
The patches are for the drm-intel Branch of Linux
see https://github.com/freedesktop/drm-intel
2019-12-08 14:43:43 +01:00
jojo61
5cd68b6eed Merge pull request #21 from dnehring7/master
Fix indentation.
2019-11-19 13:15:27 +00:00
Dirk Nehring
1a56d620ac Fix indentation. 2019-11-18 13:01:19 +01:00
jojo61
695a6495dd Small Fix for Fonts 2019-11-17 11:07:15 +01:00
jojo61
780c594ba2 More fixes for OSD 2019-11-13 17:09:49 +01:00
jojo61
8838d4c754 Fixed OSD Size for skindesigner with UHD
Fixed corruption with fast Menue Switch
2019-11-11 17:45:59 +01:00
jojo61
b883fa606b Add repeat handling for X11 keyboard remote control 2019-11-06 16:44:53 +01:00
jojo61
6acd2feb3f Changed OSD Interface without Placebo 2019-11-05 22:06:54 +01:00
jojo61
499731fc1f Fixed Vaapi OSD with PLACEBO 2019-11-02 15:47:45 +01:00
jojo61
a4bee138f2 Improved Placebo Frame turnaroundtime 2019-11-02 13:28:42 +01:00
jojo61
146b826b4d OSD optimizations 2019-11-01 08:22:26 +01:00
jojo61
3dfaeaf7e2 Merge pull request #16 from dnehring7/master
Fix remaining indentation problems.
2019-10-28 22:28:48 +01:00
Dirk Nehring
132caa81de Remove old VDR API compatibility defines 2019-10-28 21:52:29 +01:00
Dirk Nehring
9ebdd3baae Fix remaining indentation problems. 2019-10-28 21:43:37 +01:00
jojo61
43fa0b8929 Merge pull request #15 from jojo61/revert-10-Add-glFinish()-before-unmapping-the-surface
Revert "Add glFinish() before unmapping the surface"
2019-10-28 20:02:17 +01:00
jojo61
2cd8415f2b Revert "Add glFinish() before unmapping the surface" 2019-10-28 20:01:37 +01:00
jojo61
2d3a7339e8 Merge pull request #14 from 9000h/fix-missing-break
fix missing break
2019-10-28 19:59:40 +01:00
jojo61
c4a7aefc20 Merge pull request #13 from 9000h/use-the-log-defines
use the log defines
2019-10-28 19:59:11 +01:00
jojo61
bdfe1a62c9 Merge pull request #12 from 9000h/Switch-to-posix-compaatible-sched_yield
Switch to posix compatible sched_yield
2019-10-28 19:58:27 +01:00
9000h
1bf5a841e9 Merge branch 'master' into Switch-to-posix-compaatible-sched_yield 2019-10-28 15:51:49 +01:00
9000h
7269e28f9a Update softhdcuvid.cpp 2019-10-28 15:49:03 +01:00
9000h
b83cb6167c Merge branch 'master' into fix-missing-break 2019-10-28 15:47:35 +01:00
9000h
7b0984381d fix missing break 2019-10-28 13:02:12 +01:00
9000h
ed5fa65837 use the log defines
use the log defines
2019-10-28 12:57:05 +01:00
9000h
70ef3d6602 Switch to posix compatible sched_yield
Use sched_yield() instead of pthread_yield()
2019-10-28 12:34:46 +01:00
jojo61
557c17982f Merge pull request #10 from 9000h/Add-glFinish()-before-unmapping-the-surface
Add glFinish() before unmapping the surface
2019-10-27 10:52:02 +01:00
jojo61
01a564cf3d Merge pull request #9 from dnehring7/master
Reindent all sources to common coding style (second try). This makes it easier for future patches.
2019-10-27 10:48:39 +01:00
9000h
6044db40f3 Add glFinish() before unmapping the surface
cherry picked from rellla
22590848da
2019-10-26 23:03:35 +02:00
Dirk Nehring
ed53dd21ca Reindent all sources to common coding style. 2019-10-26 18:42:19 +02:00
jojo61
5653e31466 Merge pull request #7 from 9000h/second-tryfix-osd
second tryfix osd
2019-10-26 15:55:08 +02:00
9000h
59354108fc second tryfix osd
fix core dump at start when using vdr-plugin-skinnopacity
the code is maybe not needed for VAAPI anyway
2019-10-25 23:25:20 +02:00
jojo61
d25f456e67 Fixed StillPicture
Fixed YADIF Compile error
2019-10-24 16:00:09 +02:00
jojo61
9e8cd60f28 Improve Edit Speed 2019-10-23 16:25:10 +02:00
jojo61
6fbf240f6e Merge pull request #6 from 9000h/try-fix-osd
try fix osd
2019-10-23 12:05:27 +00:00
jojo61
51e2aaaeb0 Merge pull request #5 from 9000h/fix-missing-AudioSetBufferTime
fix missing AudioSetBufferTime
2019-10-23 12:04:18 +00:00
9000h
d03bd4661b try fix osd 2019-10-22 19:59:56 +02:00
9000h
11ea3baba3 fix missing AudioSetBufferTime 2019-10-22 19:46:24 +02:00
jojo61
ce294f55e8 Merge pull request #4 from dnehring7/master
Code cleanup for newer ffmpeg versions
2019-10-22 16:36:01 +02:00
Dirk Nehring
35cc401e13 Drop ffmpeg compatibility checks
Set minimum ffmpeg API to 3.4
Set minimum VDR API to 2.4.0
2019-10-22 15:21:26 +02:00
jojo61
f80d757704 Fixed ATTA and DETA
Cleanup some code
Tested GRAB functions
2019-10-20 14:48:28 +02:00
jojo61
40115f4fc6 disable PIP for vaapi 2019-10-12 14:27:47 +02:00
jojo61
9003beee34 Fix PIP with PLACEBO 2019-10-12 11:23:32 +02:00
jojo61
e09d5a8001 Fix OSD for skindesigner 2019-10-12 11:14:51 +02:00
jojo61
6d91af9d74 More a/v Sync and cleanup 2019-10-12 09:04:04 +02:00
jojo61
cd4611d5cd Improve skindesigner handling 2019-10-11 13:35:52 +02:00
jojo61
06918ebbd2 removed gittid 2019-10-11 12:05:26 +02:00
jojo61
a2b52bb804 Fix jump in recordings
Fix in CUVID Bufferhandling
2019-10-11 11:47:11 +02:00
jojo61
6eb0a7f35a Fix Radio 2019-10-06 10:41:09 +02:00
jojo61
5fa43b5d71 Fix Makefile and DEBUG Error 2019-10-04 14:21:55 +02:00
jojo61
4d2735a971 Vaapi changed to egl 2019-10-04 10:37:57 +02:00
jojo61
cdac2bcc3d fix framedrop in vaapi 2019-08-26 15:00:40 +02:00
jojo61
18759e8dab fix device name 2019-08-23 14:26:43 +02:00
jojo61
9d84e34318 more fixes 2019-08-23 07:35:29 +02:00
jojo61
74c5a1f3ea Force openglosd with VAAPI 2019-08-23 07:25:27 +02:00
jojo61
6d7ef37384 fixed compile error 2019-08-23 07:11:05 +02:00
jojo61
6849d4d54f enable radeonsi 2019-08-22 15:53:45 +02:00
jojo61
5c2b801a45 First support for VAAPI 2019-08-22 12:34:29 +02:00
jojo61
fae0d3a9bd improve channelswitch 2019-04-26 09:33:59 +02:00
jojo61
053fd53bc4 improved still picture and channel switch 2019-04-13 11:45:17 +02:00
jojo61
de7311d233 make Fatal when YADIF fails 2019-04-07 15:55:40 +02:00
jojo61
71fb080fe4 added libavfilter 2019-04-05 14:33:48 +02:00
jojo61
7dbfba85f4 explain YADIF config 2019-04-05 07:26:18 +02:00
jojo61
7e5c6f349d YADIF Deinterlacer 2019-04-05 07:20:52 +02:00
jojo61
8682ab01c4 search in lib64 2019-01-04 13:12:34 +01:00
jojo61
24ccfefff3 optimized CodecAudioDecode 2019-01-04 12:27:13 +01:00
jojo61
33a3316344 removed deprecated ffmpeg functions 2019-01-04 11:27:54 +01:00
jojo61
8b16c0b490 fix DrawPixel 2019-01-04 10:15:51 +01:00
jojo61
6c5b65ed82 fix for compilerwarnings 2019-01-04 10:08:47 +01:00
jojo61
003d06b946 fix for checkCudaErrors 2019-01-04 09:21:47 +01:00
jojo61
24f679e1d3 fixed StillPicture for h265 2018-12-15 11:56:56 +01:00
jojo61
6787d9601b fixed resized GRAB 2018-12-12 11:10:20 +01:00
jojo61
3d183b0fdc fix PIP 2018-12-11 19:31:14 +01:00
jojo61
56ee6f0cec fixed GRAB 2018-12-11 17:29:01 +01:00
37 changed files with 15877 additions and 12958 deletions

.indent.pro (new file, 37 lines)

@@ -0,0 +1,37 @@
--blank-lines-before-block-comments
--blank-lines-after-declarations
--blank-lines-after-procedures
--no-blank-lines-after-commas
--braces-on-if-line
--no-blank-before-sizeof
--comment-indentation41
--declaration-comment-column41
--no-comment-delimiters-on-blank-lines
--swallow-optional-blank-lines
--dont-format-comments
--parameter-indentation4
--indent-level4
--line-comments-indentation0
--cuddle-else
--cuddle-do-while
--brace-indent0
--case-brace-indentation0
//--start-left-side-of-comments
--leave-preprocessor-space
//--continuation-indentation8
--case-indentation4
--else-endif-column0
--no-space-after-casts
--declaration-indentation1
--dont-line-up-parentheses
--no-space-after-function-call-names
--space-special-semicolon
--tab-size4
--no-tabs
--line-length119
--comment-line-length119
--honour-newlines
--dont-break-procedure-type
--break-before-boolean-operator
--continuation-indentation4
--ignore-newlines
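These options define the project's common indent style. GNU indent reads a .indent.pro found in the current working directory, so, assuming GNU indent is installed, running it from the source tree applies this style (the MAKECMDGOALS checks in the Makefile below suggest a matching indent goal), for example:
indent video.c    # picks up ./.indent.pro and reindents the file in place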

Makefile (220 changed lines)

@@ -7,40 +7,107 @@
# This name will be used in the '-P...' option of VDR to load the plugin.
# By default the main source file also carries this name.
PLUGIN = softhdcuvid
### Configuration (edit this for your needs)
# comment out if not needed
# what kind of decoder do we make -
# if VAAPI is enabled the pluginname is softhdvaapi
# if CUVID is enabled the pluginname is softhdcuvid
# if DRM is enabled the pluginname is softhddrm
VAAPI ?= 0
CUVID ?= 0
# if you enable DRM then the plugin will only run without X server
# only valid for VAAPI
DRM ?= 0
# use libplacebo -
# available for all decoders but for DRM you need LIBPLACEBO_GL
LIBPLACEBO ?= 1
LIBPLACEBO_GL ?= 0
# use YADIF deint - only available with cuvid
#YADIF=1
# use gamma correction
#GAMMA ?= 0
CONFIG := #-DDEBUG # remove # to enable debug output
#--------------------- no more config needed past this point--------------------------------
# sanitize selections --------
ifneq "$(MAKECMDGOALS)" "clean"
ifneq "$(MAKECMDGOALS)" "indent"
ifeq ($(VAAPI),0)
ifeq ($(CUVID),0)
ifeq ($(DRM),0)
$(error Please define a plugin in the Makefile)
exit 1;
endif
endif
endif
ifeq ($(CUVID),1)
ifeq ($(DRM),1)
$(error Mismatch in Plugin selection)
exit 1;
endif
endif
ifeq ($(CUVID),1)
ifeq ($(VAAPI),1)
$(error Mismatch in Plugin selection)
exit 1;
endif
endif
endif # MAKECMDGOALS!=indent
endif # MAKECMDGOALS!=clean
#--------------------------
PLUGIN = softhdcuvid
# support OPENGLOSD always needed
OPENGLOSD=1
# support alsa audio output module
ALSA ?= $(shell pkg-config --exists alsa && echo 1)
# support OSS audio output module
OSS ?= 1
# support OPENGLOSD
OPENGLOSD=1
# use Libplacebo
LIBPLACEBO=0
# use DMPS
SCREENSAVER=1
OPENGL=1
# use ffmpeg libswresample
#SWRESAMPLE ?= $(shell pkg-config --exists libswresample && echo 1)
SWRESAMPLE = 1
# use libav libavresample
ifneq ($(SWRESAMPLE),1)
AVRESAMPLE ?= $(shell pkg-config --exists libavresample && echo 1)
AVRESAMPLE = 0
endif
CONFIG := #-DDEBUG #-DOSD_DEBUG # enable debug output+functions
CONFIG += -DCUVID # enable CUVID decoder
# use ffmpeg libswresample
SWRESAMPLE ?= $(shell pkg-config --exists libswresample && echo 1)
SWRESAMPLE = 1
# use libav libavresample
#ifneq ($(SWRESAMPLE),1)
#AVRESAMPLE ?= $(shell pkg-config --exists libavresample && echo 1#)
#AVRESAMPLE = 1
#endif
CONFIG += -DHAVE_GL # needed for mpv libs
#CONFIG += -DSTILL_DEBUG=2 # still picture debug verbose level
CONFIG += -DAV_INFO -DAV_INFO_TIME=3000 # info/debug a/v sync
CONFIG += -DUSE_PIP # PIP support
#CONFIG += -DHAVE_PTHREAD_NAME # supports new pthread_setname_np
#CONFIG += -DNO_TS_AUDIO # disable ts audio parser
#CONFIG += -DUSE_TS_VIDEO # build new ts video parser
@@ -53,8 +120,9 @@ CONFIG += -DUSE_VDR_SPU # use VDR SPU decoder.
### The version number of this plugin (taken from the main source file):
VERSION = $(shell grep 'static const char \*const VERSION *=' $(PLUGIN).cpp | awk '{ print $$7 }' | sed -e 's/[";]//g')
VERSION = $(shell grep 'static const char \*const VERSION *=' softhdcuvid.cpp | awk '{ print $$7 }' | sed -e 's/[";]//g')
GIT_REV = $(shell git describe --always 2>/dev/null)
### The name of the distribution archive:
### The directory environment:
@@ -86,14 +154,7 @@ APIVERSION = $(call PKGCFG,apiversion)
-include $(PLGCFG)
### The name of the distribution archive:
ARCHIVE = $(PLUGIN)-$(VERSION)
PACKAGE = vdr-$(ARCHIVE)
### The name of the shared object file:
SOFILE = libvdr-$(PLUGIN).so
### Parse softhddevice config
@@ -102,37 +163,86 @@ CONFIG += -DUSE_ALSA
_CFLAGS += $(shell pkg-config --cflags alsa)
LIBS += $(shell pkg-config --libs alsa)
endif
ifeq ($(OSS),1)
CONFIG += -DUSE_OSS
endif
ifeq ($(OPENGL),1)
_CFLAGS += $(shell pkg-config --cflags libva-glx)
LIBS += $(shell pkg-config --libs libva-glx)
#_CFLAGS += $(shell pkg-config --cflags libva-glx)
#LIBS += $(shell pkg-config --libs libva-glx)
endif
ifeq ($(OPENGLOSD),1)
CONFIG += -DUSE_OPENGLOSD
endif
ifeq ($(OPENGL),1)
CONFIG += -DUSE_GLX
_CFLAGS += $(shell pkg-config --cflags gl glu glew)
LIBS += $(shell pkg-config --libs gl glu glew)
_CFLAGS += $(shell pkg-config --cflags glew)
LIBS += $(shell pkg-config --libs glew)
#LIBS += $(shell pkg-config --libs glu glew)
_CFLAGS += $(shell pkg-config --cflags freetype2)
LIBS += $(shell pkg-config --libs freetype2)
endif
ifeq ($(VAAPI),1)
CONFIG += -DVAAPI
#LIBPLACEBO=1
PLUGIN = softhdvaapi
endif
ifeq ($(LIBPLACEBO_GL),1)
CONFIG += -DPLACEBO_GL -DPLACEBO
LIBS += -lepoxy
LIBS += -lplacebo
else
LIBS += -lEGL
endif
ifeq ($(LIBPLACEBO),1)
CONFIG += -DPLACEBO
LIBS += -lEGL
LIBS += -lplacebo
endif
ifeq ($(DRM),1)
PLUGIN = softhddrm
CONFIG += -DUSE_DRM -DVAAPI
_CFLAGS += $(shell pkg-config --cflags libdrm)
LIBS += -lgbm -ldrm -lEGL
endif
ifeq ($(CUVID),1)
CONFIG += -DUSE_PIP # PIP support
CONFIG += -DCUVID # enable CUVID decoder
LIBS += -lEGL -lGL
ifeq ($(YADIF),1)
CONFIG += -DYADIF # Yadif only with CUVID
endif
endif
ifeq ($(GAMMA),1)
CONFIG += -DGAMMA
endif
ARCHIVE = $(PLUGIN)-$(VERSION)
PACKAGE = vdr-$(ARCHIVE)
### The name of the shared object file:
SOFILE = libvdr-$(PLUGIN).so
#
# Test that libswresample is available
#
ifneq (exists, $(shell pkg-config libswresample && echo exists))
$(warning ******************************************************************)
$(warning 'libswresample' not found!)
$(error ******************************************************************)
endif
#ifneq (exists, $(shell pkg-config libswresample && echo exists))
# $(warning ******************************************************************)
# $(warning 'libswresample' not found!)
# $(error ******************************************************************)
#endif
#
# Test and set config for libavutil
@@ -165,7 +275,7 @@ ifneq (exists, $(shell pkg-config libavcodec && echo exists))
$(error ******************************************************************)
endif
_CFLAGS += $(shell pkg-config --cflags libavcodec)
LIBS += $(shell pkg-config --libs libavcodec)
LIBS += $(shell pkg-config --libs libavcodec libavfilter)
ifeq ($(SCREENSAVER),1)
@@ -178,28 +288,27 @@ CONFIG += -DUSE_SWRESAMPLE
_CFLAGS += $(shell pkg-config --cflags libswresample)
LIBS += $(shell pkg-config --libs libswresample)
endif
#ifeq ($(AVRESAMPLE),1)
#CONFIG += -DUSE_AVRESAMPLE
#_CFLAGS += $(shell pkg-config --cflags libavresample)
#LIBS += $(shell pkg-config --libs libavresample)
#endif
ifeq ($(AVRESAMPLE),1)
CONFIG += -DUSE_AVRESAMPLE
_CFLAGS += $(shell pkg-config --cflags libavresample)
LIBS += $(shell pkg-config --libs libavresample)
endif
#_CFLAGS += $(shell pkg-config --cflags libavcodec x11 x11-xcb xcb xcb-icccm)
#LIBS += -lrt $(shell pkg-config --libs libavcodec x11 x11-xcb xcb xcb-icccm)
_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm)
LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm)
_CFLAGS += -I/usr/local/cuda/include
_CFLAGS += -I./opengl -I./
LIBS += -L/usr/lib64/opengl/nvidia/lib
LIBS += -L/usr/local/cuda/lib64
LIBS += -L/usr/lib64
ifeq ($(LIBPLACEBO),1)
LIBS += -lplacebo -lglut
ifeq ($(CUVID),1)
LIBS += -lcuda -lnvcuvid
endif
LIBS += -lGLEW -lGLX -ldl -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid
LIBS += -lGLEW -lGLU -ldl -lglut
### Includes and Defines (add further entries here):
INCLUDES +=
@@ -210,19 +319,24 @@ DEFINES += -DPLUGIN_NAME_I18N='"$(PLUGIN)"' -D_GNU_SOURCE $(CONFIG) \
### Make it standard
override CXXFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \
-g -Wextra -Winit-self -Werror=overloaded-virtual -std=c++0x
-g -W -Wextra -Winit-self -Werror=overloaded-virtual -Wno-unused-parameter
override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \
-g -W -Wextra -Winit-self -Wdeclaration-after-statement
### The object files (add further files here):
OBJS = $(PLUGIN).o softhddev.o video.o audio.o codec.o ringbuffer.o
ifeq ($(OPENGLOSD),1)
OBJS += openglosd.o
OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o openglosd.o
ifeq ($(GAMMA),1)
OBJS += colorramp.o
ifeq ($(DRM),1)
OBJS += gamma-drm.o
else
OBJS += gamma-vidmode.o
endif
endif
SRCS = $(wildcard $(OBJS:.o=.c)) $(PLUGIN).cpp
SRCS = $(wildcard $(OBJS:.o=.c)) *.cpp
### The main target:

README.md (126 changed lines)

@@ -23,8 +23,8 @@ $Id: 5267da021a68b4a727b479417334bfbe67bbba14 $
A software and GPU emulated UHD output device plugin for VDR.
o Video decoder CPU / VDPAU
o Video output opengl
o Video decoder CUVID or VAAPI
o Video output opengl or DRM
o Audio FFMpeg / Alsa / Analog
o Audio FFMpeg / Alsa / Digital
o Audio FFMpeg / OSS / Analog
@@ -32,43 +32,36 @@ A software and GPU emulated UHD output device plugin for VDR.
o Software volume, compression, normalize and channel resample
o VDR ScaleVideo API
o CUDA deinterlacer
o Autocrop
o Suspend / Detach
o PIP (Picture-in-Picture) (not working yet)
o Support for ambilight
o Support for Screencopy
o PIP (Picture-in-Picture) (only for CUVID)
To compile you must have the 'requires' installed.
This is a fork of johns original softhddevice work and I reworked ist to support HEVC with CUDA and opengl output.
This is a fork of johns original softhddevice work and I reworked it to support HEVC with CUDA and opengl output.
Currently I have tested it with a GTX 1050 from NVIDIA. SD, HD and UHD are working.
Current Status NVIDA:
Current Status NVIDIA:
The CUDA driver supports HEVC with 8 Bit and 10 Bit up to UHD resolution. Opengl is also able to output 10 Bit, but NVIDIA does not support outputting 10 Bit via HDMI.
Only via DisplayPort can you get 10 Bit output to a compatible screen. This is a restriction from NVIDIA.
Current Status with VAAPI:
I tested it with Intel VAAPI. If you have problems with the shaders then copy the drirc file into your home directory as .drirc
AMD VAAPI is broken by AMD and will not work currently.
You have to adapt the Makefile to your needs. I use FFMPEG 4.0
The Makefile expects the CUDA SDK in /usr/local/cuda. Currently it is tested with CUDA 10
Unfortunately older FFMPEG versions have a bug with deinterlacing cuda frames. Best to get the latest FFMPEG version.
Otherwise you have to patch the file in libavcodec/cuviddec.c
Somewhere near line 860 and 1066 depending on your release:
old:
ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
new:
ctx->frame_queue = av_fifo_alloc((ctx->nb_surfaces + 2 ) * sizeof(CuvidParsedFrame));
This Version supports building with libplacebo. https://github.com/haasn/libplacebo
You have to enable it in the Makefile and install libplacebo yourself.
At the moment this is Work in progress.
It also needs the NVIDIA driver 410.48 or newer as well as CUDA 10.
In the settings you can enable a correction for colorblindness. First you have to decide what kind of colorblindness to correct, and then the factor of correction. If the factor is negative then the selected type of colorblindness is simulated. If the factor is positive then the colors are enhanced to try to correct the deficiency.
I recommend using libplacebo. It has much better scalers and does color conversion for HDR the correct way.
You can also enable a Scaler Test feature. When enabled, the screen is split. On the left half you will see the scaler defined by Scaler Test and on the right side you will see the scaler defined at the Resolution setting. There is a small black line between the halves to remind you that Scaler Test is active.
If your FFMPEG supports it then you can enable YADIF in the Makefile and select between the built-in NVIDIA CUDA deinterlacer and the YADIF cuda deinterlacer.
Good luck
jojo61
@@ -76,7 +69,17 @@ jojo61
Quickstart:
-----------
Just type make and use.
You have to adapt the Makefile. There are 3 possible versions that you can build:
softhdcuvid
This is for NVIDIA cards and uses cuvid as decoder. It uses xcb for output and needs an X server to run.
softhdvaapi
This is for INTEL cards and uses Vaapi as decoder. It uses xcb for output and needs an X server to run.
softhddrm
This is for INTEL cards and also uses Vaapi as decoder. It uses the DRM API for output and
runs without an X server. There are several command line options to select the resolution and refresh rate.
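As a rough sketch, assuming the VAAPI/CUVID/DRM switches at the top of the Makefile are either edited there or passed on the make command line, the three variants correspond to:
make CUVID=1          # builds softhdcuvid -> libvdr-softhdcuvid.so
make VAAPI=1          # builds softhdvaapi -> libvdr-softhdvaapi.so
make VAAPI=1 DRM=1    # builds softhddrm   -> libvdr-softhddrm.so (no X server)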
Install:
--------
@@ -92,6 +95,34 @@ Install:
You have to start vdr with -P 'softhdcuvid -d :0.0 ..<more option>.. '
Beginners Guide for libplacebo:
-------------------------------
When using libplacebo you will find several config options.
First of all you need to set the right scaler for each resolution:
Best you begin with setting all of them to "bilinear". If that works OK for you, you can try to change them
to more advanced scalers. I use ewa_robidouxsharp on my GTX1050, but your mileage may vary.
Unfortunately, on INTEL not all scalers may work, and some may crash.
You can enable a Scaler Test feature. When enabled, the screen is split. On the left half you will
see the scaler defined by Scaler Test and on the right side you will see the scaler defined at the
Resolution setting. There is a small black line between the halves to remind you that Scaler Test
is active.
Then you should set the Monitor Colorspace to "sRGB". This guarantees you the best colors on your screen.
At the moment all calculations are done internally in RGB space and all cards also output RGB.
If you are colorblind you could try to remedy this with the Colorblind Settings. Really only needed
in rare cases.
All other settings can be in their default state.
Beginning with libplacebo API 58 user shaders from mpv are supported. Use the -S parameter to set the shader.
The plugin searches for the shaders in $ConfigDir/plugins/shaders. One example shader is
provided in the shader subdirectory. Copy it to e.g.: /etc/vdr/plugins/shaders and then start
vdr -P 'softhdcuvid -S filmgrain.glsl ...'
Setup: environment
------
@@ -205,18 +236,6 @@ Setup: /etc/vdr/setup.conf
0 = default (336 ms)
1 - 1000 = size of the buffer in ms
softhddevice.AutoCrop.Interval = 0
0 disables auto-crop
n each 'n' frames auto-crop is checked.
softhddevice.AutoCrop.Delay = 0
if auto-crop is over 'n' intervals the same, the cropping is
used.
softhddevice.AutoCrop.Tolerance = 0
if detected crop area is too small, cut max 'n' pixels at top and
bottom.
softhddevice.Background = 0
32bit RGBA background color
(Red * 16777216 + Green * 65536 + Blue * 256 + Alpha)
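For example, purely as an illustration: Red = Green = Blue = 128 with Alpha = 255 gives
128 * 16777216 + 128 * 65536 + 128 * 256 + 255 = 2155905279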
@@ -256,11 +275,13 @@ Setup: /etc/vdr/setup.conf
0 pan and scan
1 letter box
2 center cut-out
3 original
softhddevice.VideoOtherDisplayFormat = 1
0 pan and scan
1 pillar box
2 center cut-out
3 original
softhddevice.pip.X = 79
softhddevice.pip.Y = 78
@@ -287,22 +308,6 @@ Setup: /etc/vdr/setup.conf
PIP alternative video window position and size in percent.
Setup: /etc/vdr/remote.conf
------
Add "XKeySym." definitions to /etc/vdr/remote.conf to control
the vdr and plugin with the connected input device.
e.g.
XKeySym.Up Up
XKeySym.Down Down
...
In addition to the x11 input, the window close button sends "Close".
e.g.
XKeySym.Power Close
Commandline:
------------
@@ -350,25 +355,4 @@ Running:
Known Bugs:
-----------
SD Stream not working very well
RESUME starts with black screen (channel switch needed)
Requires:
---------
media-video/vdr (version >=1.7.xx)
Video Disk Recorder - turns a pc into a powerful set top box
for DVB.
http://www.tvdr.de/
media-video/ffmpeg (version >=0.7)
Complete solution to record, convert and stream audio and
video. Includes libavcodec and libswresample.
http://ffmpeg.org
media-libs/alsa-lib
Advanced Linux Sound Architecture Library
http://www.alsa-project.org
or
kernel support for oss/oss4 or alsa oss emulation
Optional:
SD Streams not working very well on vaapi

audio.c (420 changed lines)

@@ -51,6 +51,7 @@
#include <string.h>
#include <math.h>
#include <sys/prctl.h>
#include <sched.h>
#include <libintl.h>
#define _(str) gettext(str) ///< gettext shortcut
@@ -85,6 +86,8 @@
#define __USE_GNU
#endif
#include <pthread.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#ifndef HAVE_PTHREAD_NAME
/// only available with newer glibc
#define pthread_setname_np(thread, name)
@@ -258,8 +261,7 @@ static void AudioNormalizer(int16_t * samples, int count)
if (avg > 0) {
factor = ((INT16_MAX / 8) * 1000U) / (uint32_t) sqrt(avg);
// smooth normalize
AudioNormalizeFactor =
(AudioNormalizeFactor * 500 + factor * 500) / 1000;
AudioNormalizeFactor = (AudioNormalizeFactor * 500 + factor * 500) / 1000;
if (AudioNormalizeFactor < AudioMinNormalize) {
AudioNormalizeFactor = AudioMinNormalize;
}
@@ -269,8 +271,8 @@ static void AudioNormalizer(int16_t * samples, int count)
} else {
factor = 1000;
}
Debug(4, "audio/noramlize: avg %8d, fac=%6.3f, norm=%6.3f\n",
avg, factor / 1000.0, AudioNormalizeFactor / 1000.0);
Debug(4, "audio/noramlize: avg %8d, fac=%6.3f, norm=%6.3f\n", avg, factor / 1000.0,
AudioNormalizeFactor / 1000.0);
}
AudioNormIndex = (AudioNormIndex + 1) % AudioNormMaxIndex;
@@ -337,8 +339,7 @@ static void AudioCompressor(int16_t * samples, int count)
if (max_sample > 0) {
factor = (INT16_MAX * 1000) / max_sample;
// smooth compression (FIXME: make configurable?)
AudioCompressionFactor =
(AudioCompressionFactor * 950 + factor * 50) / 1000;
AudioCompressionFactor = (AudioCompressionFactor * 950 + factor * 50) / 1000;
if (AudioCompressionFactor > factor) {
AudioCompressionFactor = factor; // no clipping
}
@@ -349,8 +350,8 @@ static void AudioCompressor(int16_t * samples, int count)
return; // silent nothing todo
}
Debug(4, "audio/compress: max %5d, fac=%6.3f, com=%6.3f\n", max_sample,
factor / 1000.0, AudioCompressionFactor / 1000.0);
Debug(4, "audio/compress: max %5d, fac=%6.3f, com=%6.3f\n", max_sample, factor / 1000.0,
AudioCompressionFactor / 1000.0);
// apply compression factor
for (i = 0; i < count / AudioBytesProSample; ++i) {
@@ -458,8 +459,7 @@ static void AudioStereo2Mono(const int16_t * in, int frames, int16_t * out)
** @param frames number of frames in sample buffer
** @param out output sample buffer
*/
static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames,
int16_t * out)
static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames, int16_t * out)
{
while (frames--) {
int l;
@@ -538,8 +538,7 @@ static void AudioSurround2Stereo(const int16_t * in, int in_chan, int frames,
** @param out output sample buffer
** @param out_chan nr. of output channels
*/
static void AudioUpmix(const int16_t * in, int in_chan, int frames,
int16_t * out, int out_chan)
static void AudioUpmix(const int16_t * in, int in_chan, int frames, int16_t * out, int out_chan)
{
while (frames--) {
int i;
@@ -569,8 +568,7 @@ static void AudioUpmix(const int16_t * in, int in_chan, int frames,
** @param out output sample buffer
** @param out_chan nr. of output channels
*/
static void AudioResample(const int16_t * in, int in_chan, int frames,
int16_t * out, int out_chan)
static void AudioResample(const int16_t * in, int in_chan, int frames, int16_t * out, int out_chan)
{
switch (in_chan * 8 + out_chan) {
case 1 * 8 + 1:
@@ -605,8 +603,7 @@ static void AudioResample(const int16_t * in, int in_chan, int frames,
break;
default:
Error("audio: unsupported %d -> %d channels resample\n", in_chan,
out_chan);
Error("audio: unsupported %d -> %d channels resample\n", in_chan, out_chan);
// play silence
memset(out, 0, frames * out_chan * AudioBytesProSample);
break;
@@ -692,11 +689,10 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough)
AudioRing[AudioRingWrite].InChannels = channels;
AudioRing[AudioRingWrite].HwSampleRate = sample_rate;
AudioRing[AudioRingWrite].HwChannels = AudioChannelMatrix[u][channels];
AudioRing[AudioRingWrite].PTS = INT64_C(0x8000000000000000);
AudioRing[AudioRingWrite].PTS = AV_NOPTS_VALUE;
RingBufferReset(AudioRing[AudioRingWrite].RingBuffer);
Debug(3, "audio: %d ring buffer prepared\n",
atomic_read(&AudioRingFilled) + 1);
Debug(3, "audio: %d ring buffer prepared\n", atomic_read(&AudioRingFilled) + 1);
atomic_inc(&AudioRingFilled);
@@ -705,6 +701,7 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough)
// tell thread, that there is something todo
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
Debug(3, "Start on AudioRingAdd\n");
}
#endif
@@ -793,14 +790,12 @@ static int AlsaPlayRingbuffer(void)
if (n == -EAGAIN) {
continue;
}
Warning(_("audio/alsa: avail underrun error? '%s'\n"),
snd_strerror(n));
Warning(_("audio/alsa: avail underrun error? '%s'\n"), snd_strerror(n));
err = snd_pcm_recover(AlsaPCMHandle, n, 0);
if (err >= 0) {
continue;
}
Error(_("audio/alsa: snd_pcm_avail_update(): %s\n"),
snd_strerror(n));
Error(_("audio/alsa: snd_pcm_avail_update(): %s\n"), snd_strerror(n));
return -1;
}
avail = snd_pcm_frames_to_bytes(AlsaPCMHandle, n);
@@ -809,23 +804,20 @@ static int AlsaPlayRingbuffer(void)
// happens with broken alsa drivers
if (AudioThread) {
if (!AudioAlsaDriverBroken) {
Error(_("audio/alsa: broken driver %d state '%s'\n"),
avail,
Error(_("audio/alsa: broken driver %d state '%s'\n"), avail,
snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));
}
// try to recover
if (snd_pcm_state(AlsaPCMHandle)
== SND_PCM_STATE_PREPARED) {
if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) {
Error(_("audio/alsa: snd_pcm_start(): %s\n"),
snd_strerror(err));
Error(_("audio/alsa: snd_pcm_start(): %s\n"), snd_strerror(err));
}
}
usleep(5 * 1000);
}
}
Debug(4, "audio/alsa: break state '%s'\n",
snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));
Debug(4, "audio/alsa: break state '%s'\n", snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));
break;
}
@@ -846,8 +838,7 @@ static int AlsaPlayRingbuffer(void)
break;
}
// muting pass-through AC-3, can produce disturbance
if (AudioMute || (AudioSoftVolume
&& !AudioRing[AudioRingRead].Passthrough)) {
if (AudioMute || (AudioSoftVolume && !AudioRing[AudioRingRead].Passthrough)) {
// FIXME: quick&dirty cast
AudioSoftAmplifier((int16_t *) p, avail);
// FIXME: if not all are written, we double amplify them
@@ -876,14 +867,12 @@ static int AlsaPlayRingbuffer(void)
goto again;
}
*/
Warning(_("audio/alsa: writei underrun error? '%s'\n"),
snd_strerror(err));
Warning(_("audio/alsa: writei underrun error? '%s'\n"), snd_strerror(err));
err = snd_pcm_recover(AlsaPCMHandle, err, 0);
if (err >= 0) {
continue;
}
Error(_("audio/alsa: snd_pcm_writei failed: %s\n"),
snd_strerror(err));
Error(_("audio/alsa: snd_pcm_writei failed: %s\n"), snd_strerror(err));
return -1;
}
// this could happen, if underrun happened
@@ -894,8 +883,8 @@ static int AlsaPlayRingbuffer(void)
}
RingBufferReadAdvance(AudioRing[AudioRingRead].RingBuffer, avail);
first = 0;
}
}
return 0;
}
@@ -951,8 +940,7 @@ static int AlsaThread(void)
}
// wait for space in kernel buffers
if ((err = snd_pcm_wait(AlsaPCMHandle, 24)) < 0) {
Warning(_("audio/alsa: wait underrun error? '%s'\n"),
snd_strerror(err));
Warning(_("audio/alsa: wait underrun error? '%s'\n"), snd_strerror(err));
err = snd_pcm_recover(AlsaPCMHandle, err, 0);
if (err >= 0) {
continue;
@@ -976,8 +964,7 @@ static int AlsaThread(void)
state = snd_pcm_state(AlsaPCMHandle);
if (state != SND_PCM_STATE_RUNNING) {
Debug(3, "audio/alsa: stopping play '%s'\n",
snd_pcm_state_name(state));
Debug(3, "audio/alsa: stopping play '%s'\n", snd_pcm_state_name(state));
return 0;
}
@@ -1008,8 +995,7 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough)
device = "default";
}
if (!AudioDoingInit) { // reduce blabla during init
Info(_("audio/alsa: using %sdevice '%s'\n"),
passthrough ? "pass-through " : "", device);
Info(_("audio/alsa: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device);
}
//
// for AC3 pass-through try to set the non-audio bit, use AES0=6
@@ -1032,11 +1018,8 @@ static snd_pcm_t *AlsaOpenPCM(int passthrough)
#endif
}
// open none blocking; if device is already used, we don't want wait
if ((err =
snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK,
SND_PCM_NONBLOCK)) < 0) {
Error(_("audio/alsa: playback open '%s' error: %s\n"), device,
snd_strerror(err));
if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
Error(_("audio/alsa: playback open '%s' error: %s\n"), device, snd_strerror(err));
return NULL;
}
@@ -1064,9 +1047,7 @@ static void AlsaInitPCM(void)
snd_pcm_hw_params_alloca(&hw_params);
// choose all parameters
if ((err = snd_pcm_hw_params_any(handle, hw_params)) < 0) {
Error(_
("audio: snd_pcm_hw_params_any: no configurations available: %s\n"),
snd_strerror(err));
Error(_("audio: snd_pcm_hw_params_any: no configurations available: %s\n"), snd_strerror(err));
}
AlsaCanPause = snd_pcm_hw_params_can_pause(hw_params);
Info(_("audio/alsa: supports pause: %s\n"), AlsaCanPause ? "yes" : "no");
@@ -1119,8 +1100,7 @@ static void AlsaInitMixer(void)
Debug(3, "audio/alsa: mixer %s - %s open\n", device, channel);
snd_mixer_open(&alsa_mixer, 0);
if (alsa_mixer && snd_mixer_attach(alsa_mixer, device) >= 0
&& snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0
&& snd_mixer_load(alsa_mixer) >= 0) {
&& snd_mixer_selem_register(alsa_mixer, NULL, NULL) >= 0 && snd_mixer_load(alsa_mixer) >= 0) {
const char *const alsa_mixer_elem_name = channel;
@@ -1130,11 +1110,10 @@ static void AlsaInitMixer(void)
name = snd_mixer_selem_get_name(alsa_mixer_elem);
if (!strcasecmp(name, alsa_mixer_elem_name)) {
snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem,
&alsa_mixer_elem_min, &alsa_mixer_elem_max);
snd_mixer_selem_get_playback_volume_range(alsa_mixer_elem, &alsa_mixer_elem_min, &alsa_mixer_elem_max);
AlsaRatio = 1000 * (alsa_mixer_elem_max - alsa_mixer_elem_min);
Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n",
alsa_mixer_elem_min, alsa_mixer_elem_max, AlsaRatio);
Debug(3, "audio/alsa: PCM mixer found %ld - %ld ratio %d\n", alsa_mixer_elem_min, alsa_mixer_elem_max,
AlsaRatio);
break;
}
@@ -1178,15 +1157,14 @@ static int64_t AlsaGetDelay(void)
//Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay);
#endif
}
//Debug(3, "audio/alsa: %ld frames hw delay\n", delay);
Debug(4, "audio/alsa: %ld frames hw delay\n", delay);
// delay can be negative, when underrun occur
if (delay < 0) {
delay = 0L;
}
pts =
((int64_t) delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate;
pts = ((int64_t) delay * 90 * 1000) / AudioRing[AudioRingRead].HwSampleRate;
return pts;
}
@@ -1237,16 +1215,14 @@ static int AlsaSetup(int *freq, int *channels, int passthrough)
for (;;) {
if ((err =
snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16,
AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED :
SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1,
AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1,
96 * 1000))) {
// try reduced buffer size (needed for sunxi)
// FIXME: alternativ make this configurable
if ((err =
snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16,
AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED :
SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1,
72 * 1000))) {
AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED : SND_PCM_ACCESS_RW_INTERLEAVED, *channels,
*freq, 1, 72 * 1000))) {
/*
if ( err == -EBADFD ) {
@@ -1257,8 +1233,7 @@ static int AlsaSetup(int *freq, int *channels, int passthrough)
*/
if (!AudioDoingInit) {
Error(_("audio/alsa: set params error: %s\n"),
snd_strerror(err));
Error(_("audio/alsa: set params error: %s\n"), snd_strerror(err));
}
// FIXME: must stop sound, AudioChannels ... invalid
return -1;
@@ -1275,41 +1250,30 @@ static int AlsaSetup(int *freq, int *channels, int passthrough)
snd_pcm_sw_params_alloca(&sw_params);
err = snd_pcm_sw_params_current(AlsaPCMHandle, sw_params);
if (err < 0) {
Error(_("audio: snd_pcm_sw_params_current failed: %s\n"),
snd_strerror(err));
Error(_("audio: snd_pcm_sw_params_current failed: %s\n"), snd_strerror(err));
}
if ((err = snd_pcm_sw_params_get_boundary(sw_params, &boundary)) < 0) {
Error(_("audio: snd_pcm_sw_params_get_boundary failed: %s\n"),
snd_strerror(err));
Error(_("audio: snd_pcm_sw_params_get_boundary failed: %s\n"), snd_strerror(err));
}
Debug(4, "audio/alsa: boundary %lu frames\n", boundary);
if ((err =
snd_pcm_sw_params_set_stop_threshold(AlsaPCMHandle, sw_params,
boundary)) < 0) {
Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"),
snd_strerror(err));
if ((err = snd_pcm_sw_params_set_stop_threshold(AlsaPCMHandle, sw_params, boundary)) < 0) {
Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), snd_strerror(err));
}
if ((err =
snd_pcm_sw_params_set_silence_size(AlsaPCMHandle, sw_params,
boundary)) < 0) {
Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"),
snd_strerror(err));
if ((err = snd_pcm_sw_params_set_silence_size(AlsaPCMHandle, sw_params, boundary)) < 0) {
Error(_("audio: snd_pcm_sw_params_set_silence_size failed: %s\n"), snd_strerror(err));
}
if ((err = snd_pcm_sw_params(AlsaPCMHandle, sw_params)) < 0) {
Error(_("audio: snd_pcm_sw_params failed: %s\n"),
snd_strerror(err));
Error(_("audio: snd_pcm_sw_params failed: %s\n"), snd_strerror(err));
}
}
// update buffer
snd_pcm_get_params(AlsaPCMHandle, &buffer_size, &period_size);
Debug(3, "audio/alsa: buffer size %lu %zdms, period size %lu %zdms\n",
buffer_size, snd_pcm_frames_to_bytes(AlsaPCMHandle,
buffer_size) * 1000 / (*freq * *channels * AudioBytesProSample),
Debug(3, "audio/alsa: buffer size %lu %zdms, period size %lu %zdms\n", buffer_size,
snd_pcm_frames_to_bytes(AlsaPCMHandle, buffer_size) * 1000 / (*freq * *channels * AudioBytesProSample),
period_size, snd_pcm_frames_to_bytes(AlsaPCMHandle,
period_size) * 1000 / (*freq * *channels * AudioBytesProSample));
Debug(3, "audio/alsa: state %s\n",
snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));
Debug(3, "audio/alsa: state %s\n", snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));
AudioStartThreshold = snd_pcm_frames_to_bytes(AlsaPCMHandle, period_size);
// buffer time/delay in ms
@@ -1317,10 +1281,8 @@ static int AlsaSetup(int *freq, int *channels, int passthrough)
if (VideoAudioDelay > 0) {
delay += VideoAudioDelay / 90;
}
if (AudioStartThreshold <
(*freq * *channels * AudioBytesProSample * delay) / 1000U) {
AudioStartThreshold =
(*freq * *channels * AudioBytesProSample * delay) / 1000U;
if (AudioStartThreshold < (*freq * *channels * AudioBytesProSample * delay) / 1000U) {
AudioStartThreshold = (*freq * *channels * AudioBytesProSample * delay) / 1000U;
}
// no bigger, than 1/3 the buffer
if (AudioStartThreshold > AudioRingBufferSize / 3) {
@@ -1330,7 +1292,6 @@ static int AlsaSetup(int *freq, int *channels, int passthrough)
Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000)
/ (*freq * *channels * AudioBytesProSample));
}
return 0;
}
@@ -1476,8 +1437,7 @@ static int OssPlayRingbuffer(void)
int n;
if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"),
strerror(errno));
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), strerror(errno));
return -1;
}
Debug(4, "audio/oss: %d bytes free\n", bi.bytes);
@@ -1531,8 +1491,7 @@ static void OssFlushBuffers(void)
if (OssPcmFildes != -1) {
// flush kernel buffers
if (ioctl(OssPcmFildes, SNDCTL_DSP_HALT_OUTPUT, NULL) < 0) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_HALT_OUTPUT): %s\n"),
strerror(errno));
Error(_("audio/oss: ioctl(SNDCTL_DSP_HALT_OUTPUT): %s\n"), strerror(errno));
}
}
}
@@ -1587,7 +1546,7 @@ static int OssThread(void)
if (err < 0) { // underrun error
return -1;
}
pthread_yield();
sched_yield();
usleep(OssFragmentTime * 1000); // let fill/empty the buffers
return 0;
}
@@ -1616,13 +1575,11 @@ static int OssOpenPCM(int passthrough)
device = "/dev/dsp";
}
if (!AudioDoingInit) {
Info(_("audio/oss: using %sdevice '%s'\n"),
passthrough ? "pass-through " : "", device);
Info(_("audio/oss: using %sdevice '%s'\n"), passthrough ? "pass-through " : "", device);
}
if ((fildes = open(device, O_WRONLY)) < 0) {
Error(_("audio/oss: can't open dsp device '%s': %s\n"), device,
strerror(errno));
Error(_("audio/oss: can't open dsp device '%s': %s\n"), device, strerror(errno));
return -1;
}
return fildes;
@@ -1668,8 +1625,7 @@ static void OssSetVolume(int volume)
/**
** Mixer channel name table.
*/
static const char *OssMixerChannelNames[SOUND_MIXER_NRDEVICES] =
SOUND_DEVICE_NAMES;
static const char *OssMixerChannelNames[SOUND_MIXER_NRDEVICES] = SOUND_DEVICE_NAMES;
/**
** Initialize OSS mixer.
@@ -1695,14 +1651,12 @@ static void OssInitMixer(void)
Debug(3, "audio/oss: mixer %s - %s open\n", device, channel);
if ((fildes = open(device, O_RDWR)) < 0) {
Error(_("audio/oss: can't open mixer device '%s': %s\n"), device,
strerror(errno));
Error(_("audio/oss: can't open mixer device '%s': %s\n"), device, strerror(errno));
return;
}
// search channel name
if (ioctl(fildes, SOUND_MIXER_READ_DEVMASK, &devmask) < 0) {
Error(_("audio/oss: ioctl(SOUND_MIXER_READ_DEVMASK): %s\n"),
strerror(errno));
Error(_("audio/oss: ioctl(SOUND_MIXER_READ_DEVMASK): %s\n"), strerror(errno));
close(fildes);
return;
}
@@ -1746,8 +1700,7 @@ static int64_t OssGetDelay(void)
// delay in bytes in kernel buffers
delay = -1;
if (ioctl(OssPcmFildes, SNDCTL_DSP_GETODELAY, &delay) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"),
strerror(errno));
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"), strerror(errno));
return 0L;
}
if (delay < 0) {
@@ -1755,8 +1708,7 @@ static int64_t OssGetDelay(void)
}
pts = ((int64_t) delay * 90 * 1000)
/ (AudioRing[AudioRingRead].HwSampleRate *
AudioRing[AudioRingRead].HwChannels * AudioBytesProSample);
/ (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels * AudioBytesProSample);
return pts;
}
@@ -1812,13 +1764,11 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough)
tmp = *channels;
if (ioctl(OssPcmFildes, SNDCTL_DSP_CHANNELS, &tmp) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_CHANNELS): %s\n"),
strerror(errno));
Error(_("audio/oss: ioctl(SNDCTL_DSP_CHANNELS): %s\n"), strerror(errno));
return -1;
}
if (tmp != *channels) {
Warning(_("audio/oss: device doesn't support %d channels.\n"),
*channels);
Warning(_("audio/oss: device doesn't support %d channels.\n"), *channels);
*channels = tmp;
ret = 1;
}
@@ -1829,8 +1779,7 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough)
return -1;
}
if (tmp != *sample_rate) {
Warning(_("audio/oss: device doesn't support %dHz sample rate.\n"),
*sample_rate);
Warning(_("audio/oss: device doesn't support %dHz sample rate.\n"), *sample_rate);
*sample_rate = tmp;
ret = 1;
}
@@ -1844,8 +1793,7 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough)
#endif
if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"),
strerror(errno));
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"), strerror(errno));
bi.fragsize = 4096;
bi.fragstotal = 16;
} else {
@@ -1855,10 +1803,9 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough)
OssFragmentTime = (bi.fragsize * 1000)
/ (*sample_rate * *channels * AudioBytesProSample);
Debug(3, "audio/oss: buffer size %d %dms, fragment size %d %dms\n",
bi.fragsize * bi.fragstotal, (bi.fragsize * bi.fragstotal * 1000)
/ (*sample_rate * *channels * AudioBytesProSample), bi.fragsize,
OssFragmentTime);
Debug(3, "audio/oss: buffer size %d %dms, fragment size %d %dms\n", bi.fragsize * bi.fragstotal,
(bi.fragsize * bi.fragstotal * 1000)
/ (*sample_rate * *channels * AudioBytesProSample), bi.fragsize, OssFragmentTime);
// start when enough bytes for initial write
AudioStartThreshold = (bi.fragsize - 1) * bi.fragstotal;
@@ -1868,10 +1815,8 @@ static int OssSetup(int *sample_rate, int *channels, int passthrough)
if (VideoAudioDelay > 0) {
delay += VideoAudioDelay / 90;
}
if (AudioStartThreshold <
(*sample_rate * *channels * AudioBytesProSample * delay) / 1000U) {
AudioStartThreshold =
(*sample_rate * *channels * AudioBytesProSample * delay) / 1000U;
if (AudioStartThreshold < (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U) {
AudioStartThreshold = (*sample_rate * *channels * AudioBytesProSample * delay) / 1000U;
}
// no bigger, than 1/3 the buffer
if (AudioStartThreshold > AudioRingBufferSize / 3) {
@@ -2027,8 +1972,7 @@ static int AudioNextRing(void)
sample_rate = AudioRing[AudioRingRead].HwSampleRate;
channels = AudioRing[AudioRingRead].HwChannels;
if (AudioUsedModule->Setup(&sample_rate, &channels, passthrough)) {
Error(_("audio: can't set channels %d sample-rate %dHz\n"), channels,
sample_rate);
Error(_("audio: can't set channels %d sample-rate %dHz\n"), channels, sample_rate);
// FIXME: handle error
AudioRing[AudioRingRead].HwSampleRate = 0;
AudioRing[AudioRingRead].InSampleRate = 0;
@@ -2041,13 +1985,11 @@ static int AudioNextRing(void)
Debug(3, "audio: a/v next buf(%d,%4zdms)\n", atomic_read(&AudioRingFilled),
(RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer) * 1000)
/ (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample));
/ (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample));
// stop, if not enough in next buffer
used = RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer);
if (AudioStartThreshold * 4 < used || (AudioVideoIsReady
&& AudioStartThreshold < used)) {
if (AudioStartThreshold * 4 < used || (AudioVideoIsReady && AudioStartThreshold < used)) {
return 0;
}
return 1;
@@ -2062,6 +2004,7 @@ static void *AudioPlayHandlerThread(void *dummy)
{
Debug(3, "audio: play thread started\n");
prctl(PR_SET_NAME, "cuvid audio", 0, 0, 0);
for (;;) {
// check if we should stop the thread
if (AudioThreadStop) {
@@ -2078,11 +2021,10 @@ static void *AudioPlayHandlerThread(void *dummy)
} while (!AudioRunning);
pthread_mutex_unlock(&AudioMutex);
Debug(3, "audio: ----> %dms start\n", (AudioUsedBytes() * 1000)
/ (!AudioRing[AudioRingWrite].HwSampleRate +
!AudioRing[AudioRingWrite].HwChannels +
AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample));
Debug(3, "audio: ----> %dms %d start\n", (AudioUsedBytes() * 1000)
/ (!AudioRing[AudioRingWrite].HwSampleRate + !AudioRing[AudioRingWrite].HwChannels +
AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
AudioUsedBytes());
do {
int filled;
@@ -2116,10 +2058,8 @@ static void *AudioPlayHandlerThread(void *dummy)
AudioUsedModule->FlushBuffers();
atomic_sub(flush, &AudioRingFilled);
if (AudioNextRing()) {
Debug(3, "audio: break after flush\n");
break;
}
Debug(3, "audio: continue after flush\n");
}
// try to play some samples
err = 0;
@@ -2137,6 +2077,7 @@ static void *AudioPlayHandlerThread(void *dummy)
// underrun, and no new ring buffer, goto sleep.
if (!atomic_read(&AudioRingFilled)) {
Debug(3, "audio: HandlerThread Underrun with no new data\n");
break;
}
@@ -2151,14 +2092,13 @@ static void *AudioPlayHandlerThread(void *dummy)
passthrough = AudioRing[AudioRingRead].Passthrough;
sample_rate = AudioRing[AudioRingRead].HwSampleRate;
channels = AudioRing[AudioRingRead].HwChannels;
Debug(3, "audio: thread channels %d frequency %dHz %s\n",
channels, sample_rate, passthrough ? "pass-through" : "");
Debug(3, "audio: thread channels %d frequency %dHz %s\n", channels, sample_rate,
passthrough ? "pass-through" : "");
// audio config changed?
if (old_passthrough != passthrough
|| old_sample_rate != sample_rate
|| old_channels != channels) {
if (old_passthrough != passthrough || old_sample_rate != sample_rate || old_channels != channels) {
// FIXME: wait for buffer drain
if (AudioNextRing()) {
Debug(3, "audio: HandlerThread break on nextring");
break;
}
} else {
@@ -2168,6 +2108,7 @@ static void *AudioPlayHandlerThread(void *dummy)
}
// FIXME: check AudioPaused ...Thread()
if (AudioPaused) {
Debug(3, "audio: HandlerThread break on paused");
break;
}
} while (AudioRing[AudioRingRead].HwSampleRate);
@@ -2227,6 +2168,27 @@ static const AudioModule *AudioModules[] = {
&NoopModule,
};
void AudioDelayms(int delayms)
{
int count;
unsigned char *p;
#ifdef DEBUG
printf("Try Delay Audio for %d ms Samplerate %d Channels %d bps %d\n", delayms,
AudioRing[AudioRingWrite].HwSampleRate, AudioRing[AudioRingWrite].HwChannels, AudioBytesProSample);
#endif
count =
delayms * AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample /
1000;
if (delayms < 5000 && delayms > 0) { // not more than 5seconds
p = calloc(1, count);
RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, p, count);
free(p);
}
}
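// Illustrative arithmetic, assuming 48 kHz stereo with 16-bit samples (AudioBytesProSample = 2):
// delayms = 100 gives count = 100 * 48000 * 2 * 2 / 1000 = 19200 bytes of silence written to the ring buffer.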
/**
** Place samples in audio output queue.
**
@@ -2260,37 +2222,28 @@ void AudioEnqueue(const void *samples, int count)
}
// audio sample modification allowed and needed?
buffer = (void *)samples;
if (!AudioRing[AudioRingWrite].Passthrough && (AudioCompression
|| AudioNormalize
|| AudioRing[AudioRingWrite].InChannels !=
AudioRing[AudioRingWrite].HwChannels)) {
if (!AudioRing[AudioRingWrite].Passthrough && (AudioCompression || AudioNormalize
|| AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels)) {
int frames;
// resample into ring-buffer is too complex in the case of a roundabout
// just use a temporary buffer
frames =
count / (AudioRing[AudioRingWrite].InChannels *
AudioBytesProSample);
buffer =
alloca(frames * AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample);
frames = count / (AudioRing[AudioRingWrite].InChannels * AudioBytesProSample);
buffer = alloca(frames * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample);
#ifdef USE_AUDIO_MIXER
// Convert / resample input to hardware format
AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames,
buffer, AudioRing[AudioRingWrite].HwChannels);
AudioResample(samples, AudioRing[AudioRingWrite].InChannels, frames, buffer,
AudioRing[AudioRingWrite].HwChannels);
#else
#ifdef DEBUG
if (AudioRing[AudioRingWrite].InChannels !=
AudioRing[AudioRingWrite].HwChannels) {
if (AudioRing[AudioRingWrite].InChannels != AudioRing[AudioRingWrite].HwChannels) {
Debug(3, "audio: internal failure channels mismatch\n");
return;
}
#endif
memcpy(buffer, samples, count);
#endif
count =
frames * AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample;
count = frames * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample;
if (AudioCompression) { // in place operation
AudioCompressor(buffer, count);
@@ -2317,11 +2270,9 @@ void AudioEnqueue(const void *samples, int count)
// FIXME: round to packet size
Debug(4, "audio: start? %4zdms skip %dms\n", (n * 1000)
/ (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
/ (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
(skip * 1000)
/ (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample));
/ (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample));
if (skip) {
if (n < (unsigned)skip) {
@@ -2334,18 +2285,19 @@ void AudioEnqueue(const void *samples, int count)
// forced start or enough video + audio buffered
// for some exotic channels * 4 too small
if (AudioStartThreshold * 4 < n || (AudioVideoIsReady
// if ((AudioVideoIsReady
&& AudioStartThreshold < n)) {
// restart play-back
// no lock needed, can wakeup next time
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
Debug(3, "Start on AudioEnque Threshold %d n %d\n", AudioStartThreshold, n);
}
}
// Update audio clock (stupid gcc developers thinks INT64_C is unsigned)
if (AudioRing[AudioRingWrite].PTS != (int64_t) INT64_C(0x8000000000000000)) {
if (AudioRing[AudioRingWrite].PTS != (int64_t) AV_NOPTS_VALUE) {
AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000)
/ (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample);
/ (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample);
}
}
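/*
**	Worked example for the PTS advance above (illustrative only, assuming
**	48000 Hz, 2 channels and AudioBytesProSample == 2):
**
**	    count = 19200 bytes
**	    PTS  += 19200 * 90 * 1000 / (48000 * 2 * 2) = 9000 ticks
**
**	9000 ticks of the 90 kHz PTS clock are exactly 100 ms of audio.
*/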
@@ -2359,15 +2311,13 @@ void AudioVideoReady(int64_t pts)
int64_t audio_pts;
size_t used;
if (pts == (int64_t) INT64_C(0x8000000000000000)) {
if (pts == (int64_t) AV_NOPTS_VALUE) {
Debug(3, "audio: a/v start, no valid video\n");
return;
}
// no valid audio known
if (!AudioRing[AudioRingWrite].HwSampleRate
|| !AudioRing[AudioRingWrite].HwChannels
|| AudioRing[AudioRingWrite].PTS ==
(int64_t) INT64_C(0x8000000000000000)) {
if (!AudioRing[AudioRingWrite].HwSampleRate || !AudioRing[AudioRingWrite].HwChannels
|| AudioRing[AudioRingWrite].PTS == (int64_t) AV_NOPTS_VALUE) {
Debug(3, "audio: a/v start, no valid audio\n");
AudioVideoIsReady = 1;
return;
@@ -2377,96 +2327,52 @@ void AudioVideoReady(int64_t pts)
used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
audio_pts =
AudioRing[AudioRingWrite].PTS -
(used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample);
(used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample);
Debug(3, "audio: a/v sync buf(%d,%4zdms) %s|%s = %dms %s\n",
atomic_read(&AudioRingFilled),
(used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
Timestamp2String(pts), Timestamp2String(audio_pts),
(int)(pts - audio_pts) / 90, AudioRunning ? "running" : "ready");
Debug(3, "audio: a/v sync buf(%d,%4zdms) %s | %s = %dms %s\n", atomic_read(&AudioRingFilled),
(used * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample), Timestamp2String(pts), Timestamp2String(audio_pts), (int)(pts - audio_pts) / 90,
AudioRunning ? "running" : "ready");
if (!AudioRunning) {
int skip;
// buffer ~15 video frames
// FIXME: HDTV can use smaller video buffer
skip =
pts - 15 * 20 * 90 - AudioBufferTime * 90 - audio_pts +
VideoAudioDelay;
skip = pts - 0 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay;
#ifdef DEBUG
fprintf(stderr, "%dms %dms %dms\n", (int)(pts - audio_pts) / 90,
VideoAudioDelay / 90, skip / 90);
// fprintf(stderr, "a/v-diff %dms a/v-delay %dms skip %dms Audiobuffer %d\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90,AudioBufferTime);
#endif
// guard against old PTS
if (skip > 0 && skip < 2000 * 90) {
skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate)
/ (1000 * 90))
if (skip > 0 && skip < 4000 * 90) {
skip = (((int64_t) skip * AudioRing[AudioRingWrite].HwSampleRate) / (1000 * 90))
* AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample;
// FIXME: round to packet size
if ((unsigned)skip > used) {
AudioSkip = skip - used;
skip = used;
}
Debug(3, "audio: sync advance %dms %d/%zd\n",
(skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample), skip, used);
Debug(3, "audio: sync advance %dms %d/%zd Rest %d\n",
(skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample), skip, used, AudioSkip);
RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip);
used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
} else {
Debug(3, "No audio skip -> should skip %d\n", skip / 90);
}
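/*
**	Worked example for the skip computation above (illustrative only,
**	assuming AudioBufferTime == 336 ms, VideoAudioDelay == 0, 48000 Hz,
**	2 channels and AudioBytesProSample == 2):
**
**	    pts - audio_pts = 36000 ticks (video leads the buffered audio by 400 ms)
**	    skip  = 36000 - 336 * 90 + 0 = 5760 ticks (64 ms)
**	    bytes = 5760 * 48000 / (1000 * 90) * 2 * 2 = 12288
**
**	i.e. 64 ms of already buffered audio are dropped so that play-back
**	starts roughly in sync with the video.
*/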
// FIXME: skip<0 we need bigger audio buffer
// enough video + audio buffered
if (AudioStartThreshold < used) {
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
Debug(3, "Start on AudioVideoReady\n");
}
}
AudioVideoIsReady = 1;
#if 0
if (AudioRing[AudioRingWrite].HwSampleRate
&& AudioRing[AudioRingWrite].HwChannels) {
if (pts != (int64_t) INT64_C(0x8000000000000000)
&& AudioRing[AudioRingWrite].PTS !=
(int64_t) INT64_C(0x8000000000000000)) {
Debug(3, "audio: a/v %d %s\n",
(int)(pts - AudioRing[AudioRingWrite].PTS) / 90,
AudioRunning ? "running" : "stopped");
}
Debug(3, "audio: start %4zdms %s|%s video ready\n",
(RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) * 1000)
/ (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
Timestamp2String(pts),
Timestamp2String(AudioRing[AudioRingWrite].PTS));
if (!AudioRunning) {
size_t used;
used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
// enough video + audio buffered
if (AudioStartThreshold < used) {
// too much audio buffered, skip it
if (AudioStartThreshold < used) {
Debug(3, "audio: start %4zdms skip video ready\n",
((used - AudioStartThreshold) * 1000)
/ (AudioRing[AudioRingWrite].HwSampleRate *
AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample));
RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer,
used - AudioStartThreshold);
}
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
}
}
}
AudioVideoIsReady = 1;
#endif
}
/**
@@ -2501,7 +2407,7 @@ void AudioFlushBuffers(void)
AudioRing[AudioRingWrite].HwChannels = AudioRing[old].HwChannels;
AudioRing[AudioRingWrite].InSampleRate = AudioRing[old].InSampleRate;
AudioRing[AudioRingWrite].InChannels = AudioRing[old].InChannels;
AudioRing[AudioRingWrite].PTS = INT64_C(0x8000000000000000);
AudioRing[AudioRingWrite].PTS = AV_NOPTS_VALUE;
RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer,
RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer));
Debug(3, "audio: reset video ready\n");
@@ -2515,6 +2421,7 @@ void AudioFlushBuffers(void)
if (!AudioRunning) { // wakeup thread to flush buffers
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
Debug(3, "Start on Flush\n");
}
// FIXME: waiting on zero isn't correct, but currently works
if (!atomic_read(&AudioRingFilled)) {
@@ -2538,8 +2445,7 @@ void AudioPoller(void)
*/
int AudioFreeBytes(void)
{
return AudioRing[AudioRingWrite].RingBuffer ?
RingBufferFreeBytes(AudioRing[AudioRingWrite].RingBuffer)
return AudioRing[AudioRingWrite].RingBuffer ? RingBufferFreeBytes(AudioRing[AudioRingWrite].RingBuffer)
: INT32_MAX;
}
@@ -2549,8 +2455,7 @@ int AudioFreeBytes(void)
int AudioUsedBytes(void)
{
// FIXME: not correct, if multiple buffer are in use
return AudioRing[AudioRingWrite].RingBuffer ?
RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) : 0;
return AudioRing[AudioRingWrite].RingBuffer ? RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) : 0;
}
/**
@@ -2573,10 +2478,10 @@ int64_t AudioGetDelay(void)
}
pts = AudioUsedModule->GetDelay();
pts += ((int64_t) RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)
* 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate *
AudioRing[AudioRingRead].HwChannels * AudioBytesProSample);
Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n",
RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer), pts / 90);
* 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels *
AudioBytesProSample);
Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer),
pts / 90);
return pts;
}
@@ -2589,10 +2494,10 @@ int64_t AudioGetDelay(void)
void AudioSetClock(int64_t pts)
{
if (AudioRing[AudioRingWrite].PTS != pts) {
Debug(4, "audio: set clock %s -> %s pts\n",
Timestamp2String(AudioRing[AudioRingWrite].PTS),
Debug(4, "audio: set clock %s -> %s pts\n", Timestamp2String(AudioRing[AudioRingWrite].PTS),
Timestamp2String(pts));
}
// printf("Audiosetclock pts %#012" PRIx64 " %d\n",pts,RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer));
AudioRing[AudioRingWrite].PTS = pts;
}
@@ -2604,7 +2509,7 @@ void AudioSetClock(int64_t pts)
int64_t AudioGetClock(void)
{
// (cast) needed for the evil gcc
if (AudioRing[AudioRingRead].PTS != (int64_t) INT64_C(0x8000000000000000)) {
if (AudioRing[AudioRingRead].PTS != (int64_t) AV_NOPTS_VALUE) {
int64_t delay;
// delay zero, if no valid time stamp
@@ -2615,7 +2520,7 @@ int64_t AudioGetClock(void)
return AudioRing[AudioRingRead].PTS + 0 * 90 - delay;
}
}
return INT64_C(0x8000000000000000);
return AV_NOPTS_VALUE;
}
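/*
**	Illustrative example: with AudioRing[AudioRingRead].PTS == 90000000
**	and a combined hardware + ring-buffer delay of 18000 ticks (200 ms),
**	AudioGetClock() reports 90000000 - 18000 = 89982000, i.e. roughly the
**	PTS of the sample that is currently audible.
*/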
/**
@@ -2628,8 +2533,7 @@ void AudioSetVolume(int volume)
AudioVolume = volume;
AudioMute = !volume;
// reduce loudness for stereo output
if (AudioStereoDescent && AudioRing[AudioRingRead].InChannels == 2
&& !AudioRing[AudioRingRead].Passthrough) {
if (AudioStereoDescent && AudioRing[AudioRingRead].InChannels == 2 && !AudioRing[AudioRingRead].Passthrough) {
volume -= AudioStereoDescent;
if (volume < 0) {
volume = 0;
@@ -2658,8 +2562,7 @@ void AudioSetVolume(int volume)
*/
int AudioSetup(int *freq, int *channels, int passthrough)
{
Debug(3, "audio: setup channels %d frequency %dHz %s\n", *channels, *freq,
passthrough ? "pass-through" : "");
Debug(3, "audio: setup channels %d frequency %dHz %s\n", *channels, *freq, passthrough ? "pass-through" : "");
// invalid parameter
if (!freq || !channels || !*freq || !*channels) {
@@ -2999,12 +2902,9 @@ void AudioInit(void)
}
}
for (u = 0; u < AudioRatesMax; ++u) {
Info(_("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"),
AudioRatesTable[u], AudioChannelMatrix[u][1],
AudioChannelMatrix[u][2], AudioChannelMatrix[u][3],
AudioChannelMatrix[u][4], AudioChannelMatrix[u][5],
AudioChannelMatrix[u][6], AudioChannelMatrix[u][7],
AudioChannelMatrix[u][8]);
Info(_("audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"), AudioRatesTable[u],
AudioChannelMatrix[u][1], AudioChannelMatrix[u][2], AudioChannelMatrix[u][3], AudioChannelMatrix[u][4],
AudioChannelMatrix[u][5], AudioChannelMatrix[u][6], AudioChannelMatrix[u][7], AudioChannelMatrix[u][8]);
}
#ifdef USE_AUDIO_THREAD
if (AudioUsedModule->Thread) { // supports threads
@@ -3077,8 +2977,7 @@ static void PrintVersion(void)
#ifdef GIT_REV
"(GIT-" GIT_REV ")"
#endif
",\n\t(c) 2009 - 2013 by Johns\n"
"\tLicense AGPLv3: GNU Affero General Public License version 3\n");
",\n\t(c) 2009 - 2013 by Johns\n" "\tLicense AGPLv3: GNU Affero General Public License version 3\n");
}
/**
@@ -3086,8 +2985,7 @@ static void PrintVersion(void)
*/
static void PrintUsage(void)
{
printf("Usage: audio_test [-?dhv]\n"
"\t-d\tenable debug, more -d increase the verbosity\n"
printf("Usage: audio_test [-?dhv]\n" "\t-d\tenable debug, more -d increase the verbosity\n"
"\t-? -h\tdisplay this message\n" "\t-v\tdisplay version information\n"
"Only idiots print usage on stderr!\n");
}

codec.c (992 changed lines): diff suppressed because the file is too large

codec.h (10 changed lines)

@@ -35,7 +35,8 @@
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000
enum HWAccelID {
enum HWAccelID
{
HWACCEL_NONE = 0,
HWACCEL_AUTO,
HWACCEL_VDPAU,
@@ -48,6 +49,7 @@ enum HWAccelID {
};
extern AVBufferRef *hw_device_ctx;
///
/// Video decoder structure.
///
@@ -61,7 +63,9 @@ struct _video_decoder_
// #ifdef FFMPEG_WORKAROUND_ARTIFACTS
int FirstKeyFrame; ///< flag first frame
// #endif
AVFrame *Frame; ///< decoded video frame
// AVFrame *Frame; ///< decoded video frame
int filter; // flag for deint filter
/* hwaccel options */
enum HWAccelID hwaccel_id;
@@ -84,8 +88,6 @@ struct _video_decoder_
// From VO
struct mp_hwdec_devices *hwdec_devs;
};
//----------------------------------------------------------------------------


@@ -1,3 +1,4 @@
/*
* This file is part of mpv.
*
@@ -52,7 +53,8 @@
struct GL;
typedef struct GL GL;
enum {
enum
{
MPGL_CAP_ROW_LENGTH = (1 << 4), // GL_[UN]PACK_ROW_LENGTH
MPGL_CAP_FB = (1 << 5),
MPGL_CAP_VAO = (1 << 6),
@@ -78,16 +80,15 @@ enum {
#define MPGL_VER_P(ver) MPGL_VER_GET_MAJOR(ver), MPGL_VER_GET_MINOR(ver)
void mpgl_load_functions(GL *gl, void *(*getProcAddress)(const GLubyte *),
const char *ext2, struct mp_log *log);
void mpgl_load_functions2(GL *gl, void *(*get_fn)(void *ctx, const char *n),
void *fn_ctx, const char *ext2, struct mp_log *log);
void mpgl_load_functions(GL * gl, void *(*getProcAddress)(const GLubyte *), const char *ext2, struct mp_log *log);
void mpgl_load_functions2(GL * gl, void *(*get_fn)(void *ctx, const char *n), void *fn_ctx, const char *ext2,
struct mp_log *log);
typedef void (GLAPIENTRY *MP_GLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum,
GLsizei, const GLchar *,const void *);
typedef void (GLAPIENTRY * MP_GLDEBUGPROC) (GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar *, const void *);
// function pointers loaded from the OpenGL library
struct GL {
struct GL
{
int version; // MPGL_VER() mangled (e.g. 210 for 2.1)
int es; // es version (e.g. 300), 0 for desktop GL
int glsl_version; // e.g. 130 for GLSL 1.30
@@ -108,17 +109,12 @@ struct GL {
void (GLAPIENTRY * Flush) (void);
void (GLAPIENTRY * Finish) (void);
void (GLAPIENTRY * PixelStorei) (GLenum, GLint);
void (GLAPIENTRY *TexImage1D)(GLenum, GLint, GLint, GLsizei, GLint,
GLenum, GLenum, const GLvoid *);
void (GLAPIENTRY *TexImage2D)(GLenum, GLint, GLint, GLsizei, GLsizei,
GLint, GLenum, GLenum, const GLvoid *);
void (GLAPIENTRY *TexSubImage2D)(GLenum, GLint, GLint, GLint,
GLsizei, GLsizei, GLenum, GLenum,
const GLvoid *);
void (GLAPIENTRY * TexImage1D) (GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, const GLvoid *);
void (GLAPIENTRY * TexImage2D) (GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, const GLvoid *);
void (GLAPIENTRY * TexSubImage2D) (GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, const GLvoid *);
void (GLAPIENTRY * TexParameteri) (GLenum, GLenum, GLint);
void (GLAPIENTRY * GetIntegerv) (GLenum, GLint *);
void (GLAPIENTRY *ReadPixels)(GLint, GLint, GLsizei, GLsizei, GLenum,
GLenum, GLvoid *);
void (GLAPIENTRY * ReadPixels) (GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLvoid *);
void (GLAPIENTRY * ReadBuffer) (GLenum);
void (GLAPIENTRY * DrawArrays) (GLenum, GLint, GLsizei);
GLenum(GLAPIENTRY * GetError) (void);
@@ -129,15 +125,13 @@ struct GL {
void (GLAPIENTRY * DeleteBuffers) (GLsizei, const GLuint *);
void (GLAPIENTRY * BindBuffer) (GLenum, GLuint);
void (GLAPIENTRY * BindBufferBase) (GLenum, GLuint, GLuint);
GLvoid * (GLAPIENTRY *MapBufferRange)(GLenum, GLintptr, GLsizeiptr,
GLbitfield);
GLvoid *(GLAPIENTRY * MapBufferRange) (GLenum, GLintptr, GLsizeiptr, GLbitfield);
GLboolean(GLAPIENTRY * UnmapBuffer) (GLenum);
void (GLAPIENTRY * BufferData) (GLenum, intptr_t, const GLvoid *, GLenum);
void (GLAPIENTRY * ActiveTexture) (GLenum);
void (GLAPIENTRY * BindTexture) (GLenum, GLuint);
int (GLAPIENTRY * SwapInterval) (int);
void (GLAPIENTRY *TexImage3D)(GLenum, GLint, GLenum, GLsizei, GLsizei,
GLsizei, GLint, GLenum, GLenum,
void (GLAPIENTRY * TexImage3D) (GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum,
const GLvoid *);
void (GLAPIENTRY * GenVertexArrays) (GLsizei, GLuint *);
@@ -145,16 +139,14 @@ struct GL {
GLint(GLAPIENTRY * GetAttribLocation) (GLuint, const GLchar *);
void (GLAPIENTRY * EnableVertexAttribArray) (GLuint);
void (GLAPIENTRY * DisableVertexAttribArray) (GLuint);
void (GLAPIENTRY *VertexAttribPointer)(GLuint, GLint, GLenum, GLboolean,
GLsizei, const GLvoid *);
void (GLAPIENTRY * VertexAttribPointer) (GLuint, GLint, GLenum, GLboolean, GLsizei, const GLvoid *);
void (GLAPIENTRY * DeleteVertexArrays) (GLsizei, const GLuint *);
void (GLAPIENTRY * UseProgram) (GLuint);
GLint(GLAPIENTRY * GetUniformLocation) (GLuint, const GLchar *);
void (GLAPIENTRY * CompileShader) (GLuint);
GLuint(GLAPIENTRY * CreateProgram) (void);
GLuint(GLAPIENTRY * CreateShader) (GLenum);
void (GLAPIENTRY *ShaderSource)(GLuint, GLsizei, const GLchar **,
const GLint *);
void (GLAPIENTRY * ShaderSource) (GLuint, GLsizei, const GLchar **, const GLint *);
void (GLAPIENTRY * LinkProgram) (GLuint);
void (GLAPIENTRY * AttachShader) (GLuint, GLuint);
void (GLAPIENTRY * DeleteShader) (GLuint);
@@ -169,22 +161,17 @@ struct GL {
void (GLAPIENTRY * GenFramebuffers) (GLsizei, GLuint *);
void (GLAPIENTRY * DeleteFramebuffers) (GLsizei, const GLuint *);
GLenum(GLAPIENTRY * CheckFramebufferStatus) (GLenum);
void (GLAPIENTRY *FramebufferTexture2D)(GLenum, GLenum, GLenum, GLuint,
GLint);
void (GLAPIENTRY *BlitFramebuffer)(GLint, GLint, GLint, GLint, GLint, GLint,
GLint, GLint, GLbitfield, GLenum);
void (GLAPIENTRY *GetFramebufferAttachmentParameteriv)(GLenum, GLenum,
GLenum, GLint *);
void (GLAPIENTRY * FramebufferTexture2D) (GLenum, GLenum, GLenum, GLuint, GLint);
void (GLAPIENTRY * BlitFramebuffer) (GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum);
void (GLAPIENTRY * GetFramebufferAttachmentParameteriv) (GLenum, GLenum, GLenum, GLint *);
void (GLAPIENTRY * Uniform1f) (GLint, GLfloat);
void (GLAPIENTRY * Uniform2f) (GLint, GLfloat, GLfloat);
void (GLAPIENTRY * Uniform3f) (GLint, GLfloat, GLfloat, GLfloat);
void (GLAPIENTRY * Uniform4f) (GLint, GLfloat, GLfloat, GLfloat, GLfloat);
void (GLAPIENTRY * Uniform1i) (GLint, GLint);
void (GLAPIENTRY *UniformMatrix2fv)(GLint, GLsizei, GLboolean,
const GLfloat *);
void (GLAPIENTRY *UniformMatrix3fv)(GLint, GLsizei, GLboolean,
const GLfloat *);
void (GLAPIENTRY * UniformMatrix2fv) (GLint, GLsizei, GLboolean, const GLfloat *);
void (GLAPIENTRY * UniformMatrix3fv) (GLint, GLsizei, GLboolean, const GLfloat *);
void (GLAPIENTRY * InvalidateFramebuffer) (GLenum, GLsizei, const GLenum *);
@@ -216,27 +203,21 @@ struct GL {
#if HAVE_GL_WIN32
// The HANDLE type might not be present on non-Win32
BOOL (GLAPIENTRY *DXSetResourceShareHandleNV)(void *dxObject,
HANDLE shareHandle);
BOOL(GLAPIENTRY * DXSetResourceShareHandleNV) (void *dxObject, HANDLE shareHandle);
HANDLE(GLAPIENTRY * DXOpenDeviceNV) (void *dxDevice);
BOOL(GLAPIENTRY * DXCloseDeviceNV) (HANDLE hDevice);
HANDLE (GLAPIENTRY *DXRegisterObjectNV)(HANDLE hDevice, void *dxObject,
GLuint name, GLenum type, GLenum access);
HANDLE(GLAPIENTRY * DXRegisterObjectNV) (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access);
BOOL(GLAPIENTRY * DXUnregisterObjectNV) (HANDLE hDevice, HANDLE hObject);
BOOL (GLAPIENTRY *DXLockObjectsNV)(HANDLE hDevice, GLint count,
HANDLE *hObjects);
BOOL (GLAPIENTRY *DXUnlockObjectsNV)(HANDLE hDevice, GLint count,
HANDLE *hObjects);
BOOL(GLAPIENTRY * DXLockObjectsNV) (HANDLE hDevice, GLint count, HANDLE * hObjects);
BOOL(GLAPIENTRY * DXUnlockObjectsNV) (HANDLE hDevice, GLint count, HANDLE * hObjects);
#endif
GLint(GLAPIENTRY * GetVideoSync) (GLuint *);
GLint(GLAPIENTRY * WaitVideoSync) (GLint, GLint, unsigned int *);
void (GLAPIENTRY *GetTranslatedShaderSourceANGLE)(GLuint, GLsizei,
GLsizei*, GLchar* source);
void (GLAPIENTRY * GetTranslatedShaderSourceANGLE) (GLuint, GLsizei, GLsizei *, GLchar * source);
void (GLAPIENTRY *DebugMessageCallback)(MP_GLDEBUGPROC callback,
const void *userParam);
void (GLAPIENTRY * DebugMessageCallback) (MP_GLDEBUGPROC callback, const void *userParam);
void *(GLAPIENTRY * MPGetNativeDisplay) (const char *name);
};


@@ -1,3 +1,4 @@
/*
* This file is part of libplacebo.
*
@@ -39,5 +40,4 @@
#define PL_HAVE_SHADERC 0
#define PL_HAVE_VULKAN 1
#endif // LIBPLACEBO_CONTEXT_H_

drirc: new file, 54 lines

@@ -0,0 +1,54 @@
<driconf>
<device screen="0" driver="i965">
<application name="Default">
<option name="always_flush_cache" value="false" />
<option name="mesa_no_error" value="false" />
<option name="precise_trig" value="false" />
<option name="mesa_glthread" value="false" />
<option name="disable_glsl_line_continuations" value="false" />
<option name="disable_blend_func_extended" value="false" />
<option name="shader_precompile" value="true" />
<option name="clamp_max_samples" value="-1" />
<option name="allow_glsl_extension_directive_midshader" value="false" />
<option name="allow_rgb10_configs" value="true" />
<option name="allow_glsl_cross_stage_interpolation_mismatch" value="false" />
<option name="force_glsl_abs_sqrt" value="false" />
<option name="dual_color_blend_by_location" value="false" />
<option name="bo_reuse" value="1" />
<option name="always_flush_batch" value="false" />
<option name="allow_rgb565_configs" value="true" />
<option name="allow_glsl_builtin_variable_redeclaration" value="true" />
<option name="force_glsl_extensions_warn" value="false" />
<option name="disable_throttling" value="false" />
<option name="force_glsl_version" value="330" />
<option name="glsl_zero_init" value="false" />
<option name="allow_higher_compat_version" value="true" />
</application>
</device>
<device screen="0" driver="radeonsi">
<application name="Default">
<option name="always_flush_cache" value="false" />
<option name="mesa_no_error" value="false" />
<option name="precise_trig" value="false" />
<option name="mesa_glthread" value="false" />
<option name="disable_glsl_line_continuations" value="false" />
<option name="disable_blend_func_extended" value="false" />
<option name="shader_precompile" value="true" />
<option name="clamp_max_samples" value="-1" />
<option name="allow_glsl_extension_directive_midshader" value="false" />
<option name="allow_rgb10_configs" value="true" />
<option name="allow_glsl_cross_stage_interpolation_mismatch" value="false" />
<option name="force_glsl_abs_sqrt" value="false" />
<option name="dual_color_blend_by_location" value="false" />
<option name="bo_reuse" value="1" />
<option name="always_flush_batch" value="false" />
<option name="allow_rgb565_configs" value="true" />
<option name="allow_glsl_builtin_variable_redeclaration" value="true" />
<option name="force_glsl_extensions_warn" value="false" />
<option name="disable_throttling" value="false" />
<option name="force_glsl_version" value="330" />
<option name="glsl_zero_init" value="false" />
<option name="allow_higher_compat_version" value="true" />
</application>
</device>
</driconf>

drm.c: new file, 628 lines

@@ -0,0 +1,628 @@
#include <unistd.h>
#include <gbm.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>
#define DRM_DEBUG
//----------------------------------------------------------------------------
// DRM
//----------------------------------------------------------------------------
struct _Drm_Render_
{
int fd_drm;
drmModeModeInfo mode;
drmModeCrtc *saved_crtc;
// drmEventContext ev;
int bpp;
uint32_t connector_id, crtc_id, video_plane;
uint32_t hdr_metadata;
uint32_t mmWidth,mmHeight; // Size in mm
uint32_t hdr_blob_id;
};
typedef struct _Drm_Render_ VideoRender;
struct {
struct gbm_device *dev;
struct gbm_surface *surface;
} gbm;
VideoRender *render;
//----------------------------------------------------------------------------
// Helper functions
//----------------------------------------------------------------------------
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif
struct type_name {
unsigned int type;
const char *name;
};
static const char *util_lookup_type_name(unsigned int type,
const struct type_name *table,
unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
if (table[i].type == type)
return table[i].name;
return NULL;
}
static const struct type_name connector_type_names[] = {
{ DRM_MODE_CONNECTOR_Unknown, "unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
{ DRM_MODE_CONNECTOR_Composite, "composite" },
{ DRM_MODE_CONNECTOR_SVIDEO, "s-video" },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
{ DRM_MODE_CONNECTOR_Component, "component" },
{ DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN" },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
};
const char *util_lookup_connector_type_name(unsigned int type)
{
return util_lookup_type_name(type, connector_type_names,
ARRAY_SIZE(connector_type_names));
}
static uint64_t GetPropertyValue(int fd_drm, uint32_t objectID,
uint32_t objectType, const char *propName)
{
uint32_t i;
int found = 0;
uint64_t value = 0;
drmModePropertyPtr Prop;
drmModeObjectPropertiesPtr objectProps =
drmModeObjectGetProperties(fd_drm, objectID, objectType);
for (i = 0; i < objectProps->count_props; i++) {
if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL)
fprintf(stderr, "GetPropertyValue: Unable to query property.\n");
if (strcmp(propName, Prop->name) == 0) {
value = objectProps->prop_values[i];
found = 1;
}
drmModeFreeProperty(Prop);
if (found)
break;
}
drmModeFreeObjectProperties(objectProps);
#ifdef DRM_DEBUG
if (!found)
fprintf(stderr, "GetPropertyValue: Unable to find value for property \'%s\'.\n",
propName);
#endif
return value;
}
static uint32_t GetPropertyID(int fd_drm, uint32_t objectID,
uint32_t objectType, const char *propName)
{
uint32_t i;
int found = 0;
uint32_t value = -1;
drmModePropertyPtr Prop;
drmModeObjectPropertiesPtr objectProps =
drmModeObjectGetProperties(fd_drm, objectID, objectType);
for (i = 0; i < objectProps->count_props; i++) {
if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL)
fprintf(stderr, "GetPropertyValue: Unable to query property.\n");
if (strcmp(propName, Prop->name) == 0) {
value = objectProps->props[i];
found = 1;
}
drmModeFreeProperty(Prop);
if (found)
break;
}
drmModeFreeObjectProperties(objectProps);
#ifdef DRM_DEBUG
if (!found)
Debug(3,"GetPropertyValue: Unable to find ID for property \'%s\'.\n",propName);
#endif
return value;
}
static int SetPropertyRequest(drmModeAtomicReqPtr ModeReq, int fd_drm,
uint32_t objectID, uint32_t objectType,
const char *propName, uint64_t value)
{
uint32_t i;
uint64_t id = 0;
drmModePropertyPtr Prop;
drmModeObjectPropertiesPtr objectProps =
drmModeObjectGetProperties(fd_drm, objectID, objectType);
for (i = 0; i < objectProps->count_props; i++) {
if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL)
printf( "SetPropertyRequest: Unable to query property.\n");
if (strcmp(propName, Prop->name) == 0) {
id = Prop->prop_id;
drmModeFreeProperty(Prop);
break;
}
drmModeFreeProperty(Prop);
}
drmModeFreeObjectProperties(objectProps);
if (id == 0)
printf( "SetPropertyRequest: Unable to find value for property \'%s\'.\n",
propName);
return drmModeAtomicAddProperty(ModeReq, objectID, id, value);
}
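/*
**	Usage sketch for the three helpers above (illustrative only; fd,
**	conn_id and crtc_id are assumed to be a valid DRM fd, connector id
**	and CRTC id). All of them walk the drmModeObjectGetProperties() list
**	by name: GetPropertyValue() returns the current value, GetPropertyID()
**	the property id, and SetPropertyRequest() stages a value in an atomic
**	request:
**
**	    drmModeAtomicReqPtr req = drmModeAtomicAlloc();
**	    SetPropertyRequest(req, fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
**	        "CRTC_ID", crtc_id);
**	    drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
**	    drmModeAtomicFree(req);
*/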
static void CuvidSetVideoMode(void);
void set_video_mode(int width, int height)
{
drmModeConnector *connector;
drmModeModeInfo *mode;
int ii;
if (height != 1080 && height != 2160)
return;
connector = drmModeGetConnector(render->fd_drm, render->connector_id);
for (ii = 0; ii < connector->count_modes; ii++) {
mode = &connector->modes[ii];
printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh);
if (width == mode->hdisplay &&
height == mode->vdisplay &&
mode->vrefresh == DRMRefresh &&
render->mode.hdisplay != width &&
render->mode.vdisplay != height &&
!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
VideoWindowWidth = mode->hdisplay;
VideoWindowHeight = mode->vdisplay;
eglDestroySurface (eglDisplay, eglSurface);
EglCheck();
gbm_surface_destroy (gbm.surface);
InitBo(render->bpp);
CuvidSetVideoMode();
Debug(3,"Set new mode %d:%d\n",mode->hdisplay,mode->vdisplay);
break;
}
}
}
static int FindDevice(VideoRender * render)
{
drmVersion *version;
drmModeRes *resources;
drmModeConnector *connector;
drmModeEncoder *encoder = 0;
drmModeModeInfo *mode;
drmModePlane *plane;
drmModePlaneRes *plane_res;
drmModeObjectPropertiesPtr props;
uint32_t j, k;
uint64_t has_dumb;
uint64_t has_prime;
int i,ii=0;
char connectorstr[10];
int found = 0;
#ifdef RASPI
render->fd_drm = open("/dev/dri/card1", O_RDWR);
#else
render->fd_drm = open("/dev/dri/card0", O_RDWR);
#endif
if (render->fd_drm < 0) {
fprintf(stderr, "FindDevice: cannot open /dev/dri/card0: %m\n");
return -errno;
}
int ret = drmSetMaster(render->fd_drm);
if (ret < 0)
{
drm_magic_t magic;
ret = drmGetMagic(render->fd_drm, &magic);
if (ret < 0)
{
Debug(3, "drm:%s - failed to get drm magic: %s\n", __FUNCTION__, strerror(errno));
return -1;
}
ret = drmAuthMagic(render->fd_drm, magic);
if (ret < 0)
{
Debug(3, "drm:%s - failed to authorize drm magic: %s\n", __FUNCTION__, strerror(errno));
return -1;
}
}
version = drmGetVersion(render->fd_drm);
fprintf(stderr, "FindDevice: open /dev/dri/card0: %s\n", version->name);
// check capability
if (drmGetCap(render->fd_drm, DRM_CAP_DUMB_BUFFER, &has_dumb) < 0 || has_dumb == 0)
fprintf(stderr, "FindDevice: drmGetCap DRM_CAP_DUMB_BUFFER failed or doesn't have dumb buffer\n");
if (drmSetClientCap(render->fd_drm, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1) != 0)
fprintf(stderr, "FindDevice: DRM_CLIENT_CAP_UNIVERSAL_PLANES not available.\n");
if (drmSetClientCap(render->fd_drm, DRM_CLIENT_CAP_ATOMIC, 1) != 0)
fprintf(stderr, "FindDevice: DRM_CLIENT_CAP_ATOMIC not available.\n");
if (drmGetCap(render->fd_drm, DRM_CAP_PRIME, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_CAP_PRIME not available.\n");
if (drmGetCap(render->fd_drm, DRM_PRIME_CAP_EXPORT, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_PRIME_CAP_EXPORT not available.\n");
if (drmGetCap(render->fd_drm, DRM_PRIME_CAP_IMPORT, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_PRIME_CAP_IMPORT not available.\n");
if ((resources = drmModeGetResources(render->fd_drm)) == NULL){
fprintf(stderr, "FindDevice: cannot retrieve DRM resources (%d): %m\n", errno);
return -errno;
}
#ifdef DEBUG
Debug(3,"[FindDevice] DRM have %i connectors, %i crtcs, %i encoders\n",
resources->count_connectors, resources->count_crtcs,
resources->count_encoders);
#endif
// find all available connectors
for (i = 0; i < resources->count_connectors; i++) {
connector = drmModeGetConnector(render->fd_drm, resources->connectors[i]);
if (!connector) {
fprintf(stderr, "FindDevice: cannot retrieve DRM connector (%d): %m\n", errno);
return -errno;
}
sprintf(connectorstr,"%s-%u",util_lookup_connector_type_name(connector->connector_type),connector->connector_type_id);
printf("Connector >%s< is %sconnected\n",connectorstr,connector->connection == DRM_MODE_CONNECTED?"":"not ");
if (DRMConnector && strcmp(DRMConnector,connectorstr))
continue;
if (connector->connection == DRM_MODE_CONNECTED && connector->count_modes > 0) {
float aspect = (float)connector->mmWidth / (float)connector->mmHeight;
if ((aspect > 1.70) && (aspect < 1.85)) {
render->mmHeight = 90;
render->mmWidth = 160;
} else {
render->mmHeight = connector->mmHeight;
render->mmWidth = connector->mmWidth;
}
render->connector_id = connector->connector_id;
// FIXME: use default encoder/crtc pair
if ((encoder = drmModeGetEncoder(render->fd_drm, connector->encoder_id)) == NULL){
fprintf(stderr, "FindDevice: cannot retrieve encoder (%d): %m\n", errno);
return -errno;
}
render->crtc_id = encoder->crtc_id;
render->hdr_metadata = GetPropertyID(render->fd_drm, connector->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "HDR_OUTPUT_METADATA");
printf("ID %d of METADATA in Connector %d connected %d\n",render->hdr_metadata,connector->connector_id,connector->connection);
memcpy(&render->mode, &connector->modes[0], sizeof(drmModeModeInfo)); // set fallback
// search Modes for Connector
for (ii = 0; ii < connector->count_modes; ii++) {
mode = &connector->modes[ii];
printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh);
if (VideoWindowWidth && VideoWindowHeight) { // preset by command line
if (VideoWindowWidth == mode->hdisplay &&
VideoWindowHeight == mode->vdisplay &&
mode->vrefresh == DRMRefresh &&
!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
break;
}
}
else {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
VideoWindowWidth = mode->hdisplay;
VideoWindowHeight = mode->vdisplay;
break;
}
}
}
found = 1;
i = resources->count_connectors; // stop scanning the remaining connectors
}
VideoWindowWidth = render->mode.hdisplay;
VideoWindowHeight = render->mode.vdisplay;
if (found)
printf("Use Mode %d %dx%d Rate %d\n",ii,render->mode.hdisplay,render->mode.vdisplay,render->mode.vrefresh);
drmModeFreeConnector(connector);
}
if (!found) {
Debug(3,"Requested Connector not found or not connected\n");
printf("Requested Connector not found or not connected\n");
return -1;
}
// find first plane
if ((plane_res = drmModeGetPlaneResources(render->fd_drm)) == NULL)
fprintf(stderr, "FindDevice: cannot retrieve PlaneResources (%d): %m\n", errno);
for (j = 0; j < plane_res->count_planes; j++) {
plane = drmModeGetPlane(render->fd_drm, plane_res->planes[j]);
if (plane == NULL)
fprintf(stderr, "FindDevice: cannot query DRM-KMS plane %d\n", j);
for (i = 0; i < resources->count_crtcs; i++) {
if (plane->possible_crtcs & (1 << i))
break;
}
uint64_t type = GetPropertyValue(render->fd_drm, plane_res->planes[j],
DRM_MODE_OBJECT_PLANE, "type");
uint64_t zpos = 0;
#ifdef DRM_DEBUG // FIXME: if there are more than 2 CRTCs this must be rewritten
printf("[FindDevice] Plane id %i crtc_id %i possible_crtcs %i possible CRTC %i type %s\n",
plane->plane_id, plane->crtc_id, plane->possible_crtcs, resources->crtcs[i],
(type == DRM_PLANE_TYPE_PRIMARY) ? "primary plane" :
(type == DRM_PLANE_TYPE_OVERLAY) ? "overlay plane" :
(type == DRM_PLANE_TYPE_CURSOR) ? "cursor plane" : "No plane type");
#endif
// test pixel format and plane caps
for (k = 0; k < plane->count_formats; k++) {
if (encoder->possible_crtcs & plane->possible_crtcs) {
switch (plane->formats[k]) {
#ifdef RASPI
case DRM_FORMAT_ARGB8888:
#else
case DRM_FORMAT_XRGB2101010:
#endif
if (!render->video_plane) {
render->video_plane = plane->plane_id;
}
break;
default:
break;
}
}
}
drmModeFreePlane(plane);
}
drmModeFreePlaneResources(plane_res);
drmModeFreeEncoder(encoder);
drmModeFreeResources(resources);
#ifdef DRM_DEBUG
printf("[FindDevice] DRM setup CRTC: %i video_plane: %i \n",
render->crtc_id, render->video_plane);
#endif
// save actual modesetting
render->saved_crtc = drmModeGetCrtc(render->fd_drm, render->crtc_id);
return 0;
}
///
/// Initialize video output module.
///
void VideoInitDrm()
{
int i;
if (!(render = calloc(1, sizeof(*render)))) {
Fatal(_("video/DRM: out of memory\n"));
return;
}
if (FindDevice(render)){
Fatal(_( "VideoInit: FindDevice() failed\n"));
}
gbm.dev = gbm_create_device (render->fd_drm);
assert (gbm.dev != NULL);
PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display = NULL;
get_platform_display =
(void *) eglGetProcAddress("eglGetPlatformDisplay");
assert(get_platform_display != NULL);
eglDisplay = get_platform_display(EGL_PLATFORM_GBM_KHR, gbm.dev, NULL);
assert (eglDisplay != NULL);
// return;
drmModeAtomicReqPtr ModeReq;
const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET;
uint32_t modeID = 0;
if (drmModeCreatePropertyBlob(render->fd_drm, &render->mode, sizeof(render->mode), &modeID) != 0) {
fprintf(stderr, "Failed to create mode property.\n");
return;
}
if (!(ModeReq = drmModeAtomicAlloc())) {
fprintf(stderr, "cannot allocate atomic request (%d): %m\n", errno);
return;
}
printf("set CRTC %d of Connector %d aktiv\n",render->crtc_id,render->connector_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 1);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
if (drmModeDestroyPropertyBlob(render->fd_drm, modeID) != 0)
fprintf(stderr, "cannot destroy property blob (%d): %m\n", errno);
drmModeAtomicFree(ModeReq);
}
void get_drm_aspect(int *num,int *den)
{
*num = VideoWindowWidth;
*den = VideoWindowHeight;
}
struct gbm_bo *bo = NULL, *next_bo=NULL;
struct drm_fb *fb;
static int m_need_modeset = 0;
static int old_color=-1,old_trc=-1;
void InitBo(int bpp) {
// create the GBM and EGL surface
render->bpp = bpp;
gbm.surface = gbm_surface_create (gbm.dev, VideoWindowWidth,VideoWindowHeight,
bpp==10?GBM_FORMAT_XRGB2101010:GBM_FORMAT_ARGB8888,
GBM_BO_USE_SCANOUT|GBM_BO_USE_RENDERING);
assert(gbm.surface != NULL);
eglSurface = eglCreateWindowSurface (eglDisplay, eglConfig, gbm.surface, NULL);
assert(eglSurface != NULL);
}
static struct gbm_bo *previous_bo = NULL;
static uint32_t previous_fb;
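/*
**	drm_swap_buffers() below follows the usual GBM/EGL scan-out pattern:
**	eglSwapBuffers() finishes rendering into the GBM surface, the new
**	front buffer is locked, wrapped into a DRM framebuffer with
**	drmModeAddFB() and programmed with drmModeSetCrtc(); only afterwards
**	is the previously displayed buffer released back to the surface.
*/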
static void drm_swap_buffers () {
uint32_t fb;
eglSwapBuffers (eglDisplay, eglSurface);
struct gbm_bo *bo = gbm_surface_lock_front_buffer (gbm.surface);
#if 1
if (bo == NULL)
bo = gbm_surface_lock_front_buffer (gbm.surface);
#endif
assert (bo != NULL);
uint32_t handle = gbm_bo_get_handle (bo).u32;
uint32_t pitch = gbm_bo_get_stride (bo);
drmModeAddFB (render->fd_drm, VideoWindowWidth,VideoWindowHeight,render->bpp==10? 30:24, 32, pitch, handle, &fb);
// drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode);
if (m_need_modeset) {
drmModeAtomicReqPtr ModeReq;
const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET;
uint32_t modeID = 0;
if (drmModeCreatePropertyBlob(render->fd_drm, &render->mode, sizeof(render->mode), &modeID) != 0) {
fprintf(stderr, "Failed to create mode property.\n");
return;
}
if (!(ModeReq = drmModeAtomicAlloc())) {
fprintf(stderr, "cannot allocate atomic request (%d): %m\n", errno);
return;
}
// Need to disable the CRTC in order to submit the HDR data....
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 0);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
sleep(2);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "Colorspace",old_color==AVCOL_PRI_BT2020?9:2 );
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 1);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
if (drmModeDestroyPropertyBlob(render->fd_drm, modeID) != 0)
fprintf(stderr, "cannot destroy prperty blob (%d): %m\n", errno);
drmModeAtomicFree(ModeReq);
m_need_modeset = 0;
}
drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode);
if (previous_bo) {
drmModeRmFB (render->fd_drm, previous_fb);
gbm_surface_release_buffer (gbm.surface, previous_bo);
}
previous_bo = bo;
previous_fb = fb;
}
static void drm_clean_up () {
// set the previous crtc
if (!render)
return;
Debug(3,"drm clean up\n");
if (previous_bo) {
drmModeRmFB (render->fd_drm, previous_fb);
gbm_surface_release_buffer (gbm.surface, previous_bo);
}
drmModeSetCrtc (render->fd_drm, render->saved_crtc->crtc_id, render->saved_crtc->buffer_id,
render->saved_crtc->x, render->saved_crtc->y, &render->connector_id, 1, &render->saved_crtc->mode);
drmModeFreeCrtc (render->saved_crtc);
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
render->hdr_blob_id = 0;
eglDestroySurface (eglDisplay, eglSurface);
EglCheck();
gbm_surface_destroy (gbm.surface);
eglDestroyContext (eglDisplay, eglContext);
EglCheck();
eglDestroyContext (eglDisplay, eglSharedContext);
EglCheck();
eglSharedContext = NULL;
eglTerminate (eglDisplay);
EglCheck();
gbm_device_destroy (gbm.dev);
drmDropMaster(render->fd_drm);
close (render->fd_drm);
eglDisplay = NULL;
free(render);
}

View File

@@ -1,3 +1,4 @@
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
@@ -16,8 +17,6 @@
#include <string.h>
#include <stdlib.h>
#ifdef __cuda_cuda_h__ // check to see if CUDA_H is included above
// Error Code string definitions here
typedef struct
{
@@ -28,8 +27,8 @@ typedef struct
/**
* Error codes
*/
s_CudaErrorStr sCudaDrvErrorString[] =
{
s_CudaErrorStr sCudaDrvErrorString[] = {
/**
* The API call returned with no errors. In the case of query calls, this
* can also mean that the operation being queried is complete (see
@@ -65,21 +64,25 @@ s_CudaErrorStr sCudaDrvErrorString[] =
* in visual profiler mode.
*/
{"CUDA_ERROR_PROFILER_DISABLED", 5},
/**
* This indicates profiling has not been initialized for this context.
* Call cuProfilerInitialize() to resolve this.
*/
{"CUDA_ERROR_PROFILER_NOT_INITIALIZED", 6},
/**
* This indicates profiler has already been started and probably
* cuProfilerStart() is incorrectly called.
*/
{"CUDA_ERROR_PROFILER_ALREADY_STARTED", 7},
/**
* This indicates profiler has already been stopped and probably
* cuProfilerStop() is incorrectly called.
*/
{"CUDA_ERROR_PROFILER_ALREADY_STOPPED", 8},
/**
* This indicates that no CUDA-capable devices were detected by the installed
* CUDA driver.
@@ -92,7 +95,6 @@ s_CudaErrorStr sCudaDrvErrorString[] =
*/
{"CUDA_ERROR_INVALID_DEVICE (device specified is not a valid CUDA device)", 101},
/**
* This indicates that the device kernel image is invalid. This can also
* indicate an invalid CUDA module.
@@ -240,21 +242,18 @@ s_CudaErrorStr sCudaDrvErrorString[] =
*/
{"CUDA_ERROR_OPERATING_SYSTEM", 304},
/**
* This indicates that a resource handle passed to the API call was not
* valid. Resource handles are opaque types like ::CUstream and ::CUevent.
*/
{"CUDA_ERROR_INVALID_HANDLE", 400},
/**
* This indicates that a named symbol was not found. Examples of symbols
* are global/constant variable names, texture names }, and surface names.
*/
{"CUDA_ERROR_NOT_FOUND", 500},
/**
* This indicates that asynchronous operations issued previously have not
* completed yet. This result is not actually an error, but must be indicated
@@ -263,7 +262,6 @@ s_CudaErrorStr sCudaDrvErrorString[] =
*/
{"CUDA_ERROR_NOT_READY", 600},
/**
* While executing a kernel, the device encountered a
* load or store instruction on an invalid memory address.
@@ -419,7 +417,6 @@ s_CudaErrorStr sCudaDrvErrorString[] =
*/
{"CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE", 720},
/**
* This error indicates that the attempted operation is not permitted.
*/
@@ -431,7 +428,6 @@ s_CudaErrorStr sCudaDrvErrorString[] =
*/
{"CUDA_ERROR_NOT_SUPPORTED", 801},
/**
* This indicates that an unknown internal error has occurred.
*/
@@ -441,13 +437,11 @@ s_CudaErrorStr sCudaDrvErrorString[] =
// This is just a linear search through the array, since the error_ids are not
// always occurring consecutively
inline const char *getCudaDrvErrorString(CUresult error_id)
static inline const char *getCudaDrvErrorString(CUresult error_id)
{
int index = 0;
while (sCudaDrvErrorString[index].error_id != error_id &&
(int)sCudaDrvErrorString[index].error_id != -1)
{
while (sCudaDrvErrorString[index].error_id != error_id && (int)sCudaDrvErrorString[index].error_id != -1) {
index++;
}
@@ -457,7 +451,4 @@ inline const char *getCudaDrvErrorString(CUresult error_id)
return (const char *)"CUDA_ERROR not found!";
}
#endif // __cuda_cuda_h__
#endif

hdr.c: new file, 492 lines

@@ -0,0 +1,492 @@
#include <libavutil/mastering_display_metadata.h>
/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
* HDR Metadata Infoframe as per CTA 861.G spec. This is expected
* to match exactly with the spec.
*
* Userspace is expected to pass the metadata information as per
* the format described in this structure.
*/
struct hdr_metadata_infoframe {
/**
* @eotf: Electro-Optical Transfer Function (EOTF)
* used in the stream.
*/
__u8 eotf;
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u8 metadata_type;
/**
* @display_primaries: Color Primaries of the Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @display_primaries.x: X coordinate of color primary.
* @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
} display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @white_point.x: X coordinate of whitepoint of color primary.
* @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
} white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_display_mastering_luminance;
/**
* @min_display_mastering_luminance: Min Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of
* 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
* represents 6.5535 cd/m2.
*/
__u16 min_display_mastering_luminance;
/**
* @max_cll: Max Content Light Level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_cll;
/**
* @max_fall: Max Frame Average Light Level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_fall;
};
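/*
 * Worked example for the fixed-point luminance encodings above
 * (illustrative only): a display mastered at 1000 cd/m2 peak and
 * 0.005 cd/m2 black level, with MaxCLL 1000 and MaxFALL 400, is sent as
 *
 *     max_display_mastering_luminance = 1000   (units of 1 cd/m2)
 *     min_display_mastering_luminance = 50     (units of 0.0001 cd/m2)
 *     max_cll                         = 1000
 *     max_fall                        = 400
 */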
/**
* struct hdr_output_metadata - HDR output metadata
*
* Metadata Information to be passed from userspace
*/
struct hdr_output_metadata {
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u32 metadata_type;
/**
* @hdmi_metadata_type1: HDR Metadata Infoframe.
*/
union {
struct hdr_metadata_infoframe hdmi_metadata_type1;
};
};
enum hdr_metadata_eotf {
EOTF_TRADITIONAL_GAMMA_SDR,
EOTF_TRADITIONAL_GAMMA_HDR,
EOTF_ST2084,
EOTF_HLG,
};
enum metadata_id {
METADATA_TYPE1,
};
void
weston_hdr_metadata(void *data,
uint16_t display_primary_r_x,
uint16_t display_primary_r_y,
uint16_t display_primary_g_x,
uint16_t display_primary_g_y,
uint16_t display_primary_b_x,
uint16_t display_primary_b_y,
uint16_t white_point_x,
uint16_t white_point_y,
uint16_t min_luminance,
uint16_t max_luminance,
uint16_t max_cll,
uint16_t max_fall,
enum hdr_metadata_eotf eotf)
{
uint8_t *data8;
uint16_t *data16;
data8 = data;
*data8++ = eotf;
*data8++ = METADATA_TYPE1;
data16 = (void*)data8;
*data16++ = display_primary_r_x;
*data16++ = display_primary_r_y;
*data16++ = display_primary_g_x;
*data16++ = display_primary_g_y;
*data16++ = display_primary_b_x;
*data16++ = display_primary_b_y;
*data16++ = white_point_x;
*data16++ = white_point_y;
*data16++ = max_luminance;
*data16++ = min_luminance;
*data16++ = max_cll;
*data16++ = max_fall;
}
struct weston_vector {
float f[4];
};
struct weston_colorspace {
struct weston_vector r, g, b;
struct weston_vector whitepoint;
const char *name;
const char *whitepoint_name;
};
struct weston_colorspace hdr10;
static const struct weston_colorspace bt470m = {
.r = {{ 0.670f, 0.330f, }},
.g = {{ 0.210f, 0.710f, }},
.b = {{ 0.140f, 0.080f, }},
.whitepoint = {{ 0.3101f, 0.3162f, }},
.name = "BT.470 M",
.whitepoint_name = "C",
};
static const struct weston_colorspace bt470bg = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.290f, 0.600f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "BT.470 B/G",
.whitepoint_name = "D65",
};
static const struct weston_colorspace smpte170m = {
.r = {{ 0.630f, 0.340f, }},
.g = {{ 0.310f, 0.595f, }},
.b = {{ 0.155f, 0.070f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "SMPTE 170M",
.whitepoint_name = "D65",
};
static const struct weston_colorspace smpte240m = {
.r = {{ 0.630f, 0.340f, }},
.g = {{ 0.310f, 0.595f, }},
.b = {{ 0.155f, 0.070f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "SMPTE 240M",
.whitepoint_name = "D65",
};
static const struct weston_colorspace bt709 = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.300f, 0.600f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "BT.709",
.whitepoint_name = "D65",
};
static const struct weston_colorspace bt2020 = {
.r = {{ 0.708f, 0.292f, }},
.g = {{ 0.170f, 0.797f, }},
.b = {{ 0.131f, 0.046f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "BT.2020",
.whitepoint_name = "D65",
};
static const struct weston_colorspace srgb = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.300f, 0.600f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "sRGB",
.whitepoint_name = "D65",
};
static const struct weston_colorspace adobergb = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.210f, 0.710f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "AdobeRGB",
.whitepoint_name = "D65",
};
static const struct weston_colorspace dci_p3 = {
.r = {{ 0.680f, 0.320f, }},
.g = {{ 0.265f, 0.690f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "DCI-P3 D65",
.whitepoint_name = "D65",
};
static const struct weston_colorspace prophotorgb = {
.r = {{ 0.7347f, 0.2653f, }},
.g = {{ 0.1596f, 0.8404f, }},
.b = {{ 0.0366f, 0.0001f, }},
.whitepoint = {{ .3457, .3585 }},
.name = "ProPhoto RGB",
.whitepoint_name = "D50",
};
static const struct weston_colorspace ciergb = {
.r = {{ 0.7347f, 0.2653f, }},
.g = {{ 0.2738f, 0.7174f, }},
.b = {{ 0.1666f, 0.0089f, }},
.whitepoint = {{ 1.0f / 3.0f, 1.0f / 3.0f, }},
.name = "CIE RGB",
.whitepoint_name = "E",
};
static const struct weston_colorspace ciexyz = {
.r = {{ 1.0f, 0.0f, }},
.g = {{ 0.0f, 1.0f, }},
.b = {{ 0.0f, 0.0f, }},
.whitepoint = {{ 1.0f / 3.0f, 1.0f / 3.0f, }},
.name = "CIE XYZ",
.whitepoint_name = "E",
};
const struct weston_colorspace ap0 = {
.r = {{ 0.7347f, 0.2653f, }},
.g = {{ 0.0000f, 1.0000f, }},
.b = {{ 0.0001f, -0.0770f, }},
.whitepoint = {{ .32168f, .33767f, }},
.name = "ACES primaries #0",
.whitepoint_name = "D60",
};
const struct weston_colorspace ap1 = {
.r = {{ 0.713f, 0.393f, }},
.g = {{ 0.165f, 0.830f, }},
.b = {{ 0.128f, 0.044f, }},
.whitepoint = {{ 0.32168f, 0.33767f, }},
.name = "ACES primaries #1",
.whitepoint_name = "D60",
};
static const struct weston_colorspace * const colorspaces[] = {
&bt470m,
&bt470bg,
&smpte170m,
&smpte240m,
&bt709,
&bt2020,
&srgb,
&adobergb,
&dci_p3,
&prophotorgb,
&ciergb,
&ciexyz,
&ap0,
&ap1,
};
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof(a)[0])
const struct weston_colorspace *
weston_colorspace_lookup(const char *name)
{
unsigned i;
if (!name)
return NULL;
for (i = 0; i < ARRAY_LENGTH(colorspaces); i++) {
const struct weston_colorspace *c = colorspaces[i];
if (!strcmp(c->name, name))
return c;
}
return NULL;
}
static int cleanup=0;
static uint16_t encode_xyy(float xyy)
{
return xyy * 50000;
}
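/*
 * encode_xyy() maps a CIE xy chromaticity coordinate into the 0.00002
 * step units of hdr_metadata_infoframe (50000 == 1 / 0.00002); for example
 * the D65 white point x = 0.3127 encodes to 0.3127 * 50000 = 15635 and the
 * BT.2020 red primary x = 0.708 to 35400.
 */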
static AVMasteringDisplayMetadata md_save = {0};
static AVContentLightMetadata ld_save = {0};
static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSideData *sd2)
{
drmModeAtomicReqPtr ModeReq;
struct weston_colorspace *cs;
enum hdr_metadata_eotf eotf;
struct hdr_output_metadata data;
int ret,MaxCLL=1500,MaxFALL=400;
int max_lum=4000,min_lum=0050;
struct AVMasteringDisplayMetadata *md = NULL;
struct AVContentLightMetadata *ld = NULL;
if (render->hdr_metadata == -1) { // Metadata not supported
return;
}
// clean up FFmpeg stuff
if (trc == AVCOL_TRC_BT2020_10)
trc = AVCOL_TRC_ARIB_STD_B67;
if (trc == AVCOL_TRC_UNSPECIFIED)
trc = AVCOL_TRC_BT709;
if (color == AVCOL_PRI_UNSPECIFIED)
color = AVCOL_PRI_BT709;
if ((old_color == color && old_trc == trc && !sd1 && !sd2) || !render->hdr_metadata)
return; // nothing to do
if (sd1)
md = sd1->data;
if (sd2)
ld = sd2->data;
if (md && !memcmp(md,&md_save,sizeof(md_save)))
if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) {
return;
}
else if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) {
return;
}
if (ld)
memcpy(&ld_save,ld,sizeof(ld_save));
if (md)
memcpy(&md_save,md,sizeof(md_save));
Debug(3,"Update HDR to TRC %d color %d\n",trc,color);
if (trc == AVCOL_TRC_BT2020_10)
trc = AVCOL_TRC_ARIB_STD_B67;
old_color = color;
old_trc = trc;
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
switch(trc) {
case AVCOL_TRC_BT709: // 1
case AVCOL_TRC_UNSPECIFIED: // 2
eotf = EOTF_TRADITIONAL_GAMMA_SDR;
break;
case AVCOL_TRC_BT2020_10: // 14
case AVCOL_TRC_BT2020_12:
case AVCOL_TRC_ARIB_STD_B67: // 18 HLG
eotf = EOTF_HLG;
break;
case AVCOL_TRC_SMPTE2084: // 16
eotf = EOTF_ST2084;
break;
default:
eotf = EOTF_TRADITIONAL_GAMMA_SDR;
break;
}
switch (color) {
case AVCOL_PRI_BT709: // 1
case AVCOL_PRI_UNSPECIFIED: // 2
cs = weston_colorspace_lookup("BT.709");
break;
case AVCOL_PRI_BT2020: // 9
cs = weston_colorspace_lookup("BT.2020");
break;
case AVCOL_PRI_BT470BG: // 5
cs = weston_colorspace_lookup("BT.470 B/G"); // BT.601
break;
default:
cs = weston_colorspace_lookup("BT.709");
break;
}
if (md) { // we got Metadata
if (md->has_primaries) {
Debug(3,"Mastering Display Metadata,\n has_primaries:%d has_luminance:%d \n"
"r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f) \n"
"min_luminance=%f, max_luminance=%f\n",
md->has_primaries, md->has_luminance,
av_q2d(md->display_primaries[0][0]),
av_q2d(md->display_primaries[0][1]),
av_q2d(md->display_primaries[1][0]),
av_q2d(md->display_primaries[1][1]),
av_q2d(md->display_primaries[2][0]),
av_q2d(md->display_primaries[2][1]),
av_q2d(md->white_point[0]), av_q2d(md->white_point[1]),
av_q2d(md->min_luminance), av_q2d(md->max_luminance));
cs = &hdr10;
cs->r.f[0] = (float)md->display_primaries[0][0].num / (float)md->display_primaries[0][0].den;
cs->r.f[1] = (float)md->display_primaries[0][1].num / (float)md->display_primaries[0][1].den;
cs->g.f[0] = (float)md->display_primaries[1][0].num / (float)md->display_primaries[1][0].den;
cs->g.f[1] = (float)md->display_primaries[1][1].num / (float)md->display_primaries[1][1].den;
cs->b.f[0] = (float)md->display_primaries[2][0].num / (float)md->display_primaries[2][0].den;
cs->b.f[1] = (float)md->display_primaries[2][1].num / (float)md->display_primaries[2][1].den;
cs->whitepoint.f[0] = (float)md->white_point[0].num / (float)md->white_point[0].den;
cs->whitepoint.f[1] = (float)md->white_point[1].num / (float)md->white_point[1].den;
}
if (md->has_luminance) {
max_lum = av_q2d(md->max_luminance);
min_lum = av_q2d(md->min_luminance) * 10000 ;
printf("max_lum %d min_lum %d\n",max_lum,min_lum);
}
}
if (ld) {
Debug(3,"Has MaxCLL %d MaxFALL %d\n",ld->MaxCLL,ld->MaxFALL);
MaxCLL = ld->MaxCLL;
MaxFALL = ld->MaxFALL;
}
data.metadata_type = 7; // FIXME: correct metadata_type value is unclear
weston_hdr_metadata(&data.hdmi_metadata_type1,
encode_xyy(cs->r.f[0]),
encode_xyy(cs->r.f[1]),
encode_xyy(cs->g.f[0]),
encode_xyy(cs->g.f[1]),
encode_xyy(cs->b.f[0]),
encode_xyy(cs->b.f[1]),
encode_xyy(cs->whitepoint.f[0]),
encode_xyy(cs->whitepoint.f[1]),
max_lum, // max_display_mastering_luminance
min_lum, // min_display_mastering_luminance
MaxCLL, // Maximum Content Light Level (MaxCLL)
MaxFALL, // Maximum Frame-Average Light Level (MaxFALL)
eotf);
ret = drmModeCreatePropertyBlob(render->fd_drm, &data, sizeof(data), &render->hdr_blob_id);
if (ret) {
printf("DRM: HDR metadata: failed blob create \n");
render->hdr_blob_id = 0;
return;
}
ret = drmModeConnectorSetProperty(render->fd_drm, render->connector_id,
render->hdr_metadata, render->hdr_blob_id);
if (ret) {
printf("DRM: HDR metadata: failed property set %d\n",ret);
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
render->hdr_blob_id = 0;
return;
}
m_need_modeset = 1;
Debug(3,"DRM: HDR metadata: prop set\n");
}

misc.h (14 changed lines)

@@ -81,7 +81,7 @@ static inline void Syslog(const int level, const char *format, ...)
/**
** Show error.
*/
#define Error(fmt...) Syslog(0, fmt)
#define Error(fmt...) Syslog(LOG_ERR, fmt)
/**
** Show fatal error.
@@ -91,12 +91,12 @@ static inline void Syslog(const int level, const char *format, ...)
/**
** Show warning.
*/
#define Warning(fmt...) Syslog(1, fmt)
#define Warning(fmt...) Syslog(LOG_WARNING, fmt)
/**
** Show info.
*/
#define Info(fmt...) Syslog(2, fmt)
#define Info(fmt...) Syslog(LOG_INFO, fmt)
/**
** Show debug.
@@ -125,9 +125,8 @@ static inline const char *Timestamp2String(int64_t ts)
return "--:--:--.---";
}
idx = (idx + 1) % 3;
snprintf(buf[idx], sizeof(buf[idx]), "%2d:%02d:%02d.%03d",
(int)(ts / (90 * 3600000)), (int)((ts / (90 * 60000)) % 60),
(int)((ts / (90 * 1000)) % 60), (int)((ts / 90) % 1000));
snprintf(buf[idx], sizeof(buf[idx]), "%2d:%02d:%02d.%03d", (int)(ts / (90 * 3600000)),
(int)((ts / (90 * 60000)) % 60), (int)((ts / (90 * 1000)) % 60), (int)((ts / 90) % 1000));
return buf[idx];
}
@@ -153,6 +152,7 @@ static inline uint32_t GetMsTicks(void)
return (tval.tv_sec * 1000) + (tval.tv_usec / 1000);
#endif
}
static inline uint64_t GetusTicks(void)
{
@@ -160,7 +160,7 @@ static inline uint64_t GetusTicks(void)
struct timespec tspec;
clock_gettime(CLOCK_MONOTONIC, &tspec);
return (tspec.tv_sec * 1000000) + (tspec.tv_nsec) ;
return (uint64_t) (tspec.tv_sec * 1000000) + (tspec.tv_nsec);
#else
struct timeval tval;
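The Timestamp2String() hunk above turns a PTS that ticks at 90 kHz into h:mm:ss.mmm. A tiny standalone check of that arithmetic (the sample PTS is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t ts = 8100000;                   /* 8,100,000 ticks / 90 = 90,000 ms */
    printf("%2d:%02d:%02d.%03d\n",
        (int)(ts / (90 * 3600000)),         /* 90 ticks/ms * 3,600,000 ms/h */
        (int)((ts / (90 * 60000)) % 60),
        (int)((ts / (90 * 1000)) % 60),
        (int)((ts / 90) % 1000));           /* prints " 0:01:30.000" */
    return 0;
}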

File diff suppressed because it is too large.


@@ -20,22 +20,18 @@
#define FT_ERRORDEF( e, v, s ) { e, s },
#define FT_ERROR_START_LIST {
#define FT_ERROR_END_LIST { 0, 0 } };
const struct {
const struct
{
int code;
const char *message;
} FT_Errors[] =
#include FT_ERRORS_H
#include <memory>
#include <queue>
#include <vdr/plugin.h>
#include <vdr/osd.h>
#include <vdr/thread.h>
#include "softhddev.h"
extern "C"
{
#include <stdint.h>
@@ -49,7 +45,8 @@ extern "C"
extern "C" pthread_mutex_t OSDMutex;
struct sOglImage {
struct sOglImage
{
GLuint texture;
GLint width;
GLint height;
@@ -65,22 +62,28 @@ void ConvertColor(const GLint &colARGB, glm::vec4 &col);
/****************************************************************************************
* cShader
****************************************************************************************/
enum eShaderType {
enum eShaderType
{
stRect,
stTexture,
stText,
stCount
};
class cShader {
class cShader
{
private:
eShaderType type;
GLuint id;
bool Compile(const char *vertexCode, const char *fragmentCode);
bool CheckCompileErrors(GLuint object, bool program = false);
public:
cShader(void) {};
virtual ~cShader(void) {};
cShader(void)
{
};
virtual ~ cShader(void)
{
};
bool Load(eShaderType type);
void Use(void);
void SetFloat(const GLchar * name, GLfloat value);
@@ -94,44 +97,67 @@ public:
/****************************************************************************************
* cOglGlyph
****************************************************************************************/
class cOglGlyph : public cListObject {
class cOglGlyph:public cListObject
{
private:
struct tKerning {
struct tKerning
{
public:
tKerning(uint prevSym, GLfloat kerning = 0.0f) {
tKerning(FT_ULong prevSym, GLfloat kerning = 0.0f) {
this->prevSym = prevSym;
this->kerning = kerning;
}
uint prevSym;
FT_ULong prevSym;
GLfloat kerning;
};
uint charCode;
FT_ULong charCode;
int bearingLeft;
int bearingTop;
int width;
int height;
int advanceX;
cVector < tKerning > kerningCache;
GLuint texture;
void LoadTexture(FT_BitmapGlyph ftGlyph);
public:
cOglGlyph(uint charCode, FT_BitmapGlyph ftGlyph);
cOglGlyph(FT_ULong charCode, FT_BitmapGlyph ftGlyph);
virtual ~ cOglGlyph();
uint CharCode(void) { return charCode; }
int AdvanceX(void) { return advanceX; }
int BearingLeft(void) const { return bearingLeft; }
int BearingTop(void) const { return bearingTop; }
int Width(void) const { return width; }
int Height(void) const { return height; }
int GetKerningCache(uint prevSym);
void SetKerningCache(uint prevSym, int kerning);
FT_ULong CharCode(void)
{
return charCode;
}
int AdvanceX(void)
{
return advanceX;
}
int BearingLeft(void) const
{
return bearingLeft;
}
int BearingTop(void) const
{
return bearingTop;
}
int Width(void) const
{
return width;
}
int Height(void) const
{
return height;
}
int GetKerningCache(FT_ULong prevSym);
void SetKerningCache(FT_ULong prevSym, int kerning);
void BindTexture(void);
};
/****************************************************************************************
* cOglFont
****************************************************************************************/
class cOglFont : public cListObject {
class cOglFont:public cListObject
{
private:
static bool initiated;
cString name;
@@ -148,19 +174,32 @@ public:
virtual ~ cOglFont(void);
static cOglFont *Get(const char *name, int charHeight);
static void Cleanup(void);
const char *Name(void) { return *name; };
int Size(void) { return size; };
int Bottom(void) {return bottom; };
int Height(void) {return height; };
cOglGlyph* Glyph(uint charCode) const;
int Kerning(cOglGlyph *glyph, uint prevSym) const;
const char *Name(void)
{
return *name;
};
int Size(void)
{
return size;
};
int Bottom(void)
{
return bottom;
};
int Height(void)
{
return height;
};
cOglGlyph *Glyph(FT_ULong charCode) const;
int Kerning(cOglGlyph * glyph, FT_ULong prevSym) const;
};
/****************************************************************************************
* cOglFb
* Framebuffer Object - OpenGL part of a Pixmap
****************************************************************************************/
class cOglFb {
class cOglFb
{
protected:
bool initiated;
// GLuint fb;
@@ -174,7 +213,10 @@ public:
cOglFb(GLint width, GLint height, GLint viewPortWidth, GLint viewPortHeight);
virtual ~ cOglFb(void);
bool Initiated(void) { return initiated; }
bool Initiated(void)
{
return initiated;
}
virtual bool Init(void);
void Bind(void);
void BindRead(void);
@@ -182,18 +224,34 @@ public:
virtual void Unbind(void);
bool BindTexture(void);
void Blit(GLint destX1, GLint destY1, GLint destX2, GLint destY2);
GLint Width(void) { return width; };
GLint Height(void) { return height; };
bool Scrollable(void) { return scrollable; };
GLint ViewportWidth(void) { return viewPortWidth; };
GLint ViewportHeight(void) { return viewPortHeight; };
GLint Width(void)
{
return width;
};
GLint Height(void)
{
return height;
};
bool Scrollable(void)
{
return scrollable;
};
GLint ViewportWidth(void)
{
return viewPortWidth;
};
GLint ViewportHeight(void)
{
return viewPortHeight;
};
};
/****************************************************************************************
* cOglOutputFb
* Output Framebuffer Object - holds Vdpau Output Surface which is our "output framebuffer"
****************************************************************************************/
class cOglOutputFb : public cOglFb {
class cOglOutputFb:public cOglFb
{
protected:
bool initiated;
private:
@@ -212,7 +270,8 @@ public:
* cOglVb
* Vertex Buffer - OpenGl Vertices for the different drawing commands
****************************************************************************************/
enum eVertexBufferType {
enum eVertexBufferType
{
vbRect,
vbEllipse,
vbSlope,
@@ -221,7 +280,8 @@ enum eVertexBufferType {
vbCount
};
class cOglVb {
class cOglVb
{
private:
eVertexBufferType type;
eShaderType shader;
@@ -250,91 +310,141 @@ public:
/****************************************************************************************
* cOpenGLCmd
****************************************************************************************/
class cOglCmd {
class cOglCmd
{
protected:
cOglFb * fb;
public:
cOglCmd(cOglFb *fb) { this->fb = fb; };
virtual ~cOglCmd(void) {};
cOglCmd(cOglFb * fb)
{
this->fb = fb;
};
virtual ~ cOglCmd(void)
{
};
virtual const char *Description(void) = 0;
virtual bool Execute(void) = 0;
};
class cOglCmdInitOutputFb : public cOglCmd {
class cOglCmdInitOutputFb:public cOglCmd
{
private:
cOglOutputFb * oFb;
public:
cOglCmdInitOutputFb(cOglOutputFb * oFb);
virtual ~cOglCmdInitOutputFb(void) {};
virtual const char* Description(void) { return "InitOutputFramebuffer"; }
virtual ~ cOglCmdInitOutputFb(void)
{
};
virtual const char *Description(void)
{
return "InitOutputFramebuffer";
}
virtual bool Execute(void);
};
class cOglCmdInitFb : public cOglCmd {
class cOglCmdInitFb:public cOglCmd
{
private:
cCondWait * wait;
public:
cOglCmdInitFb(cOglFb * fb, cCondWait * wait = NULL);
virtual ~cOglCmdInitFb(void) {};
virtual const char* Description(void) { return "InitFramebuffer"; }
virtual ~ cOglCmdInitFb(void)
{
};
virtual const char *Description(void)
{
return "InitFramebuffer";
}
virtual bool Execute(void);
};
class cOglCmdDeleteFb : public cOglCmd {
class cOglCmdDeleteFb:public cOglCmd
{
public:
cOglCmdDeleteFb(cOglFb * fb);
virtual ~cOglCmdDeleteFb(void) {};
virtual const char* Description(void) { return "DeleteFramebuffer"; }
virtual ~ cOglCmdDeleteFb(void)
{
};
virtual const char *Description(void)
{
return "DeleteFramebuffer";
}
virtual bool Execute(void);
};
class cOglCmdRenderFbToBufferFb : public cOglCmd {
class cOglCmdRenderFbToBufferFb:public cOglCmd
{
private:
cOglFb * buffer;
GLfloat x, y;
GLfloat drawPortX, drawPortY;
GLint transparency;
public:
cOglCmdRenderFbToBufferFb(cOglFb *fb, cOglFb *buffer, GLint x, GLint y, GLint transparency, GLint drawPortX, GLint drawPortY);
virtual ~cOglCmdRenderFbToBufferFb(void) {};
virtual const char* Description(void) { return "Render Framebuffer to Buffer"; }
cOglCmdRenderFbToBufferFb(cOglFb * fb, cOglFb * buffer, GLint x, GLint y, GLint transparency, GLint drawPortX,
GLint drawPortY);
virtual ~ cOglCmdRenderFbToBufferFb(void)
{
};
virtual const char *Description(void)
{
return "Render Framebuffer to Buffer";
}
virtual bool Execute(void);
};
class cOglCmdCopyBufferToOutputFb : public cOglCmd {
class cOglCmdCopyBufferToOutputFb:public cOglCmd
{
private:
cOglOutputFb * oFb;
GLint x, y;
public:
cOglCmdCopyBufferToOutputFb(cOglFb * fb, cOglOutputFb * oFb, GLint x, GLint y);
virtual ~cOglCmdCopyBufferToOutputFb(void) {};
virtual const char* Description(void) { return "Copy buffer to OutputFramebuffer"; }
virtual ~ cOglCmdCopyBufferToOutputFb(void)
{
};
virtual const char *Description(void)
{
return "Copy buffer to OutputFramebuffer";
}
virtual bool Execute(void);
};
class cOglCmdFill : public cOglCmd {
class cOglCmdFill:public cOglCmd
{
private:
GLint color;
public:
cOglCmdFill(cOglFb * fb, GLint color);
virtual ~cOglCmdFill(void) {};
virtual const char* Description(void) { return "Fill"; }
virtual ~ cOglCmdFill(void)
{
};
virtual const char *Description(void)
{
return "Fill";
}
virtual bool Execute(void);
};
class cOglCmdDrawRectangle : public cOglCmd {
class cOglCmdDrawRectangle:public cOglCmd
{
private:
GLint x, y;
GLint width, height;
GLint color;
public:
cOglCmdDrawRectangle(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color);
virtual ~cOglCmdDrawRectangle(void) {};
virtual const char* Description(void) { return "DrawRectangle"; }
virtual ~ cOglCmdDrawRectangle(void)
{
};
virtual const char *Description(void)
{
return "DrawRectangle";
}
virtual bool Execute(void);
};
class cOglCmdDrawEllipse : public cOglCmd {
class cOglCmdDrawEllipse:public cOglCmd
{
private:
GLint x, y;
GLint width, height;
@@ -345,12 +455,18 @@ private:
GLfloat *CreateVerticesHalf(int &numVertices);
public:
cOglCmdDrawEllipse(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint quadrants);
virtual ~cOglCmdDrawEllipse(void) {};
virtual const char* Description(void) { return "DrawEllipse"; }
virtual ~ cOglCmdDrawEllipse(void)
{
};
virtual const char *Description(void)
{
return "DrawEllipse";
}
virtual bool Execute(void);
};
class cOglCmdDrawSlope : public cOglCmd {
class cOglCmdDrawSlope:public cOglCmd
{
private:
GLint x, y;
GLint width, height;
@@ -358,12 +474,18 @@ private:
GLint type;
public:
cOglCmdDrawSlope(cOglFb * fb, GLint x, GLint y, GLint width, GLint height, GLint color, GLint type);
virtual ~cOglCmdDrawSlope(void) {};
virtual const char* Description(void) { return "DrawSlope"; }
virtual ~ cOglCmdDrawSlope(void)
{
};
virtual const char *Description(void)
{
return "DrawSlope";
}
virtual bool Execute(void);
};
class cOglCmdDrawText : public cOglCmd {
class cOglCmdDrawText:public cOglCmd
{
private:
GLint x, y;
GLint limitX;
@@ -372,55 +494,80 @@ private:
int fontSize;
unsigned int *symbols;
public:
cOglCmdDrawText(cOglFb *fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name, int fontSize, tColor colorText);
cOglCmdDrawText(cOglFb * fb, GLint x, GLint y, unsigned int *symbols, GLint limitX, const char *name,
int fontSize, tColor colorText);
virtual ~ cOglCmdDrawText(void);
virtual const char* Description(void) { return "DrawText"; }
virtual const char *Description(void)
{
return "DrawText";
}
virtual bool Execute(void);
};
class cOglCmdDrawImage : public cOglCmd {
class cOglCmdDrawImage:public cOglCmd
{
private:
tColor * argb;
GLint x, y, width, height;
bool overlay;
GLfloat scaleX, scaleY;
public:
cOglCmdDrawImage(cOglFb *fb, tColor *argb, GLint width, GLint height, GLint x, GLint y, bool overlay = true, double scaleX = 1.0f, double scaleY = 1.0f);
cOglCmdDrawImage(cOglFb * fb, tColor * argb, GLint width, GLint height, GLint x, GLint y, bool overlay =
true, double scaleX = 1.0f, double scaleY = 1.0f);
virtual ~ cOglCmdDrawImage(void);
virtual const char* Description(void) { return "Draw Image"; }
virtual const char *Description(void)
{
return "Draw Image";
}
virtual bool Execute(void);
};
class cOglCmdDrawTexture : public cOglCmd {
class cOglCmdDrawTexture:public cOglCmd
{
private:
sOglImage * imageRef;
GLint x, y;
public:
cOglCmdDrawTexture(cOglFb * fb, sOglImage * imageRef, GLint x, GLint y);
virtual ~cOglCmdDrawTexture(void) {};
virtual const char* Description(void) { return "Draw Texture"; }
virtual ~ cOglCmdDrawTexture(void)
{
};
virtual const char *Description(void)
{
return "Draw Texture";
}
virtual bool Execute(void);
};
class cOglCmdStoreImage : public cOglCmd {
class cOglCmdStoreImage:public cOglCmd
{
private:
sOglImage * imageRef;
tColor *data;
public:
cOglCmdStoreImage(sOglImage * imageRef, tColor * argb);
virtual ~ cOglCmdStoreImage(void);
virtual const char* Description(void) { return "Store Image"; }
virtual const char *Description(void)
{
return "Store Image";
}
virtual bool Execute(void);
};
class cOglCmdDropImage : public cOglCmd {
class cOglCmdDropImage:public cOglCmd
{
private:
sOglImage * imageRef;
cCondWait *wait;
public:
cOglCmdDropImage(sOglImage * imageRef, cCondWait * wait);
virtual ~cOglCmdDropImage(void) {};
virtual const char* Description(void) { return "Drop Image"; }
virtual ~ cOglCmdDropImage(void)
{
};
virtual const char *Description(void)
{
return "Drop Image";
}
virtual bool Execute(void);
};
@@ -430,7 +577,8 @@ public:
#define OGL_MAX_OSDIMAGES 256
#define OGL_CMDQUEUE_SIZE 100
class cOglThread : public cThread {
class cOglThread:public cThread
{
private:
cCondWait * startWait;
cCondWait *wait;
@@ -459,25 +607,44 @@ public:
int StoreImage(const cImage & image);
void DropImageData(int imageHandle);
sOglImage *GetImageRef(int slot);
int MaxTextureSize(void) { return maxTextureSize; };
int MaxTextureSize(void)
{
return maxTextureSize;
};
};
/****************************************************************************************
* cOglPixmap
****************************************************************************************/
class cOglPixmap : public cPixmap {
class cOglPixmap:public cPixmap
{
private:
cOglFb * fb;
std::shared_ptr < cOglThread > oglThread;
bool dirty;
public:
cOglPixmap(std::shared_ptr<cOglThread> oglThread, int Layer, const cRect &ViewPort, const cRect &DrawPort = cRect::Null);
cOglPixmap(std::shared_ptr < cOglThread > oglThread, int Layer, const cRect & ViewPort, const cRect & DrawPort =
cRect::Null);
virtual ~ cOglPixmap(void);
cOglFb *Fb(void) { return fb; };
int X(void) { return ViewPort().X(); };
int Y(void) { return ViewPort().Y(); };
virtual bool IsDirty(void) { return dirty; }
virtual void SetDirty(bool dirty = true) { this->dirty = dirty; }
cOglFb *Fb(void)
{
return fb;
};
int X(void)
{
return ViewPort().X();
};
int Y(void)
{
return ViewPort().Y();
};
virtual bool IsDirty(void)
{
return dirty;
}
virtual void SetDirty(bool dirty = true) {
this->dirty = dirty;
}
virtual void SetAlpha(int Alpha);
virtual void SetTile(bool Tile);
virtual void SetViewPort(const cRect & Rect);
@@ -487,8 +654,10 @@ public:
virtual void DrawImage(const cPoint & Point, const cImage & Image);
virtual void DrawImage(const cPoint & Point, int ImageHandle);
virtual void DrawPixel(const cPoint & Point, tColor Color);
virtual void DrawBitmap(const cPoint &Point, const cBitmap &Bitmap, tColor ColorFg = 0, tColor ColorBg = 0, bool Overlay = false);
virtual void DrawText(const cPoint &Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont *Font, int Width = 0, int Height = 0, int Alignment = taDefault);
virtual void DrawBitmap(const cPoint & Point, const cBitmap & Bitmap, tColor ColorFg = 0, tColor ColorBg =
0, bool Overlay = false);
virtual void DrawText(const cPoint & Point, const char *s, tColor ColorFg, tColor ColorBg, const cFont * Font,
int Width = 0, int Height = 0, int Alignment = taDefault);
virtual void DrawRectangle(const cRect & Rect, tColor Color);
virtual void DrawEllipse(const cRect & Rect, tColor Color, int Quadrants = 0);
virtual void DrawSlope(const cRect & Rect, tColor Color, int Type);
@@ -501,7 +670,8 @@ public:
/******************************************************************************
* cOglOsd
******************************************************************************/
class cOglOsd : public cOsd {
class cOglOsd:public cOsd
{
private:
cOglFb * bFb;
std::shared_ptr < cOglThread > oglThread;
@@ -515,7 +685,8 @@ public:
virtual cPixmap *CreatePixmap(int Layer, const cRect & ViewPort, const cRect & DrawPort = cRect::Null);
virtual void DestroyPixmap(cPixmap * Pixmap);
virtual void Flush(void);
virtual void DrawScaledBitmap(int x, int y, const cBitmap &Bitmap, double FactorX, double FactorY, bool AntiAlias = false);
virtual void DrawScaledBitmap(int x, int y, const cBitmap & Bitmap, double FactorX, double FactorY,
bool AntiAlias = false);
static cOglOutputFb *oFb;
};


@@ -0,0 +1,312 @@
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 8358152e403e..573ab6ea1a6e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1274,6 +1274,7 @@ struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
+ bool hdr_supported;
};
struct intel_digital_port {
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f8f1308643a9..a1d0127b7f57 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -35,6 +35,8 @@
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+
/* AUX addresses to write MCA AVI IF */
#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +106,31 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
return true;
}
+static bool lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ u8 hdr_caps;
+ int ret;
+
+ /* Enable HDR for MCA based LSPCON devices */
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = drm_dp_dpcd_read(&dp->aux, DPCD_MCA_LSPCON_HDR_STATUS,
+ &hdr_caps, 1);
+ else
+ return false;
+
+ if (ret < 0) {
+ DRM_DEBUG_KMS("hdr capability detection failed\n");
+ lspcon->hdr_supported = false;
+ return false;
+ } else if (hdr_caps & 0x1) {
+ DRM_DEBUG_KMS("lspcon capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+
+ return true;
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -581,6 +608,11 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
+ if (!lspcon_detect_hdr_capability(lspcon)) {
+ DRM_ERROR("LSPCON hdr detection failed\n");
+ return false;
+ }
+
connector->ycbcr_420_allowed = true;
lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
--
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b54ccbb5aad5..051e30ad80e7 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -576,6 +576,16 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
return val & mask;
}
+void lspcon_drm_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ DRM_DEBUG_KMS("Update HDR metadata for lspcon\n");
+ /* It uses the legacy hsw implementation for the same */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+}
+
static const u8 infoframe_type_to_idx[] = {
HDMI_PACKET_TYPE_GENERAL_CONTROL,
HDMI_PACKET_TYPE_GAMUT_METADATA,
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index a1d0127b7f57..51ad5f02e700 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -460,27 +460,41 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret;
+ bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
- /* LSPCON only needs AVI IF */
- if (type != HDMI_INFOFRAME_TYPE_AVI)
+ if (!(type == HDMI_INFOFRAME_TYPE_AVI ||
+ type == HDMI_PACKET_TYPE_GAMUT_METADATA))
return;
- if (lspcon->vendor == LSPCON_VENDOR_MCA)
- ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
- frame, len);
- else
- ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
- frame, len);
+ /*
+ * Supporting HDR on MCA LSPCON
+ * Todo: Add support for Parade later
+ */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA &&
+ lspcon->vendor != LSPCON_VENDOR_MCA)
+ return;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA) {
+ if (type == HDMI_INFOFRAME_TYPE_AVI)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ lspcon_drm_write_infoframe(encoder, crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ frame, VIDEO_DIP_DATA_SIZE);
+ } else {
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, frame,
+ len);
+ }
if (!ret) {
- DRM_ERROR("Failed to write AVI infoframes\n");
+ DRM_ERROR("Failed to write infoframes\n");
return;
}
- DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+ DRM_DEBUG_DRIVER("Infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 37cfddf8a9c5..65878904f672 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -35,4 +35,8 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
void lspcon_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *crtc_state);
+void lspcon_drm_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
--
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 51ad5f02e700..c32452360eeb 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -627,6 +627,11 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
+ if (lspcon->vendor == LSPCON_VENDOR_MCA && lspcon->hdr_supported)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+
connector->ycbcr_420_allowed = true;
lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
--
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index d0a937fb0c56..e78b3a1626fd 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -416,6 +416,7 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->hdr_output_metadata)
drm_property_blob_get(state->hdr_output_metadata);
+ state->hdr_metadata_changed = false;
/* Don't copy over a writeback job, they are used only once */
state->writeback_job = NULL;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0d466d3b0809..5beabcd42d30 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -734,6 +734,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
val,
sizeof(struct hdr_output_metadata), -1,
&replaced);
+ state->hdr_metadata_changed |= replaced;
return ret;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9ba794cb9b4f..dee3a593564c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3851,6 +3851,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon =
+ enc_to_intel_lspcon(&encoder->base);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -3860,6 +3862,12 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_psr_enable(intel_dp, crtc_state);
intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
+
+ /* Set the infoframe for NON modeset cases as well */
+ if (lspcon->active && lspcon->hdr_supported &&
+ conn_state->hdr_metadata_changed)
+ intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp, crtc_state,
+ conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 5eeafa45831a..cc616fd31d8b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4651,7 +4651,7 @@ intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
-static void
+void
intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 65878904f672..3404cff8c337 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -14,6 +14,7 @@ struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
+struct intel_dp;
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
@@ -39,4 +40,7 @@ void lspcon_drm_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
+void intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_LSPCON_H__ */
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 5f8c3389d46f..1f0b4fcf0bd3 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -661,6 +661,7 @@ struct drm_connector_state {
* DRM blob property for HDR output metadata
*/
struct drm_property_blob *hdr_output_metadata;
+ u8 hdr_metadata_changed : 1;
};
/**
--
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index c32452360eeb..8565bf73c4cd 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -505,6 +505,11 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
/* FIXME implement this */
}
+/* HDMI HDR Colorspace Spec Definitions */
+#define NORMAL_COLORIMETRY_MASK 0x3
+#define EXTENDED_COLORIMETRY_MASK 0x7
+#define HDMI_COLORIMETRY_BT2020_YCC ((3 << 0) | (6 << 2) | (0 << 5))
+
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -549,6 +554,19 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
+ /*
+ * Set BT2020 colorspace if driving HDR data
+ * ToDo: Make this generic and expose all colorspaces for lspcon
+ */
+ if (lspcon->active && conn_state->hdr_metadata_changed) {
+ frame.avi.colorimetry =
+ HDMI_COLORIMETRY_BT2020_YCC &
+ NORMAL_COLORIMETRY_MASK;
+ frame.avi.extended_colorimetry =
+ (HDMI_COLORIMETRY_BT2020_YCC >> 2) &
+ EXTENDED_COLORIMETRY_MASK;
+ }
+
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
DRM_ERROR("Failed to pack AVI IF\n");
--
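The lspcon_set_infoframes() hunk above splits the combined HDMI_COLORIMETRY_BT2020_YCC constant into the AVI InfoFrame colorimetry (C) and extended colorimetry (EC) fields. A quick standalone check of that bit packing, values per CTA-861 (sketch only):

#include <stdio.h>

#define NORMAL_COLORIMETRY_MASK   0x3
#define EXTENDED_COLORIMETRY_MASK 0x7
#define HDMI_COLORIMETRY_BT2020_YCC ((3 << 0) | (6 << 2) | (0 << 5))

int main(void)
{
    unsigned c = HDMI_COLORIMETRY_BT2020_YCC & NORMAL_COLORIMETRY_MASK;
    unsigned ec = (HDMI_COLORIMETRY_BT2020_YCC >> 2) & EXTENDED_COLORIMETRY_MASK;

    /* C=3 means "extended colorimetry valid", EC=6 selects BT.2020 YCbCr */
    printf("C=%u EC=%u\n", c, ec);          /* prints "C=3 EC=6" */
    return 0;
}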

patches/UHD-10Bit.patch

@@ -0,0 +1,38 @@
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index cc616fd31d8b..f2d1d7bd87d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -616,8 +616,10 @@ intel_dp_mode_valid(struct drm_connector *connector,
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&intel_encoder->base);
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
@@ -639,6 +641,21 @@ intel_dp_mode_valid(struct drm_connector *connector,
target_clock = fixed_mode->clock;
}
+ /*
+ * Reducing Blanking to incorporate DP and HDMI timing/link bandwidth
+ * limitations for CEA modes (4k@60 at 10 bpp). DP can drive 17.28Gbs
+ * while 4k modes (VIC97 etc) at 10 bpp required 17.8 Gbps. This will
+ * cause mode to blank out. Reduced Htotal by shortening the back porch
+ * and front porch within permissible limits.
+ */
+ if (lspcon->active && lspcon->hdr_supported &&
+ mode->clock > 570000) {
+ mode->clock = 570000;
+ mode->htotal -= 180;
+ mode->hsync_start -= 72;
+ mode->hsync_end -= 72;
+ }
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
--
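The bandwidth figures in that comment can be checked: VIC 97 (3840x2160@60) uses htotal 4400 and vtotal 2250, i.e. a 594 MHz pixel clock, and 10 bpc means 30 bits per pixel. A back-of-the-envelope sketch (the VIC timing numbers are taken from CTA-861, not from this patch):

#include <stdio.h>

int main(void)
{
    double clock_hz = 4400.0 * 2250.0 * 60.0;             /* VIC 97: 594 MHz */
    double need_gbps = clock_hz * 30.0 / 1e9;             /* 10 bpc -> 17.82 Gbps */
    double dp_gbps = 4 * 5.4 * 8.0 / 10.0;                /* HBR2, 4 lanes, 8b/10b -> 17.28 Gbps */
    double trimmed_hz = (4400.0 - 180.0) * 2250.0 * 60.0; /* 569.7 MHz, the 570000 kHz cap */

    printf("need %.2f Gbps, DP limit %.2f Gbps, trimmed %.1f MHz -> %.2f Gbps\n",
        need_gbps, dp_gbps, trimmed_hz / 1e6, trimmed_hz * 30.0 / 1e9);
    return 0;
}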


@@ -0,0 +1,14 @@
--- helpers.c.orig 2019-11-19 20:02:37.851039110 +0100
+++ helpers.c 2019-11-19 20:02:03.733164221 +0100
@@ -5,9 +5,9 @@
#include <vdr/skins.h>
cOsd *CreateOsd(int Left, int Top, int Width, int Height) {
- cOsd *osd = cOsdProvider::NewOsd(Left, Top);
+ cOsd *osd = cOsdProvider::NewOsd(cOsd::OsdLeft() + Left,cOsd::OsdTop() + Top);
if (osd) {
- tArea Area = { 0, 0, Width, Height, 32 };
+ tArea Area = { 0, 0, Width - 1, Height - 1, 32 };
if (osd->SetAreas(&Area, 1) == oeOk) {
return osd;
}


@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: VDR \n"
"Report-Msgid-Bugs-To: <see README>\n"
"POT-Creation-Date: 2018-08-18 15:05+0200\n"
"POT-Creation-Date: 2020-04-15 18:57+0200\n"
"PO-Revision-Date: blabla\n"
"Last-Translator: blabla\n"
"Language-Team: blabla\n"
@@ -251,23 +251,28 @@ msgstr ""
msgid "audio: %6dHz supports %d %d %d %d %d %d %d %d channels\n"
msgstr ""
msgid "codec: buggy libav, use ffmpeg\n"
msgstr ""
msgid "codec: can't allocate vodeo decoder\n"
msgstr ""
msgid "codec: missing close\n"
msgstr ""
#, c-format
msgid "codec: codec ID %#06x not found\n"
msgid " No decoder found"
msgstr ""
msgid "codec: can't allocate video codec context\n"
msgstr ""
msgid "codec: can't set options to video codec!\n"
msgid "codec: can't set option deint to video codec!\n"
msgstr ""
msgid "codec: can't set option surfces to video codec!\n"
msgstr ""
msgid "codec: can't set option drop 2.field to video codec!\n"
msgstr ""
msgid "codec: can't set option drop 2.field to video codec!\n"
msgstr ""
msgid "codec: can't open video codec!\n"
@@ -276,12 +281,19 @@ msgstr ""
msgid "codec: can't allocate video decoder frame buffer\n"
msgstr ""
msgid "video: Init of YADIF Filter failed\n"
msgstr ""
msgid "codec: can't allocate audio decoder\n"
msgstr ""
msgid "codec: can't allocate audio decoder frame buffer\n"
msgstr ""
#, c-format
msgid "codec: codec ID %#06x not found\n"
msgstr ""
msgid "codec: can't allocate audio codec context\n"
msgstr ""
@@ -291,42 +303,12 @@ msgstr ""
msgid "codec/audio: decoded data smaller than encoded\n"
msgstr ""
msgid "codec/audio: resample setup error\n"
msgstr ""
msgid "codec/audio: overwrite resample\n"
msgstr ""
msgid "codec/audio: AvResample setup error\n"
msgstr ""
msgid "codec: latm\n"
msgstr ""
msgid "codec: error audio data\n"
msgstr ""
msgid "codec: error more than one frame data\n"
msgstr ""
msgid "codec/audio: can't setup resample\n"
msgstr ""
msgid "codec/audio: can't open resample\n"
msgstr ""
msgid "codec/audio: latm\n"
msgstr ""
msgid "codec/audio: bad audio frame\n"
msgstr ""
msgid "codec/audio: error more than one frame data\n"
msgstr ""
msgid "codec/audio: no frame\n"
msgstr ""
msgid "A software and GPU emulated UHD device"
msgstr ""
@@ -360,6 +342,9 @@ msgstr "OSD Breite"
msgid "Osd height"
msgstr "OSD Höhe"
msgid "GPU mem used for image caching (MB)"
msgstr ""
msgid "Suspend"
msgstr "Unterbrechen"
@@ -384,8 +369,8 @@ msgstr "Video Hintergrundfrabe (RGB)"
msgid "Video background color (Alpha)"
msgstr "Video Hintergrundfarbe (Alpha)"
msgid "Use studio levels (vdpau only)"
msgstr "Benutze Studio Levels (nur vdpau)"
msgid "Use studio levels"
msgstr ""
msgid "60hz display mode"
msgstr "60Hz Anzeigemodus"
@@ -399,8 +384,11 @@ msgstr "Schwarz während Kanalwechsel"
msgid "Clear decoder on channel switch"
msgstr "Decoder bei Kanalwechsel leeren"
msgid "Brightness (-1000..1000) (vdpau)"
msgstr "Helligkeit (-1000..1000) (vdpau)"
msgid "Scaler Test"
msgstr ""
msgid "Brightness (-100..100)"
msgstr ""
msgid "min"
msgstr "min"
@@ -408,14 +396,26 @@ msgstr "min"
msgid "max"
msgstr "max"
msgid "Contrast (0..10000) (vdpau)"
msgstr "Kontrast (0..10000) (vdpau)"
msgid "Contrast (0..100)"
msgstr ""
msgid "Saturation (0..10000) (vdpau)"
msgstr "Sättigung (0..10000) (vdpau)"
msgid "Saturation (0..100)"
msgstr ""
msgid "Hue (-3141..3141) (vdpau)"
msgstr "Farbton (-3141..3141) (vdpau)"
msgid "Gamma (0..100)"
msgstr ""
msgid "Hue (-314..314) "
msgstr ""
msgid "Monitor Colorspace"
msgstr ""
msgid "Color Blindness"
msgstr ""
msgid "Color Correction (-100..100) "
msgstr ""
msgid "Scaling"
msgstr "Skalierung"
@@ -450,18 +450,6 @@ msgstr "Schneide oben und unten ab (Pixel)"
msgid "Cut left and right (pixel)"
msgstr "Schneide links und rechts ab (Pixel)"
msgid "Auto-crop"
msgstr ""
msgid "Autocrop interval (frames)"
msgstr ""
msgid "Autocrop delay (n * interval)"
msgstr ""
msgid "Autocrop tolerance (pixel)"
msgstr ""
msgid "Audio"
msgstr "Audio"
@@ -474,6 +462,15 @@ msgstr "Audio Driftkorrektur"
msgid "Pass-through default"
msgstr "Pass-Through-Standard"
msgid " PCM pass-through"
msgstr ""
msgid " AC-3 pass-through"
msgstr ""
msgid " E-AC-3 pass-through"
msgstr ""
msgid "Enable (E-)AC-3 (decoder) downmix"
msgstr "Aktiviere (E-)AC-3 (decoder) downmix"
@@ -611,6 +608,10 @@ msgstr "PIP schließen"
msgid " Frames missed(%d) duped(%d) dropped(%d) total(%d)"
msgstr " Frames verloren(%d) verdoppelt(%d) übersprungen(%d) Gesamt(%d)"
#, c-format
msgid " Video %dx%d Color: %s Gamma: %s"
msgstr ""
msgid "pass-through disabled"
msgstr ""
@@ -627,12 +628,6 @@ msgstr ""
msgid "surround downmix disabled"
msgstr ""
msgid "auto-crop disabled and freezed"
msgstr ""
msgid "auto-crop enabled"
msgstr ""
#, c-format
msgid "[softhddev]: hot key %d is not supported\n"
msgstr ""
@@ -690,10 +685,6 @@ msgstr ""
msgid "[softhddev] out of memory\n"
msgstr ""
#, c-format
msgid "video: packet buffer too small for %d\n"
msgstr ""
msgid "video: no empty slot in packet ringbuffer\n"
msgstr ""
@@ -714,10 +705,6 @@ msgstr ""
msgid "[softhddev] invalid video packet %d/%d bytes\n"
msgstr ""
#, c-format
msgid "[softhddev] empty video packet %d bytes\n"
msgstr ""
#, c-format
msgid "softhddev: %s called without hw decoder\n"
msgstr ""
@@ -766,34 +753,24 @@ msgstr ""
msgid "[softhddev] ready%s\n"
msgstr ""
msgid "video/glx: can't make glx context current\n"
msgid "video: can't lock thread\n"
msgstr ""
msgid "video: can't unlock thread\n"
msgstr ""
msgid "video/egl: GlxSetupWindow can't make egl/glx context current\n"
msgstr ""
msgid "video/glx: no v-sync\n"
msgstr ""
msgid "glewinit failed\n"
msgstr ""
msgid "video/glx: no GLX support\n"
msgstr ""
#, c-format
msgid "video/glx: glx version %d.%d\n"
msgstr ""
msgid "did not get FBconfig"
msgstr ""
msgid "video/glx: can't get a RGB visual\n"
msgstr ""
msgid "video/glx: no valid visual found\n"
msgstr ""
msgid "video/glx: need atleast 8-bits per RGB\n"
msgstr ""
msgid "video/glx: can't create glx context\n"
msgstr ""
@@ -815,8 +792,7 @@ msgstr ""
msgid "video/glx: SGI v-sync enabled\n"
msgstr ""
#, c-format
msgid "checkCudaErrors() Driver API error = %04d"
msgid "video/egl: can't create egl context\n"
msgstr ""
msgid "video/cuvid: surface needed not set\n"
@@ -826,88 +802,69 @@ msgstr ""
msgid "video/cuvid: release surface %#08x, which is not in use\n"
msgstr ""
msgid "Wrong ES version \n"
msgstr ""
msgid " Could not bind API!\n"
msgstr ""
msgid "Can't get EGL Extentions\n"
msgstr ""
msgid "Could not initialize EGL.\n"
msgstr ""
msgid "Could not create EGL Context\n"
msgstr ""
msgid "Could not create EGL surface!\n"
msgstr ""
msgid "Could not make context current!\n"
msgstr ""
msgid "video/cuvid: out of decoders\n"
msgstr ""
msgid "video/cuvid: out of memory\n"
msgstr ""
msgid "video/cuvid: need 1 future, 1 current, 1 back and 1 work surface\n"
msgstr ""
msgid "video/cuvid: decoder not in decoder list.\n"
msgstr ""
msgid "video/glx: glx error\n"
msgid "video/egl: egl init error\n"
msgstr ""
#, c-format
msgid "video/cuvid: SDL error %d: %d\n"
msgid "video/cuvid: SDL error %d\n"
msgstr ""
msgid "video/vdpau: no valid vdpau pixfmt found\n"
msgid "Unable to create placebo textures"
msgstr ""
msgid "video/vdpau: no valid profile found\n"
msgid "video: no valid pixfmt found\n"
msgstr ""
msgid "CUVID Init failed\n"
msgid "video: no valid profile found\n"
msgstr ""
msgid "CUVID Not found\n"
msgid "NO Format valid"
msgstr ""
msgid "Failed rendering frame!\n"
msgstr ""
#, c-format
msgid "video/vdpau: can't get output surface parameters: %s\n"
msgid "video/cuvid: output buffer full, dropping frame (%d/%d)\n"
msgstr ""
#, c-format
msgid "video/vdpau: can't render output surface: %s\n"
msgstr ""
msgid "video/vdpau: video surface size mismatch\n"
msgstr ""
msgid "video/vdpau: out of memory\n"
msgstr ""
#, c-format
msgid "video/vdpau: unsupported rgba format %d\n"
msgstr ""
#, c-format
msgid "video/vdpau: can't get video surface bits native: %s\n"
msgstr ""
#, c-format
msgid "video/vdpau: can't get video surface parameters: %s\n"
msgstr ""
#, c-format
msgid "video/vdpau: unsupported chroma type %d\n"
msgstr ""
#, c-format
msgid "video/vdpau: can't get video surface bits: %s\n"
msgstr ""
#, c-format
msgid "video/vdpau: output buffer full, dropping frame (%d/%d)\n"
msgstr ""
#, c-format
msgid "video/vdpau: pixel format %d not supported\n"
msgid "Could not dynamically load CUDA\n"
msgstr ""
msgid "Kein Cuda device gefunden"
msgstr ""
#, c-format
msgid "video/cuvid: can't block queue: %s\n"
msgstr ""
#, c-format
msgid "video/cuvid: missed frame (%d/%d)\n"
msgid "Failed to submit swapchain buffer\n"
msgstr ""
#, c-format
@@ -921,16 +878,25 @@ msgstr ""
msgid "video/event: No symbol for %d\n"
msgstr ""
msgid "video: can't lock thread\n"
msgid "Cant get memory for PLACEBO struct"
msgstr ""
msgid "video: can't unlock thread\n"
msgid "Failed initializing libplacebo\n"
msgstr ""
msgid "video: can't queue cancel video display thread\n"
msgid "Failed to create XCB Surface\n"
msgstr ""
msgid "video: can't cancel video display thread\n"
msgid "Failed to create Vulkan Device"
msgstr ""
msgid "Failed creating vulkan swapchain!"
msgstr ""
msgid "Failed initializing libplacebo renderer\n"
msgstr ""
msgid "video/egl: can't create thread egl context\n"
msgstr ""
#, c-format
@@ -944,11 +910,11 @@ msgid "softhddev: grab unsupported\n"
msgstr ""
#, c-format
msgid "video: Can't connect to X11 server on '%s'\n"
msgid "video: Can't initialize X11 thread support on '%s'\n"
msgstr ""
#, c-format
msgid "video: Can't initialize X11 thread support on '%s'\n"
msgid "video: Can't connect to X11 server on '%s'\n"
msgstr ""
msgid "video: Can't convert XLIB display to XCB connection\n"
@@ -964,5 +930,20 @@ msgstr ""
#~ msgid "A software and GPU emulated HD device"
#~ msgstr "Ein Software und GPU emulieres HD-Gerät"
#~ msgid "Use studio levels (vdpau only)"
#~ msgstr "Benutze Studio Levels (nur vdpau)"
#~ msgid "Brightness (-1000..1000) (vdpau)"
#~ msgstr "Helligkeit (-1000..1000) (vdpau)"
#~ msgid "Contrast (0..10000) (vdpau)"
#~ msgstr "Kontrast (0..10000) (vdpau)"
#~ msgid "Saturation (0..10000) (vdpau)"
#~ msgstr "Sättigung (0..10000) (vdpau)"
#~ msgid "Hue (-3141..3141) (vdpau)"
#~ msgstr "Farbton (-3141..3141) (vdpau)"
#~ msgid "SoftHdDevice"
#~ msgstr "SoftHdDevice"

shaders.h

@@ -1,103 +1,15 @@
// shader
#define SHADER_LENGTH 10000
char vertex[] = {"\
#version 330\n\
in vec2 vertex_position;\n\
in vec2 vertex_texcoord0;\n\
out vec2 texcoord0;\n\
in vec2 vertex_texcoord1;\n\
out vec2 texcoord1;\n\
in vec2 vertex_texcoord2;\n\
out vec2 texcoord2;\n\
in vec2 vertex_texcoord3;\n\
out vec2 texcoord3;\n\
in vec2 vertex_texcoord4;\n\
out vec2 texcoord4;\n\
in vec2 vertex_texcoord5;\n\
out vec2 texcoord5;\n\
void main() {\n\
gl_Position = vec4(vertex_position, 1.0, 1.0);\n\
texcoord0 = vertex_texcoord0;\n\
texcoord1 = vertex_texcoord1;\n\
texcoord2 = vertex_texcoord2;\n\
texcoord3 = vertex_texcoord3;\n\
texcoord4 = vertex_texcoord4;\n\
texcoord5 = vertex_texcoord5;\n\
}\n"};
char fragment[] = {"\
#version 330\n\
#define texture1D texture\n\
#define texture3D texture\n\
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
in vec2 texcoord1;\n\
in vec2 texcoord2;\n\
in vec2 texcoord3;\n\
in vec2 texcoord4;\n\
in vec2 texcoord5;\n\
uniform mat3 colormatrix;\n\
uniform vec3 colormatrix_c;\n\
uniform sampler2D texture0;\n\
//uniform vec2 texture_size0;\n\
//uniform mat2 texture_rot0;\n\
//uniform vec2 pixel_size0;\n\
uniform sampler2D texture1;\n\
//uniform vec2 texture_size1;\n\
//uniform mat2 texture_rot1;\n\
//uniform vec2 pixel_size1;\n\
//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\
void main() {\n\
vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\
color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\
color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\
// color conversion\n\
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\
color.a = 1.0;\n\
// color mapping\n\
out_color = color;\n\
}\n"};
char fragment_bt2100[] = {"\
#version 330\n\
#define texture1D texture\n\
#define texture3D texture\n\
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
in vec2 texcoord1;\n\
in vec2 texcoord2;\n\
in vec2 texcoord3;\n\
in vec2 texcoord4;\n\
in vec2 texcoord5;\n\
uniform mat3 colormatrix;\n\
uniform vec3 colormatrix_c;\n\
uniform mat3 cms_matrix;\n\
uniform sampler2D texture0;\n\
//uniform vec2 texture_size0;\n\
//uniform mat2 texture_rot0;\n\
//uniform vec2 pixel_size0;\n\
uniform sampler2D texture1;\n\
//uniform vec2 texture_size1;\n\
//uniform mat2 texture_rot1;\n\
//uniform vec2 pixel_size1;\n\
//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\
void main() {\n\
vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\
color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\
color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\
// color conversion\n\
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\
color.a = 1.0;\n\
// color mapping\n\
color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(2.4));\n\
color.rgb = cms_matrix * color.rgb;\n\
color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\
out_color = color;\n\
}\n"};
#ifdef CUVID
const char *gl_version = "#version 330";
#else
#ifdef RASPI
const char *gl_version = "#version 300 es";
#else
const char *gl_version = "#version 300 es ";
#endif
#endif
/* Color conversion matrix: RGB = m * YUV + c
* m is in row-major matrix, with m[row][col], e.g.:
@@ -112,50 +24,65 @@ out_color = color;\n\
* is the Y vector (1, 1, 1), the 2nd is the U vector, the 3rd the V vector.
* The matrix might also be used for other conversions and colorspaces.
*/
struct mp_cmat {
float m[3][3]; // colormatrix
float c[3]; //colormatrix_c
struct mp_cmat
{
GLfloat m[3][3]; // colormatrix
GLfloat c[3]; //colormatrix_c
};
struct mp_mat {
float m[3][3];
struct mp_mat
{
GLfloat m[3][3];
};
// YUV input limited range (16-235 for luma, 16-240 for chroma)
// ITU-R BT.601 (SD)
struct mp_cmat yuv_bt601 = {\
{{ 1.164384, 1.164384, 1.164384 },\
{ 0.00000, -0.391762, 2.017232 },\
{ 1.596027, -0.812968 , 0.000000 }},\
{-0.874202, 0.531668, -1.085631 } };
struct mp_cmat yuv_bt601 = { {{1.164384, 1.164384, 1.164384},
{0.00000, -0.391762, 2.017232},
{1.596027, -0.812968, 0.000000}},
{-0.874202, 0.531668, -1.085631}
};
// ITU-R BT.709 (HD)
struct mp_cmat yuv_bt709 = {\
{{ 1.164384, 1.164384, 1.164384 },\
{ 0.00000, -0.213249, 2.112402 },\
{ 1.792741, -0.532909 , 0.000000 }},\
{-0.972945, 0.301483, -1.133402 } };
struct mp_cmat yuv_bt709 = { {{1.164384, 1.164384, 1.164384},
{0.00000, -0.213249, 2.112402},
{1.792741, -0.532909, 0.000000}},
{-0.972945, 0.301483, -1.133402}
};
// ITU-R BT.2020 non-constant luminance system
struct mp_cmat yuv_bt2020ncl = {\
{{ 1.164384, 1.164384, 1.164384 },\
{ 0.00000, -0.187326, 2.141772 },\
{ 1.678674, -0.650424 , 0.000000 }},\
{-0.915688, 0.347459, -1.148145 } };
struct mp_cmat yuv_bt2020ncl = { {{1.164384, 1.164384, 1.164384},
{0.00000, -0.187326, 2.141772},
{1.678674, -0.650424, 0.000000}},
{-0.915688, 0.347459, -1.148145}
};
// ITU-R BT.2020 constant luminance system
struct mp_cmat yuv_bt2020cl = {\
{{ 0.0000, 1.164384, 0.000000 },\
{ 0.00000, 0.000000, 1.138393 },\
{ 1.138393, 0.000000 , 0.000000 }},\
{-0.571429, -0.073059, -0.571429 } };
struct mp_cmat yuv_bt2020cl = { {{0.0000, 1.164384, 0.000000},
{0.00000, 0.000000, 1.138393},
{1.138393, 0.000000, 0.000000}},
{-0.571429, -0.073059, -0.571429}
};
float cms_matrix[3][3] = \
{{ 1.660497, -0.124547, -0.018154},\
{-0.587657, 1.132895, -0.100597},\
{-0.072840, -0.008348, 1.118751}};
float cms_matrix[3][3] = { {1.660497, -0.124547, -0.018154},
{-0.587657, 1.132895, -0.100597},
{-0.072840, -0.008348, 1.118751}
};
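Because the GLSL mat3() constructor below consumes these floats column-major, each row of the C arrays ends up as one column of the shader matrix, i.e. the Y, U and V vectors mentioned in the comment above. A small CPU-side sanity check of the BT.709 matrix (a sketch, not plugin code):

#include <stdio.h>

int main(void)
{
    const float m[3][3] = { {1.164384, 1.164384, 1.164384},
                            {0.000000, -0.213249, 2.112402},
                            {1.792741, -0.532909, 0.000000} };
    const float c[3] = { -0.972945, 0.301483, -1.133402 };
    /* limited-range white in normalized texture units: Y=235/255, U=V=128/255 */
    const float yuv[3] = { 235.0f / 255.0f, 128.0f / 255.0f, 128.0f / 255.0f };
    float rgb[3];

    for (int i = 0; i < 3; i++)             /* column j of the GL matrix is row m[j] */
        rgb[i] = m[0][i] * yuv[0] + m[1][i] * yuv[1] + m[2][i] * yuv[2] + c[i];
    printf("R=%.3f G=%.3f B=%.3f\n", rgb[0], rgb[1], rgb[2]);   /* each ~1.000 */
    return 0;
}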
struct gl_vao_entry {
// Common constants for SMPTE ST.2084 (PQ)
static const float PQ_M1 = 2610. / 4096 * 1. / 4,
PQ_M2 = 2523. / 4096 * 128,
PQ_C1 = 3424. / 4096,
PQ_C2 = 2413. / 4096 * 32,
PQ_C3 = 2392. / 4096 * 32;
// Common constants for ARIB STD-B67 (HLG)
static const float HLG_A = 0.17883277,
HLG_B = 0.28466892,
HLG_C = 0.55991073;
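For reference, these are the parameters of the SMPTE ST 2084 (PQ) EOTF and of the HLG inverse OETF whose literal values the BT.2020 fragment shader below inlines. A hedged sketch of both curves built from the same constants (build with -lm):

#include <math.h>
#include <stdio.h>

/* PQ EOTF: non-linear signal in [0,1] -> linear light relative to 10000 cd/m^2 */
static float pq_eotf(float n)
{
    const float m1 = 2610.0f / 4096 / 4, m2 = 2523.0f / 4096 * 128;
    const float c1 = 3424.0f / 4096, c2 = 2413.0f / 4096 * 32, c3 = 2392.0f / 4096 * 32;
    float p = powf(n, 1.0f / m2);

    return powf(fmaxf(p - c1, 0.0f) / (c2 - c3 * p), 1.0f / m1);
}

/* HLG inverse OETF, scaled to peak at 12 - the same piecewise curve the shader
 * below emits via mix(4*x*x, exp((x - c)/a) + b, x > 0.5). */
static float hlg_inv_oetf(float e)
{
    const float a = 0.17883277f, b = 0.28466892f, c = 0.55991073f;

    return e <= 0.5f ? 4.0f * e * e : expf((e - c) / a) + b;
}

int main(void)
{
    printf("PQ eotf(0.5)      = %.4f (of 10000 cd/m^2, roughly 92 nits)\n", pq_eotf(0.5f));
    printf("HLG inv-OETF(1.0) = %.2f (curve peaks at 12)\n", hlg_inv_oetf(1.0f));
    return 0;
}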
struct gl_vao_entry
{
// used for shader / glBindAttribLocation
const char *name;
// glVertexAttribPointer() arguments
@@ -165,18 +92,20 @@ struct gl_vao_entry {
int offset;
};
struct vertex_pt {
struct vertex_pt
{
float x, y;
};
struct vertex_pi {
struct vertex_pi
{
GLint x, y;
};
#define TEXUNIT_VIDEO_NUM 6
struct vertex {
struct vertex
{
struct vertex_pt position;
struct vertex_pt texcoord[TEXUNIT_VIDEO_NUM];
};
@@ -185,32 +114,76 @@ static const struct gl_vao_entry vertex_vao[] = {
{"position", 2, GL_FLOAT, false, offsetof(struct vertex, position)},
{"texcoord0", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[0])},
{"texcoord1", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[1])},
{"texcoord2", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[2])},
{"texcoord3", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[3])},
{"texcoord4", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[4])},
{"texcoord5", 2, GL_FLOAT, false, offsetof(struct vertex, texcoord[5])},
{0}
};
#define GLSL(...) pl_shader_append(__VA_ARGS__)
#define GLSLV(...) pl_shader_append_v(__VA_ARGS__)
static void compile_attach_shader(GLuint program,
GLenum type, const char *source)
char sh[SHADER_LENGTH];
char shv[SHADER_LENGTH];
GL_init()
{
sh[0] = 0;
}
GLV_init()
{
shv[0] = 0;
}
pl_shader_append(const char *fmt, ...)
{
char temp[1000];
va_list ap;
va_start(ap, fmt);
vsprintf(temp, fmt, ap);
va_end(ap);
if (strlen(sh) + strlen(temp) > SHADER_LENGTH)
Fatal(_("Shader length fault\n"));
strcat(sh, temp);
}
pl_shader_append_v(const char *fmt, ...)
{
char temp[1000];
va_list ap;
va_start(ap, fmt);
vsprintf(temp, fmt, ap);
va_end(ap);
if (strlen(shv) + strlen(temp) > SHADER_LENGTH)
Fatal(_("Shader length fault\n"));
strcat(shv, temp);
}
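Note that pl_shader_append()/pl_shader_append_v() above format into a fixed 1000-byte temp buffer with vsprintf() and only check the accumulated length afterwards. A bounds-safe variant could format straight into the remaining space with vsnprintf(); a sketch with a hypothetical shader_append() name (the plugin itself would call Fatal() instead of abort()):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void shader_append(char *dst, size_t dst_size, const char *fmt, ...)
{
    size_t used = strlen(dst);
    va_list ap;
    int n;

    va_start(ap, fmt);
    n = vsnprintf(dst + used, dst_size - used, fmt, ap);   /* never writes past dst_size */
    va_end(ap);
    if (n < 0 || (size_t)n >= dst_size - used) {
        fprintf(stderr, "shader buffer overflow\n");
        abort();
    }
}

int main(void)
{
    char sh[64] = "";

    shader_append(sh, sizeof(sh), "#version %d\n", 330);
    shader_append(sh, sizeof(sh), "void main() {}\n");
    fputs(sh, stdout);
    return 0;
}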
static void compile_attach_shader(GLuint program, GLenum type, const char *source)
{
GLuint shader;
GLint status, log_length;
GLint status = 1234, log_length;
char log[4000];
GLsizei len;
shader = glCreateShader(type);
glShaderSource(shader, 1, &source, NULL);
glShaderSource(shader, 1, (const GLchar **)&source, NULL); // &buffer, NULL);
glCompileShader(shader);
status = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
log_length = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);
Debug(3,"compile Status %d loglen %d\n",status,log_length);
glGetShaderInfoLog(shader, 4000, &len, log);
GlxCheck();
Debug(3, "compile Status %d loglen %d >%s<\n", status, log_length, log);
glAttachShader(program, shader);
glDeleteShader(shader);
}
static void link_shader(GLuint program)
@@ -223,11 +196,55 @@ static void link_shader(GLuint program)
log_length = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &log_length);
Debug(3, "Link Status %d loglen %d\n", status, log_length);
}
static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) {
static GLuint sc_generate_osd(GLuint gl_prog)
{
Debug(3, "vor create osd\n");
gl_prog = glCreateProgram();
GL_init();
GLSL("%s\n", gl_version);
GLSL("in vec2 vertex_position;\n");
GLSL("in vec2 vertex_texcoord0;\n");
GLSL("out vec2 texcoord0;\n");
GLSL("void main() {\n");
GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
GLSL("texcoord0 = vertex_texcoord0;\n");
GLSL("}\n");
Debug(3, "vor compile vertex osd\n");
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh); // vertex_osd);
GL_init();
GLSL("%s\n", gl_version);
GLSL("#define texture1D texture\n");
GLSL("precision mediump float; \n");
GLSL("layout(location = 0) out vec4 out_color;\n");
GLSL("in vec2 texcoord0;\n");
GLSL("uniform sampler2D texture0;\n");
GLSL("void main() {\n");
GLSL("vec4 color; \n");
GLSL("color = vec4(texture(texture0, texcoord0));\n");
#ifdef GAMMA
GLSL("// delinearize gamma \n");
GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // delinearize gamma
GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
#endif
GLSL("out_color = color;\n");
GLSL("}\n");
Debug(3, "vor compile fragment osd \n");
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh); //fragment_osd);
glBindAttribLocation(gl_prog, 0, "vertex_position");
glBindAttribLocation(gl_prog, 1, "vertex_texcoord0");
link_shader(gl_prog);
return gl_prog;
}
static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace)
{
char vname[80];
int n;
@@ -235,41 +252,135 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) {
float *m, *c, *cms;
char *frag;
GL_init();
GLSL("%s\n", gl_version);
GLSL("in vec2 vertex_position; \n");
GLSL("in vec2 vertex_texcoord0; \n");
GLSL("out vec2 texcoord0; \n");
GLSL("in vec2 vertex_texcoord1; \n");
GLSL("out vec2 texcoord1; \n");
if (Planes == 3) {
GLSL("in vec2 vertex_texcoord2; \n");
GLSL("out vec2 texcoord2; \n");
}
GLSL("void main() { \n");
GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
GLSL("texcoord0 = vertex_texcoord0; \n");
GLSL("texcoord1 = vertex_texcoord1; \n");
if (Planes == 3) {
GLSL("texcoord2 = vertex_texcoord1; \n"); // texcoord1 ist hier richtig
}
GLSL("} \n");
Debug(3, "vor create\n");
gl_prog = glCreateProgram();
Debug(3, "vor compile vertex\n");
// printf("%s",sh);
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh);
switch (colorspace) {
case AVCOL_SPC_RGB:
m = &yuv_bt601.m[0][0];
c = &yuv_bt601.c[0];
frag = fragment;
Debug(3, "BT601 Colorspace used\n");
break;
case AVCOL_SPC_BT709:
case AVCOL_SPC_UNSPECIFIED: // comes with UHD
m = &yuv_bt709.m[0][0];
c = &yuv_bt709.c[0];
frag = fragment;
Debug(3, "BT709 Colorspace used\n");
break;
case AVCOL_SPC_BT2020_NCL:
m = &yuv_bt2020ncl.m[0][0];
c = &yuv_bt2020ncl.c[0];
cms = &cms_matrix[0][0];
frag = fragment_bt2100;
Debug(3, "BT2020NCL Colorspace used\n");
break;
default: // fallback
m = &yuv_bt709.m[0][0];
c = &yuv_bt709.c[0];
frag = fragment;
Debug(3, "default BT709 Colorspace used %d\n", colorspace);
break;
}
Debug(3,"vor create\n");
gl_prog = glCreateProgram();
Debug(3,"vor compile vertex\n");
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex);
GL_init();
GLSL("%s\n", gl_version);
GLSL("precision mediump float; \n");
GLSL("layout(location = 0) out vec4 out_color;\n");
GLSL("in vec2 texcoord0; \n");
GLSL("in vec2 texcoord1; \n");
if (Planes == 3)
GLSL("in vec2 texcoord2; \n");
GLSL("uniform mat3 colormatrix; \n");
GLSL("uniform vec3 colormatrix_c; \n");
if (colorspace == AVCOL_SPC_BT2020_NCL)
GLSL("uniform mat3 cms_matrix;\n");
GLSL("uniform sampler2D texture0; \n");
GLSL("uniform sampler2D texture1; \n");
if (Planes == 3)
GLSL("uniform sampler2D texture2; \n");
GLSL("void main() { \n");
GLSL("vec4 color; \n");
if (colorspace == AVCOL_SPC_BT2020_NCL) {
GLSL("color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r; \n");
if (Planes == 3) {
GLSL("color.g = 1.003906 * vec4(texture(texture1, texcoord1)).r; \n");
GLSL("color.b = 1.003906 * vec4(texture(texture2, texcoord2)).r; \n");
} else {
GLSL("color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n");
}
GLSL("// color conversion\n");
GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n");
GLSL("color.a = 1.0; \n");
GLSL("// pl_shader_linearize \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n");
// GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
// GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(%f)) * vec3(1.0/%f)) + vec3(%f),bvec3(lessThan(vec3(0.5), color.rgb)));\n",HLG_C, HLG_A, HLG_B);
GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(0.55991073)) * vec3(1.0/0.17883277)) + vec3(0.28466892), bvec3(lessThan(vec3(0.5), color.rgb)));\n");
GLSL("// color mapping \n");
GLSL("color.rgb = cms_matrix * color.rgb; \n");
#ifndef GAMMA
GLSL("// pl_shader_delinearize \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n");
// GLSL("color.rgb = pow(color.rgb, vec3(1.0/2.4)); \n");
GLSL("color.rgb = mix(vec3(0.5) * sqrt(color.rgb), vec3(0.17883277) * log(color.rgb - vec3(0.28466892)) + vec3(0.55991073), bvec3(lessThan(vec3(1.0), color.rgb))); \n");
#endif
GLSL("out_color = color; \n");
GLSL("} \n");
} else {
GLSL("color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r; \n");
if (Planes == 3) {
GLSL("color.g = 1.000000 * vec4(texture(texture1, texcoord1)).r;\n");
GLSL("color.b = 1.000000 * vec4(texture(texture2, texcoord2)).r;\n");
} else {
GLSL("color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg; \n");
}
GLSL("// color conversion \n");
GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n");
GLSL("color.a = 1.0; \n");
GLSL("// linearize gamma \n");
GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // linearize gamma
GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
#ifndef GAMMA
GLSL("// delinearize gamma to sRGB \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
GLSL("color.rgb = mix(color.rgb * vec3(12.92), vec3(1.055) * pow(color.rgb, vec3(1.0/2.4)) - vec3(0.055), bvec3(lessThanEqual(vec3(0.0031308), color.rgb))); \n");
#endif
GLSL("// color mapping \n");
GLSL("out_color = color; \n");
GLSL("} \n");
}
//printf(">%s<",sh);
Debug(3, "vor compile fragment\n");
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, frag);
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh);
glBindAttribLocation(gl_prog, 0, "vertex_position");
for (n = 0; n < 6; n++) {
@@ -284,7 +395,8 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace) {
if (gl_colormatrix != -1)
glProgramUniformMatrix3fv(gl_prog, gl_colormatrix, 1, 0, m);
GlxCheck();
//glProgramUniform3fv(gl_prog,gl_colormatrix,3,&yuv_bt709.m[0][0]);
Debug(3, "nach set colormatrix\n");
gl_colormatrix_c = glGetUniformLocation(gl_prog, "colormatrix_c");
Debug(3, "get uniform colormatrix_c %d %f\n", gl_colormatrix_c, *c);
if (gl_colormatrix_c != -1)
@@ -306,6 +418,7 @@ static void render_pass_quad(int flip, float xcrop, float ycrop)
struct vertex va[4];
int n;
const struct gl_vao_entry *e;
// uhhhh what a hack
if (!flip) {
va[0].position.x = (float)-1.0;
@@ -344,8 +457,6 @@ static void render_pass_quad(int flip, float xcrop, float ycrop)
va[3].texcoord[1].x = (float)1.0 - xcrop;
va[3].texcoord[1].y = (float)1.0 - ycrop; // cropped at bottom right: 1.0 - value
glBindBuffer(GL_ARRAY_BUFFER, vao_buffer);
glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(struct vertex), va, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
@@ -355,8 +466,8 @@ static void render_pass_quad(int flip, float xcrop, float ycrop)
for (n = 0; vertex_vao[n].name; n++) {
e = &vertex_vao[n];
glEnableVertexAttribArray(n);
glVertexAttribPointer(n, e->num_elems, e->type, e->normalized,
sizeof(struct vertex), (void *)(intptr_t)e->offset);
glVertexAttribPointer(n, e->num_elems, e->type, e->normalized, sizeof(struct vertex),
(void *)(intptr_t) e->offset);
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
@@ -365,5 +476,3 @@ static void render_pass_quad(int flip, float xcrop, float ycrop)
for (n = 0; vertex_vao[n].name; n++)
glDisableVertexAttribArray(n);
}
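The constants hard-coded into the HLG branch above (0.17883277, 0.28466892, 0.55991073) are the ARIB STD-B67 coefficients that the commented-out HLG_A/HLG_B/HLG_C line refers to. A minimal CPU sketch of the same transfer pair, useful for sanity-checking the generated GLSL; the function names are illustrative and not part of the plugin:

#include <math.h>

#define HLG_A 0.17883277
#define HLG_B 0.28466892
#define HLG_C 0.55991073

/* Signal 0..1 -> linear light scaled so that 1.0 maps to ~12.0,
 * mirroring the mix()/exp() expression emitted via GLSL() above. */
static double hlg_linearize(double v)
{
    return v <= 0.5 ? 4.0 * v * v : exp((v - HLG_C) / HLG_A) + HLG_B;
}

/* Inverse of the above, mirroring the pl_shader_delinearize branch. */
static double hlg_delinearize(double x)
{
    return x <= 1.0 ? 0.5 * sqrt(x) : HLG_A * log(x - HLG_B) + HLG_C;
}

Round-tripping a value through both functions should return it unchanged, which is a quick way to validate edits to the shader strings.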


@@ -1,92 +0,0 @@
Pro 7 1080i
[vo/opengl] [ 1] color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
UHD 10 Bit
[vo/opengl] [ 1] color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
HEVC 8 Bit
[vo/opengl] [ 1] color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
ZDF 720p
[vo/opengl] [ 1] color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
VERTEX
#version 330
in vec2 vertex_position;
in vec2 vertex_texcoord0;
out vec2 texcoord0;
in vec2 vertex_texcoord1;
out vec2 texcoord1;
in vec2 vertex_texcoord2;
out vec2 texcoord2;
in vec2 vertex_texcoord3;
out vec2 texcoord3;
in vec2 vertex_texcoord4;
out vec2 texcoord4;
in vec2 vertex_texcoord5;
out vec2 texcoord5;
void main() {
gl_Position = vec4(vertex_position, 1.0, 1.0);
texcoord0 = vertex_texcoord0;
texcoord1 = vertex_texcoord1;
texcoord2 = vertex_texcoord2;
texcoord3 = vertex_texcoord3;
texcoord4 = vertex_texcoord4;
texcoord5 = vertex_texcoord5;
}
FRAGMENT
#version 330
#define texture1D texture
#define texture3D texture
out vec4 out_color;
in vec2 texcoord0;
in vec2 texcoord1;
in vec2 texcoord2;
in vec2 texcoord3;
in vec2 texcoord4;
in vec2 texcoord5;
uniform mat3 colormatrix;
uniform vec3 colormatrix_c;
uniform sampler2D texture0;
uniform vec2 texture_size0;
uniform mat2 texture_rot0;
uniform vec2 pixel_size0;
uniform sampler2D texture1;
uniform vec2 texture_size1;
uniform mat2 texture_rot1;
uniform vec2 pixel_size1;
#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))
void main() {
vec4 color = vec4(0.0, 0.0, 0.0, 1.0);
color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;
color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;
// color conversion
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
color.a = 1.0;
// color mapping
out_color = color;
}
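All of the dumped shaders reduce to the same affine step, rgb = mat3(colormatrix) * yuv + colormatrix_c, with the coefficients uploaded as uniforms from the C side. A small CPU sketch of that arithmetic; the row-major indexing here is only for readability (the GLSL mat3 constructor is column-major) and no particular coefficient set is implied:

/* Apply an affine YUV -> RGB conversion: rgb = M * yuv + c. M and c
 * stand in for the colormatrix / colormatrix_c uniforms. */
static void yuv_to_rgb(const float m[3][3], const float c[3],
                       const float yuv[3], float rgb[3])
{
    for (int i = 0; i < 3; ++i)
        rgb[i] = m[i][0] * yuv[0] + m[i][1] * yuv[1] + m[i][2] * yuv[2] + c[i];
}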

shaders/KrigBilateral.glsl Normal file

@@ -0,0 +1,222 @@
// KrigBilateral by Shiandow
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library.
//!HOOK CHROMA
//!BIND HOOKED
//!BIND LUMA
//!SAVE LOWRES_Y
//!WIDTH LUMA.w
//!WHEN CHROMA.w LUMA.w <
//!DESC KrigBilateral Downscaling Y pass 1
#define offset vec2(0,0)
#define axis 1
#define Kernel(x) dot(vec3(0.42659, -0.49656, 0.076849), cos(vec3(0, 1, 2) * acos(-1.) * (x + 1.)))
vec4 hook() {
// Calculate bounds
float low = ceil((LUMA_pos - CHROMA_pt) * LUMA_size - offset - 0.5)[axis];
float high = floor((LUMA_pos + CHROMA_pt) * LUMA_size - offset - 0.5)[axis];
float W = 0.0;
vec4 avg = vec4(0);
vec2 pos = LUMA_pos;
for (float k = low; k <= high; k++) {
pos[axis] = LUMA_pt[axis] * (k - offset[axis] + 0.5);
float rel = (pos[axis] - LUMA_pos[axis])*CHROMA_size[axis];
float w = Kernel(rel);
vec4 y = textureGrad(LUMA_raw, pos, vec2(0.0), vec2(0.0)).xxxx * LUMA_mul;
y.y *= y.y;
avg += w * y;
W += w;
}
avg /= W;
avg.y = abs(avg.y - pow(avg.x, 2.0));
return avg;
}
//!HOOK CHROMA
//!BIND HOOKED
//!BIND LOWRES_Y
//!SAVE LOWRES_Y
//!WHEN CHROMA.w LUMA.w <
//!DESC KrigBilateral Downscaling Y pass 2
#define offset vec2(0,0)
#define axis 0
#define Kernel(x) dot(vec3(0.42659, -0.49656, 0.076849), cos(vec3(0, 1, 2) * acos(-1.) * (x + 1.)))
vec4 hook() {
// Calculate bounds
float low = ceil((LOWRES_Y_pos - CHROMA_pt) * LOWRES_Y_size - offset - 0.5)[axis];
float high = floor((LOWRES_Y_pos + CHROMA_pt) * LOWRES_Y_size - offset - 0.5)[axis];
float W = 0.0;
vec4 avg = vec4(0);
vec2 pos = LOWRES_Y_pos;
for (float k = low; k <= high; k++) {
pos[axis] = LOWRES_Y_pt[axis] * (k - offset[axis] + 0.5);
float rel = (pos[axis] - LOWRES_Y_pos[axis])*CHROMA_size[axis];
float w = Kernel(rel);
vec4 y = textureGrad(LOWRES_Y_raw, pos, vec2(0.0), vec2(0.0)).xxxx * LOWRES_Y_mul;
y.y *= y.y;
avg += w * y;
W += w;
}
avg /= W;
avg.y = abs(avg.y - pow(avg.x, 2.0)) + LOWRES_Y_texOff(0).y;
return avg;
}
//!HOOK CHROMA
//!BIND HOOKED
//!BIND LUMA
//!BIND LOWRES_Y
//!WIDTH LUMA.w
//!HEIGHT LUMA.h
//!WHEN CHROMA.w LUMA.w <
//!OFFSET ALIGN
//!DESC KrigBilateral Upscaling UV
// -- Convenience --
#define sqr(x) dot(x,x)
#define bitnoise 1.0/(2.0*255.0)
#define noise 0.05//5.0*bitnoise
#define chromaOffset vec2(0.0, 0.0)
// -- Window Size --
#define taps 3
#define even (float(taps) - 2.0 * floor(float(taps) / 2.0) == 0.0)
#define minX int(1.0-ceil(float(taps)/2.0))
#define maxX int(floor(float(taps)/2.0))
#define Kernel(x) (cos(acos(-1.0)*(x)/float(taps))) // Hann kernel
// -- Input processing --
#define GetY(coord) LOWRES_Y_tex(LOWRES_Y_pt*(pos+coord+vec2(0.5))).xy
#define GetUV(coord) CHROMA_tex(CHROMA_pt*(pos+coord+vec2(0.5))).xy
#define N (taps*taps - 1)
#define M(i,j) Mx[min(i,j)*N + max(i,j) - min(i,j)*(min(i,j)+1)/2]
#define C(i,j) (inversesqrt(1.0 + (X[i].y + X[j].y)/localVar) * exp(-0.5*(sqr(X[i].x - X[j].x)/(localVar + X[i].y + X[j].y) + sqr((coords[i] - coords[j])/radius))) + (X[i].x - y) * (X[j].x - y) / localVar)
#define c(i) (inversesqrt(1.0 + X[i].y/localVar) * exp(-0.5*(sqr(X[i].x - y)/(localVar + X[i].y) + sqr((coords[i] - offset)/radius))))
vec4 hook() {
vec2 pos = CHROMA_pos * HOOKED_size - chromaOffset - vec2(0.5);
vec2 offset = pos - (even ? floor(pos) : round(pos));
pos -= offset;
vec2 coords[N+1];
vec4 X[N+1];
float y = LUMA_texOff(0).x;
vec4 total = vec4(0);
coords[0] = vec2(-1,-1); coords[1] = vec2(-1, 0); coords[2] = vec2(-1, 1);
coords[3] = vec2( 0,-1); coords[4] = vec2( 0, 1); coords[5] = vec2( 1,-1);
coords[6] = vec2( 1, 0); coords[7] = vec2( 1, 1); coords[8] = vec2( 0, 0);
for (int i=0; i<N+1; i++) {
X[i] = vec4(GetY(coords[i]), GetUV(coords[i]));
vec2 w = clamp(1.5 - abs(coords[i] - offset), 0.0, 1.0);
total += w.x*w.y*vec4(X[i].x, pow(X[i].x, 2.0), X[i].y, 1.0);
}
total.xyz /= total.w;
float localVar = sqr(noise) + abs(total.y - pow(total.x, 2.0)) + total.z;
float radius = 1.0;
float Mx[N*(N+1)/2];
float b[N];
vec4 interp = X[N];
b[0] = c(0) - c(N) - C(0,N) + C(N,N); M(0, 0) = C(0,0) - C(0,N) - C(0,N) + C(N,N); M(0, 1) = C(0,1) - C(1,N) - C(0,N) + C(N,N); M(0, 2) = C(0,2) - C(2,N) - C(0,N) + C(N,N); M(0, 3) = C(0,3) - C(3,N) - C(0,N) + C(N,N); M(0, 4) = C(0,4) - C(4,N) - C(0,N) + C(N,N); M(0, 5) = C(0,5) - C(5,N) - C(0,N) + C(N,N); M(0, 6) = C(0,6) - C(6,N) - C(0,N) + C(N,N); M(0, 7) = C(0,7) - C(7,N) - C(0,N) + C(N,N);
b[1] = c(1) - c(N) - C(1,N) + C(N,N); M(1, 1) = C(1,1) - C(1,N) - C(1,N) + C(N,N); M(1, 2) = C(1,2) - C(2,N) - C(1,N) + C(N,N); M(1, 3) = C(1,3) - C(3,N) - C(1,N) + C(N,N); M(1, 4) = C(1,4) - C(4,N) - C(1,N) + C(N,N); M(1, 5) = C(1,5) - C(5,N) - C(1,N) + C(N,N); M(1, 6) = C(1,6) - C(6,N) - C(1,N) + C(N,N); M(1, 7) = C(1,7) - C(7,N) - C(1,N) + C(N,N);
b[2] = c(2) - c(N) - C(2,N) + C(N,N); M(2, 2) = C(2,2) - C(2,N) - C(2,N) + C(N,N); M(2, 3) = C(2,3) - C(3,N) - C(2,N) + C(N,N); M(2, 4) = C(2,4) - C(4,N) - C(2,N) + C(N,N); M(2, 5) = C(2,5) - C(5,N) - C(2,N) + C(N,N); M(2, 6) = C(2,6) - C(6,N) - C(2,N) + C(N,N); M(2, 7) = C(2,7) - C(7,N) - C(2,N) + C(N,N);
b[3] = c(3) - c(N) - C(3,N) + C(N,N); M(3, 3) = C(3,3) - C(3,N) - C(3,N) + C(N,N); M(3, 4) = C(3,4) - C(4,N) - C(3,N) + C(N,N); M(3, 5) = C(3,5) - C(5,N) - C(3,N) + C(N,N); M(3, 6) = C(3,6) - C(6,N) - C(3,N) + C(N,N); M(3, 7) = C(3,7) - C(7,N) - C(3,N) + C(N,N);
b[4] = c(4) - c(N) - C(4,N) + C(N,N); M(4, 4) = C(4,4) - C(4,N) - C(4,N) + C(N,N); M(4, 5) = C(4,5) - C(5,N) - C(4,N) + C(N,N); M(4, 6) = C(4,6) - C(6,N) - C(4,N) + C(N,N); M(4, 7) = C(4,7) - C(7,N) - C(4,N) + C(N,N);
b[5] = c(5) - c(N) - C(5,N) + C(N,N); M(5, 5) = C(5,5) - C(5,N) - C(5,N) + C(N,N); M(5, 6) = C(5,6) - C(6,N) - C(5,N) + C(N,N); M(5, 7) = C(5,7) - C(7,N) - C(5,N) + C(N,N);
b[6] = c(6) - c(N) - C(6,N) + C(N,N); M(6, 6) = C(6,6) - C(6,N) - C(6,N) + C(N,N); M(6, 7) = C(6,7) - C(7,N) - C(6,N) + C(N,N);
b[7] = c(7) - c(N) - C(7,N) + C(N,N); M(7, 7) = C(7,7) - C(7,N) - C(7,N) + C(N,N);
b[1] -= b[0] * M(1, 0) / M(0, 0); M(1, 1) -= M(0, 1) * M(1, 0) / M(0, 0); M(1, 2) -= M(0, 2) * M(1, 0) / M(0, 0); M(1, 3) -= M(0, 3) * M(1, 0) / M(0, 0); M(1, 4) -= M(0, 4) * M(1, 0) / M(0, 0); M(1, 5) -= M(0, 5) * M(1, 0) / M(0, 0); M(1, 6) -= M(0, 6) * M(1, 0) / M(0, 0); M(1, 7) -= M(0, 7) * M(1, 0) / M(0, 0);
b[2] -= b[0] * M(2, 0) / M(0, 0); M(2, 2) -= M(0, 2) * M(2, 0) / M(0, 0); M(2, 3) -= M(0, 3) * M(2, 0) / M(0, 0); M(2, 4) -= M(0, 4) * M(2, 0) / M(0, 0); M(2, 5) -= M(0, 5) * M(2, 0) / M(0, 0); M(2, 6) -= M(0, 6) * M(2, 0) / M(0, 0); M(2, 7) -= M(0, 7) * M(2, 0) / M(0, 0);
b[3] -= b[0] * M(3, 0) / M(0, 0); M(3, 3) -= M(0, 3) * M(3, 0) / M(0, 0); M(3, 4) -= M(0, 4) * M(3, 0) / M(0, 0); M(3, 5) -= M(0, 5) * M(3, 0) / M(0, 0); M(3, 6) -= M(0, 6) * M(3, 0) / M(0, 0); M(3, 7) -= M(0, 7) * M(3, 0) / M(0, 0);
b[4] -= b[0] * M(4, 0) / M(0, 0); M(4, 4) -= M(0, 4) * M(4, 0) / M(0, 0); M(4, 5) -= M(0, 5) * M(4, 0) / M(0, 0); M(4, 6) -= M(0, 6) * M(4, 0) / M(0, 0); M(4, 7) -= M(0, 7) * M(4, 0) / M(0, 0);
b[5] -= b[0] * M(5, 0) / M(0, 0); M(5, 5) -= M(0, 5) * M(5, 0) / M(0, 0); M(5, 6) -= M(0, 6) * M(5, 0) / M(0, 0); M(5, 7) -= M(0, 7) * M(5, 0) / M(0, 0);
b[6] -= b[0] * M(6, 0) / M(0, 0); M(6, 6) -= M(0, 6) * M(6, 0) / M(0, 0); M(6, 7) -= M(0, 7) * M(6, 0) / M(0, 0);
b[7] -= b[0] * M(7, 0) / M(0, 0); M(7, 7) -= M(0, 7) * M(7, 0) / M(0, 0);
b[2] -= b[1] * M(2, 1) / M(1, 1); M(2, 2) -= M(1, 2) * M(2, 1) / M(1, 1); M(2, 3) -= M(1, 3) * M(2, 1) / M(1, 1); M(2, 4) -= M(1, 4) * M(2, 1) / M(1, 1); M(2, 5) -= M(1, 5) * M(2, 1) / M(1, 1); M(2, 6) -= M(1, 6) * M(2, 1) / M(1, 1); M(2, 7) -= M(1, 7) * M(2, 1) / M(1, 1);
b[3] -= b[1] * M(3, 1) / M(1, 1); M(3, 3) -= M(1, 3) * M(3, 1) / M(1, 1); M(3, 4) -= M(1, 4) * M(3, 1) / M(1, 1); M(3, 5) -= M(1, 5) * M(3, 1) / M(1, 1); M(3, 6) -= M(1, 6) * M(3, 1) / M(1, 1); M(3, 7) -= M(1, 7) * M(3, 1) / M(1, 1);
b[4] -= b[1] * M(4, 1) / M(1, 1); M(4, 4) -= M(1, 4) * M(4, 1) / M(1, 1); M(4, 5) -= M(1, 5) * M(4, 1) / M(1, 1); M(4, 6) -= M(1, 6) * M(4, 1) / M(1, 1); M(4, 7) -= M(1, 7) * M(4, 1) / M(1, 1);
b[5] -= b[1] * M(5, 1) / M(1, 1); M(5, 5) -= M(1, 5) * M(5, 1) / M(1, 1); M(5, 6) -= M(1, 6) * M(5, 1) / M(1, 1); M(5, 7) -= M(1, 7) * M(5, 1) / M(1, 1);
b[6] -= b[1] * M(6, 1) / M(1, 1); M(6, 6) -= M(1, 6) * M(6, 1) / M(1, 1); M(6, 7) -= M(1, 7) * M(6, 1) / M(1, 1);
b[7] -= b[1] * M(7, 1) / M(1, 1); M(7, 7) -= M(1, 7) * M(7, 1) / M(1, 1);
b[3] -= b[2] * M(3, 2) / M(2, 2); M(3, 3) -= M(2, 3) * M(3, 2) / M(2, 2); M(3, 4) -= M(2, 4) * M(3, 2) / M(2, 2); M(3, 5) -= M(2, 5) * M(3, 2) / M(2, 2); M(3, 6) -= M(2, 6) * M(3, 2) / M(2, 2); M(3, 7) -= M(2, 7) * M(3, 2) / M(2, 2);
b[4] -= b[2] * M(4, 2) / M(2, 2); M(4, 4) -= M(2, 4) * M(4, 2) / M(2, 2); M(4, 5) -= M(2, 5) * M(4, 2) / M(2, 2); M(4, 6) -= M(2, 6) * M(4, 2) / M(2, 2); M(4, 7) -= M(2, 7) * M(4, 2) / M(2, 2);
b[5] -= b[2] * M(5, 2) / M(2, 2); M(5, 5) -= M(2, 5) * M(5, 2) / M(2, 2); M(5, 6) -= M(2, 6) * M(5, 2) / M(2, 2); M(5, 7) -= M(2, 7) * M(5, 2) / M(2, 2);
b[6] -= b[2] * M(6, 2) / M(2, 2); M(6, 6) -= M(2, 6) * M(6, 2) / M(2, 2); M(6, 7) -= M(2, 7) * M(6, 2) / M(2, 2);
b[7] -= b[2] * M(7, 2) / M(2, 2); M(7, 7) -= M(2, 7) * M(7, 2) / M(2, 2);
b[4] -= b[3] * M(4, 3) / M(3, 3); M(4, 4) -= M(3, 4) * M(4, 3) / M(3, 3); M(4, 5) -= M(3, 5) * M(4, 3) / M(3, 3); M(4, 6) -= M(3, 6) * M(4, 3) / M(3, 3); M(4, 7) -= M(3, 7) * M(4, 3) / M(3, 3);
b[5] -= b[3] * M(5, 3) / M(3, 3); M(5, 5) -= M(3, 5) * M(5, 3) / M(3, 3); M(5, 6) -= M(3, 6) * M(5, 3) / M(3, 3); M(5, 7) -= M(3, 7) * M(5, 3) / M(3, 3);
b[6] -= b[3] * M(6, 3) / M(3, 3); M(6, 6) -= M(3, 6) * M(6, 3) / M(3, 3); M(6, 7) -= M(3, 7) * M(6, 3) / M(3, 3);
b[7] -= b[3] * M(7, 3) / M(3, 3); M(7, 7) -= M(3, 7) * M(7, 3) / M(3, 3);
b[5] -= b[4] * M(5, 4) / M(4, 4); M(5, 5) -= M(4, 5) * M(5, 4) / M(4, 4); M(5, 6) -= M(4, 6) * M(5, 4) / M(4, 4); M(5, 7) -= M(4, 7) * M(5, 4) / M(4, 4);
b[6] -= b[4] * M(6, 4) / M(4, 4); M(6, 6) -= M(4, 6) * M(6, 4) / M(4, 4); M(6, 7) -= M(4, 7) * M(6, 4) / M(4, 4);
b[7] -= b[4] * M(7, 4) / M(4, 4); M(7, 7) -= M(4, 7) * M(7, 4) / M(4, 4);
b[6] -= b[5] * M(6, 5) / M(5, 5); M(6, 6) -= M(5, 6) * M(6, 5) / M(5, 5); M(6, 7) -= M(5, 7) * M(6, 5) / M(5, 5);
b[7] -= b[5] * M(7, 5) / M(5, 5); M(7, 7) -= M(5, 7) * M(7, 5) / M(5, 5);
b[7] -= b[6] * M(7, 6) / M(6, 6); M(7, 7) -= M(6, 7) * M(7, 6) / M(6, 6);
b[N-1-0] /= M(N-1-0, N-1-0);
interp += b[N-1-0] * (X[N-1-0] - X[N]);
b[N-1-1] -= M(N-1-1, 7) * b[7]; b[N-1-1] /= M(N-1-1, N-1-1);
interp += b[N-1-1] * (X[N-1-1] - X[N]);
b[N-1-2] -= M(N-1-2, 6) * b[6]; b[N-1-2] -= M(N-1-2, 7) * b[7]; b[N-1-2] /= M(N-1-2, N-1-2);
interp += b[N-1-2] * (X[N-1-2] - X[N]);
b[N-1-3] -= M(N-1-3, 5) * b[5]; b[N-1-3] -= M(N-1-3, 6) * b[6]; b[N-1-3] -= M(N-1-3, 7) * b[7]; b[N-1-3] /= M(N-1-3, N-1-3);
interp += b[N-1-3] * (X[N-1-3] - X[N]);
b[N-1-4] -= M(N-1-4, 4) * b[4]; b[N-1-4] -= M(N-1-4, 5) * b[5]; b[N-1-4] -= M(N-1-4, 6) * b[6]; b[N-1-4] -= M(N-1-4, 7) * b[7]; b[N-1-4] /= M(N-1-4, N-1-4);
interp += b[N-1-4] * (X[N-1-4] - X[N]);
b[N-1-5] -= M(N-1-5, 3) * b[3]; b[N-1-5] -= M(N-1-5, 4) * b[4]; b[N-1-5] -= M(N-1-5, 5) * b[5]; b[N-1-5] -= M(N-1-5, 6) * b[6]; b[N-1-5] -= M(N-1-5, 7) * b[7]; b[N-1-5] /= M(N-1-5, N-1-5);
interp += b[N-1-5] * (X[N-1-5] - X[N]);
b[N-1-6] -= M(N-1-6, 2) * b[2]; b[N-1-6] -= M(N-1-6, 3) * b[3]; b[N-1-6] -= M(N-1-6, 4) * b[4]; b[N-1-6] -= M(N-1-6, 5) * b[5]; b[N-1-6] -= M(N-1-6, 6) * b[6]; b[N-1-6] -= M(N-1-6, 7) * b[7]; b[N-1-6] /= M(N-1-6, N-1-6);
interp += b[N-1-6] * (X[N-1-6] - X[N]);
b[N-1-7] -= M(N-1-7, 1) * b[1]; b[N-1-7] -= M(N-1-7, 2) * b[2]; b[N-1-7] -= M(N-1-7, 3) * b[3]; b[N-1-7] -= M(N-1-7, 4) * b[4]; b[N-1-7] -= M(N-1-7, 5) * b[5]; b[N-1-7] -= M(N-1-7, 6) * b[6]; b[N-1-7] -= M(N-1-7, 7) * b[7]; b[N-1-7] /= M(N-1-7, N-1-7);
interp += b[N-1-7] * (X[N-1-7] - X[N]);
return interp.zwxx;
}
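The two downscaling passes above accumulate, per output texel, a kernel-weighted mean of luma in avg.x and the matching variance estimate |E[Y^2] - E[Y]^2| in avg.y, which the upscaling pass later folds into localVar. A scalar sketch of that accumulation, with the taps and weights left as placeholders:

#include <math.h>

/* Weighted mean and variance over n luma samples, as accumulated by the
 * KrigBilateral downscaling passes (avg.x, avg.y). Illustrative only. */
static void weighted_mean_var(const float *y, const float *w, int n,
                              float *mean, float *var)
{
    float sw = 0.0f, sy = 0.0f, syy = 0.0f;

    for (int i = 0; i < n; ++i) {
        sw += w[i];
        sy += w[i] * y[i];
        syy += w[i] * y[i] * y[i];
    }
    *mean = sy / sw;
    *var = fabsf(syy / sw - (*mean) * (*mean));
}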


@@ -0,0 +1,206 @@
// vim: set ft=glsl:
/*
LumaSharpenHook 0.3
original hlsl by Christian Cann Schuldt Jensen ~ CeeJay.dk
port to glsl by Anon
It blurs the original pixel with the surrounding pixels and then subtracts this blur to sharpen the image.
It does this in luma to avoid color artifacts and allows limiting the maximum sharpening to avoid or lessen halo artifacts.
This is similar to using Unsharp Mask in Photoshop.
*/
// -- Hooks --
//!HOOK LUMA
//!BIND HOOKED
// -- Sharpening --
#define sharp_strength 0.30 //[0.10 to 3.00] Strength of the sharpening
#define sharp_clamp 0.035 //[0.000 to 1.000] Limits maximum amount of sharpening a pixel receives - Default is 0.035
// -- Advanced sharpening settings --
#define pattern 2 //[1|2|3|4] Choose a sample pattern. 1 = Fast, 2 = Normal, 3 = Wider, 4 = Pyramid shaped.
//[8|9] Experimental slower patterns. 8 = 9 tap 9 fetch gaussian, 9 = 9 tap 9 fetch high pass.
#define offset_bias 1.0 //[0.0 to 6.0] Offset bias adjusts the radius of the sampling pattern.
vec4 hook(){
vec4 colorInput = LUMA_tex(LUMA_pos);
//We are on luma plane: xyzw = [luma_val, 0.0, 0.0, 1.0]
float ori = colorInput.x;
// -- Combining the strength and luma multipliers --
float sharp_strength_luma = sharp_strength; //I'll be combining even more multipliers with it later on
float px = 1.0;
float py = 1.0;
// Sampling patterns
// [ NW, , NE ] Each texture lookup (except ori)
// [ ,ori, ] samples 4 pixels
// [ SW, , SE ]
// -- Pattern 1 -- A (fast) 7 tap gaussian using only 2+1 texture fetches.
#if pattern == 1
// -- Gaussian filter --
// [ 1/9, 2/9, ] [ 1 , 2 , ]
// [ 2/9, 8/9, 2/9] = [ 2 , 8 , 2 ]
// [ , 2/9, 1/9] [ , 2 , 1 ]
px = (px / 3.0) * offset_bias;
py = (py / 3.0) * offset_bias;
float blur_ori = LUMA_texOff(vec2(px,py)).x; // North West
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South East
//blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
//blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori *= 0.5; //Divide by the number of texture fetches
sharp_strength_luma *= 1.5; // Adjust strength to approximate the strength of pattern 2
#endif
// -- Pattern 2 -- A 9 tap gaussian using 4+1 texture fetches.
#if pattern == 2
// -- Gaussian filter --
// [ .25, .50, .25] [ 1 , 2 , 1 ]
// [ .50, 1, .50] = [ 2 , 4 , 2 ]
// [ .25, .50, .25] [ 1 , 2 , 1 ]
px = px * 0.5 * offset_bias;
py = py * 0.5 * offset_bias;
float blur_ori = LUMA_texOff(vec2(px,-py)).x; // South East
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
blur_ori += LUMA_texOff(vec2(-px,py)).x; // North West
blur_ori *= 0.25; // ( /= 4) Divide by the number of texture fetches
#endif
// -- Pattern 3 -- An experimental 17 tap gaussian using 4+1 texture fetches.
#if pattern == 3
// -- Gaussian filter --
// [ , 4 , 6 , , ]
// [ ,16 ,24 ,16 , 4 ]
// [ 6 ,24 , ,24 , 6 ]
// [ 4 ,16 ,24 ,16 , ]
// [ , , 6 , 4 , ]
px = px * offset_bias;
py = py * offset_bias;
float blur_ori = LUMA_texOff(vec2(0.4*px,-1.2*py)).x; // South South East
blur_ori += LUMA_texOff(vec2(-1.2*px,-0.4*py)).x; // West South West
blur_ori += LUMA_texOff(vec2(1.2*px,0.4*py)).x; // East North East
blur_ori += LUMA_texOff(vec2(-0.4*px,1.2*py)).x; // North North West
blur_ori *= 0.25; // ( /= 4) Divide by the number of texture fetches
sharp_strength_luma *= 0.51;
#endif
// -- Pattern 4 -- A 9 tap high pass (pyramid filter) using 4+1 texture fetches.
#if pattern == 4
// -- Gaussian filter --
// [ .50, .50, .50] [ 1 , 1 , 1 ]
// [ .50, , .50] = [ 1 , , 1 ]
// [ .50, .50, .50] [ 1 , 1 , 1 ]
float blur_ori = LUMA_texOff(vec2(0.5 * px,-py * offset_bias)).x; // South South East
blur_ori += LUMA_texOff(vec2(offset_bias * -px,0.5 * -py)).x; // West South West
blur_ori += LUMA_texOff(vec2(offset_bias * px,0.5 * py)).x; // East North East
blur_ori += LUMA_texOff(vec2(0.5 * -px,py * offset_bias)).x; // North North West
//blur_ori += (2.0 * ori); // Probably not needed. Only serves to lessen the effect.
blur_ori *= 0.25; //Divide by the number of texture fetches
sharp_strength_luma *= 0.666; // Adjust strength to approximate the strength of pattern 2
#endif
// -- Pattern 8 -- A (slower) 9 tap gaussian using 9 texture fetches.
#if pattern == 8
// -- Gaussian filter --
// [ 1 , 2 , 1 ]
// [ 2 , 4 , 2 ]
// [ 1 , 2 , 1 ]
px = px * offset_bias;
py = py * offset_bias;
float blur_ori = LUMA_texOff(vec2(-px,py)).x; // North West
blur_ori += LUMA_texOff(vec2(px,-py)).x; // South East
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
float blur_ori2 = LUMA_texOff(vec2(0.0,py)).x; // North
blur_ori2 += LUMA_texOff(vec2(0.0,-py)).x; // South
blur_ori2 += LUMA_texOff(vec2(-px,0.0)).x; // West
blur_ori2 += LUMA_texOff(vec2(px,0.0)).x; // East
blur_ori2 *= 2.0;
blur_ori += blur_ori2;
blur_ori += (ori * 4.0); // Probably not needed. Only serves to lessen the effect.
// dot()s with gaussian strengths here?
blur_ori /= 16.0; //Divide by the number of texture fetches
sharp_strength_luma *= 0.75; // Adjust strength to approximate the strength of pattern 2
#endif
// -- Pattern 9 -- A (slower) 9 tap high pass using 9 texture fetches.
#if pattern == 9
// -- Gaussian filter --
// [ 1 , 1 , 1 ]
// [ 1 , 1 , 1 ]
// [ 1 , 1 , 1 ]
px = px * offset_bias;
py = py * offset_bias;
float blur_ori = LUMA_texOff(vec2(-px,py)).x; // North West
blur_ori += LUMA_texOff(vec2(px,-py)).x; // South East
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
blur_ori += ori; // Probably not needed. Only serves to lessen the effect.
blur_ori += LUMA_texOff(vec2(0.0,py)).x; // North
blur_ori += LUMA_texOff(vec2(0.0,-py)).x; // South
blur_ori += LUMA_texOff(vec2(-px,0.0)).x; // West
blur_ori += LUMA_texOff(vec2(px,0.0)).x; // East
blur_ori /= 9.0; //Divide by the number of texture fetches
sharp_strength_luma *= (8.0/9.0); // Adjust strength to approximate the strength of pattern 2
#endif
// -- Calculate the sharpening --
float sharp = ori - blur_ori; //Subtracting the blurred image from the original image
// -- Adjust strength of the sharpening and clamp it--
float sharp_strength_luma_clamp = sharp_strength_luma / (2.0 * sharp_clamp); //Roll part of the clamp into the dot
float sharp_luma = clamp((sharp * sharp_strength_luma_clamp + 0.5), 0.0,1.0 ); //Calculate the luma, adjust the strength, scale up and clamp
sharp_luma = (sharp_clamp * 2.0) * sharp_luma - sharp_clamp; //scale down
// -- Combining the values to get the final sharpened pixel --
colorInput.x = colorInput.x + sharp_luma; // Add the sharpening to the input color.
return clamp(colorInput, 0.0,1.0);
}
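Stripped of the sampling patterns, the pass above is a plain unsharp mask on luma: subtract a local blur, scale by the strength, limit the correction to +/- sharp_clamp (the 0.5-offset clamp in the shader is just that limit written without a signed clamp), and add it back. A scalar sketch, not part of the shader:

#include <math.h>

static float luma_sharpen(float ori, float blur, float strength, float limit)
{
    float sharp = (ori - blur) * strength;  /* high-pass term */

    if (sharp > limit)
        sharp = limit;
    if (sharp < -limit)
        sharp = -limit;
    return fminf(fmaxf(ori + sharp, 0.0f), 1.0f);
}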


@@ -0,0 +1,246 @@
// Copyright (c) 2015-2018, bacondither
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer
// in this position and unchanged.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Adaptive sharpen - version 2018-04-14 - (requires ps >= 3.0)
// Tuned for use post resize
//!HOOK SCALED
//!BIND HOOKED
//!SAVE ASSD
//!COMPONENTS 2
//!DESC adaptive-sharpen
//--------------------------------------- Settings ------------------------------------------------
#define curve_height 1.6 // Main control of sharpening strength [>0]
// 0.3 <-> 2.0 is a reasonable range of values
// Defined values under this row are "optimal" DO NOT CHANGE IF YOU DO NOT KNOW WHAT YOU ARE DOING!
#define curveslope 0.5 // Sharpening curve slope, high edge values
#define L_overshoot 0.003 // Max light overshoot before compression [>0.001]
#define L_compr_low 0.167 // Light compression, default (0.169=~9x)
#define L_compr_high 0.334 // Light compression, surrounded by edges (0.337=~4x)
#define D_overshoot 0.009 // Max dark overshoot before compression [>0.001]
#define D_compr_low 0.250 // Dark compression, default (0.253=~6x)
#define D_compr_high 0.500 // Dark compression, surrounded by edges (0.504=~2.5x)
#define scale_lim 0.1 // Abs max change before compression (0.1=+-10%)
#define scale_cs 0.056 // Compression slope above scale_lim
#define pm_p sat(1.0/curve_height) // Power mean p-value [>0-1.0]
//-------------------------------------------------------------------------------------------------
// Soft limit
#define soft_lim(v,s) ( (exp(2.0*min(abs(v), s*24.0)/s) - 1.0)/(exp(2.0*min(abs(v), s*24.0)/s) + 1.0)*s )
// Weighted power mean
#define wpmean(a,b,c) ( pow((c*pow(abs(a), pm_p) + (1.0-c)*pow(b, pm_p)), (1.0/pm_p)) )
// Get destination pixel values
#define get(x,y) ( HOOKED_texOff(vec2(x, y)).rgb )
#define sat(x) ( clamp(x, 0.0, 1.0) )
// Colour to luma, fast approx gamma, avg of rec. 709 & 601 luma coeffs
#define CtL(RGB) ( sqrt(dot(vec3(0.2558, 0.6511, 0.0931), pow(sat(RGB), vec3(2.0)))) )
// Center pixel diff
#define mdiff(a,b,c,d,e,f,g) ( abs(luma[g]-luma[a]) + abs(luma[g]-luma[b]) \
+ abs(luma[g]-luma[c]) + abs(luma[g]-luma[d]) \
+ 0.5*(abs(luma[g]-luma[e]) + abs(luma[g]-luma[f])) )
#define b_diff(pix) ( abs(blur-c[pix]) )
vec4 hook() {
vec4 o = HOOKED_tex(HOOKED_pos);
// Get points, saturate colour data in c[0]
// [ c22 ]
// [ c24, c9, c23 ]
// [ c21, c1, c2, c3, c18 ]
// [ c19, c10, c4, c0, c5, c11, c16 ]
// [ c20, c6, c7, c8, c17 ]
// [ c15, c12, c14 ]
// [ c13 ]
vec3 c[25] = vec3[](sat(o.rgb), get(-1,-1), get( 0,-1), get( 1,-1), get(-1, 0),
get( 1, 0), get(-1, 1), get( 0, 1), get( 1, 1), get( 0,-2),
get(-2, 0), get( 2, 0), get( 0, 2), get( 0, 3), get( 1, 2),
get(-1, 2), get( 3, 0), get( 2, 1), get( 2,-1), get(-3, 0),
get(-2, 1), get(-2,-1), get( 0,-3), get( 1,-2), get(-1,-2));
// Blur, gauss 3x3
vec3 blur = (2.0 * (c[2]+c[4]+c[5]+c[7]) + (c[1]+c[3]+c[6]+c[8]) + 4.0 * c[0]) / 16.0;
// Contrast compression, center = 0.5, scaled to 1/3
float c_comp = sat(0.266666681f + 0.9*exp2(dot(blur, vec3(-7.4/3.0))));
// Edge detection
// Relative matrix weights
// [ 1 ]
// [ 4, 5, 4 ]
// [ 1, 5, 6, 5, 1 ]
// [ 4, 5, 4 ]
// [ 1 ]
float edge = length( 1.38*b_diff(0)
+ 1.15*(b_diff(2) + b_diff(4) + b_diff(5) + b_diff(7))
+ 0.92*(b_diff(1) + b_diff(3) + b_diff(6) + b_diff(8))
+ 0.23*(b_diff(9) + b_diff(10) + b_diff(11) + b_diff(12)) ) * c_comp;
// RGB to luma
float c0_Y = CtL(c[0]);
float luma[25] = float[](c0_Y, CtL(c[1]), CtL(c[2]), CtL(c[3]), CtL(c[4]), CtL(c[5]), CtL(c[6]),
CtL(c[7]), CtL(c[8]), CtL(c[9]), CtL(c[10]), CtL(c[11]), CtL(c[12]),
CtL(c[13]), CtL(c[14]), CtL(c[15]), CtL(c[16]), CtL(c[17]), CtL(c[18]),
CtL(c[19]), CtL(c[20]), CtL(c[21]), CtL(c[22]), CtL(c[23]), CtL(c[24]));
// Precalculated default squared kernel weights
const vec3 w1 = vec3(0.5, 1.0, 1.41421356237); // 0.25, 1.0, 2.0
const vec3 w2 = vec3(0.86602540378, 1.0, 0.54772255751); // 0.75, 1.0, 0.3
// Transition to a concave kernel if the center edge val is above thr
vec3 dW = pow(mix( w1, w2, smoothstep( 0.3, 0.8, edge)), vec3(2.0));
float mdiff_c0 = 0.02 + 3.0*( abs(luma[0]-luma[2]) + abs(luma[0]-luma[4])
+ abs(luma[0]-luma[5]) + abs(luma[0]-luma[7])
+ 0.25*(abs(luma[0]-luma[1]) + abs(luma[0]-luma[3])
+abs(luma[0]-luma[6]) + abs(luma[0]-luma[8])) );
// Use lower weights for pixels in a more active area relative to center pixel area
// This results in narrower and less visible overshoots around sharp edges
float weights[12] = float[](( min((mdiff_c0/mdiff(24, 21, 2, 4, 9, 10, 1)), dW.y) ),
( dW.x ),
( min((mdiff_c0/mdiff(23, 18, 5, 2, 9, 11, 3)), dW.y) ),
( dW.x ),
( dW.x ),
( min((mdiff_c0/mdiff(4, 20, 15, 7, 10, 12, 6)), dW.y) ),
( dW.x ),
( min((mdiff_c0/mdiff(5, 7, 17, 14, 12, 11, 8)), dW.y) ),
( min((mdiff_c0/mdiff(2, 24, 23, 22, 1, 3, 9)), dW.z) ),
( min((mdiff_c0/mdiff(20, 19, 21, 4, 1, 6, 10)), dW.z) ),
( min((mdiff_c0/mdiff(17, 5, 18, 16, 3, 8, 11)), dW.z) ),
( min((mdiff_c0/mdiff(13, 15, 7, 14, 6, 8, 12)), dW.z) ));
weights[0] = (max(max((weights[8] + weights[9])/4.0, weights[0]), 0.25) + weights[0])/2.0;
weights[2] = (max(max((weights[8] + weights[10])/4.0, weights[2]), 0.25) + weights[2])/2.0;
weights[5] = (max(max((weights[9] + weights[11])/4.0, weights[5]), 0.25) + weights[5])/2.0;
weights[7] = (max(max((weights[10] + weights[11])/4.0, weights[7]), 0.25) + weights[7])/2.0;
// Calculate the negative part of the laplace kernel
float weightsum = 0.0;
float neg_laplace = 0.0;
for (int pix = 0; pix < 12; ++pix)
{
neg_laplace += luma[pix+1]*weights[pix];
weightsum += weights[pix];
}
neg_laplace = neg_laplace / weightsum;
// Compute sharpening magnitude function
float sharpen_val = (curve_height/(curve_height*curveslope*pow((edge), 3.5) + 0.625));
// Calculate sharpening diff and scale
float sharpdiff = (c0_Y - neg_laplace)*(sharpen_val + 0.01);
// Calculate local near min & max, partial sort
float temp;
for (int i1 = 0; i1 < 24; i1 += 2)
{
temp = luma[i1];
luma[i1] = min(luma[i1], luma[i1+1]);
luma[i1+1] = max(temp, luma[i1+1]);
}
for (int i2 = 24; i2 > 0; i2 -= 2)
{
temp = luma[0];
luma[0] = min(luma[0], luma[i2]);
luma[i2] = max(temp, luma[i2]);
temp = luma[24];
luma[24] = max(luma[24], luma[i2-1]);
luma[i2-1] = min(temp, luma[i2-1]);
}
for (int i1 = 1; i1 < 24-1; i1 += 2)
{
temp = luma[i1];
luma[i1] = min(luma[i1], luma[i1+1]);
luma[i1+1] = max(temp, luma[i1+1]);
}
for (int i2 = 24-1; i2 > 1; i2 -= 2)
{
temp = luma[1];
luma[1] = min(luma[1], luma[i2]);
luma[i2] = max(temp, luma[i2]);
temp = luma[24-1];
luma[24-1] = max(luma[24-1], luma[i2-1]);
luma[i2-1] = min(temp, luma[i2-1]);
}
float nmax = (max(luma[23], c0_Y)*3.0 + luma[24])/4.0;
float nmin = (min(luma[1], c0_Y)*3.0 + luma[0])/4.0;
// Calculate tanh scale factors
float min_dist = min(abs(nmax - c0_Y), abs(c0_Y - nmin));
float pos_scale = min_dist + min(L_overshoot, 1.0001 - min_dist - c0_Y);
float neg_scale = min_dist + min(D_overshoot, 0.0001 + c0_Y - min_dist);
pos_scale = min(pos_scale, scale_lim*(1.0 - scale_cs) + pos_scale*scale_cs);
neg_scale = min(neg_scale, scale_lim*(1.0 - scale_cs) + neg_scale*scale_cs);
// Soft limited anti-ringing with tanh, wpmean to control compression slope
sharpdiff = wpmean(max(sharpdiff, 0.0), soft_lim( max(sharpdiff, 0.0), pos_scale ), L_compr_low )
- wpmean(min(sharpdiff, 0.0), soft_lim( min(sharpdiff, 0.0), neg_scale ), D_compr_low );
return vec4(sharpdiff, c0_Y, 0, 1);
}
//!HOOK SCALED
//!BIND HOOKED
//!BIND ASSD
//!DESC adaptive-sharpen equalization
#define video_level_out false // True to preserve BTB & WTW (minor summation error)
// Normally it should be set to false
#define SD(x,y) ASSD_texOff(vec2(x,y)).r
vec4 hook() {
vec4 o = HOOKED_texOff(0);
float sharpdiff = SD( 0, 0) - 0.6 * 0.25 * (SD(-0.5,-0.5) + SD( 0.5,-0.5) + SD(-0.5, 0.5) + SD( 0.5, 0.5));
float c0_Y = ASSD_texOff(vec2(0)).g;
float sharpdiff_lim = clamp(c0_Y + sharpdiff, 0.0, 1.0) - c0_Y;
float satmul = (c0_Y + max(sharpdiff_lim*0.9, sharpdiff_lim)*1.03 + 0.03)/(c0_Y + 0.03);
vec3 res = c0_Y + (sharpdiff_lim*3 + sharpdiff)/4 + (clamp(o.rgb, 0.0, 1.0) - c0_Y)*satmul;
o.rgb = video_level_out == true ? res + o.rgb - clamp(o.rgb, 0.0, 1.0) : res;
return o;
}
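The soft_lim() macro used by the first pass is a tanh limiter spelled out with exp(): (e^(2x) - 1) / (e^(2x) + 1) equals tanh(x), so overshoots are compressed smoothly towards the scale s instead of being hard-clipped. Written directly in C (illustrative only; like the macro, it works on the magnitude of v):

#include <math.h>

static float soft_lim(float v, float s)
{
    float x = fminf(fabsf(v), 24.0f * s) / s;

    return tanhf(x) * s;
}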

shaders/filmgrain.glsl Normal file

@@ -0,0 +1,41 @@
//!HOOK LUMA
//!BIND HOOKED
//!DESC gaussian film grain
// normal value is 0.05, changed for demo purposes
#define INTENSITY 0.55
float permute(float x)
{
x = (34.0 * x + 1.0) * x;
return fract(x * 1.0/289.0) * 289.0;
}
float rand(inout float state)
{
state = permute(state);
return fract(state * 1.0/41.0);
}
vec4 hook()
{
vec3 m = vec3(HOOKED_pos, random) + vec3(1.0);
float state = permute(permute(m.x) + m.y) + m.z;
const float a0 = 0.151015505647689;
const float a1 = -0.5303572634357367;
const float a2 = 1.365020122861334;
const float b0 = 0.132089632343748;
const float b1 = -0.7607324991323768;
float p = 0.95 * rand(state) + 0.025;
float q = p - 0.5;
float r = q * q;
float grain = q * (a2 + (a1 * r + a0) / (r*r + b1*r + b0));
grain *= 0.255121822830526; // normalize to [-1,1)
vec4 color = HOOKED_tex(HOOKED_pos);
color.rgb += vec3(INTENSITY * grain);
return color;
}
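The grain generator above hashes the pixel position and the per-frame random value into uniform noise and then pushes it through a rational approximation of the inverse normal CDF, yielding roughly Gaussian grain. A CPU version of the same chain (constants copied from the shader; the position/frame seeding and the INTENSITY scaling are omitted):

#include <math.h>

static float permute(float x)
{
    x = (34.0f * x + 1.0f) * x;
    return fmodf(x, 289.0f);            /* same as fract(x/289)*289 for x >= 0 */
}

static float gaussian_grain(float *state)
{
    const float a0 = 0.151015505647689f, a1 = -0.5303572634357367f;
    const float a2 = 1.365020122861334f;
    const float b0 = 0.132089632343748f, b1 = -0.7607324991323768f;
    float p, q, r;

    *state = permute(*state);
    p = 0.95f * (*state / 41.0f - floorf(*state / 41.0f)) + 0.025f;
    q = p - 0.5f;
    r = q * q;
    return q * (a2 + (a1 * r + a0) / (r * r + b1 * r + b0)) * 0.255121822830526f;
}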

File diff suppressed because it is too large

@@ -44,19 +44,6 @@
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
// support old ffmpeg versions <1.0
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,18,102)
#define AVCodecID CodecID
#define AV_CODEC_ID_AAC CODEC_ID_AAC
#define AV_CODEC_ID_AAC_LATM CODEC_ID_AAC_LATM
#define AV_CODEC_ID_AC3 CODEC_ID_AC3
#define AV_CODEC_ID_EAC3 CODEC_ID_EAC3
#define AV_CODEC_ID_H264 CODEC_ID_H264
#define AV_CODEC_ID_MP2 CODEC_ID_MP2
#define AV_CODEC_ID_MPEG2VIDEO CODEC_ID_MPEG2VIDEO
#define AV_CODEC_ID_NONE CODEC_ID_NONE
#define AV_CODEC_ID_PCM_DVD CODEC_ID_PCM_DVD
#endif
#ifndef __USE_GNU
#define __USE_GNU
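The block removed above only mattered for FFmpeg releases older than libavcodec 55.18.102, which still used the CODEC_ID_* names. Guards of this kind compare the packed major/minor/micro value (AV_VERSION_INT(a, b, c) is ((a) << 16 | (b) << 8 | (c))), so with the compatibility defines gone a build against such an old library will now simply fail. A sketch of the pattern, not code from the plugin:

#include <libavcodec/avcodec.h>

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 18, 102)
#error "libavcodec releases before 55.18.102 are no longer supported here"
#endif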
@@ -107,6 +94,7 @@ static VideoStream *AudioSyncStream; ///< video stream for audio/video sync
#define AUDIO_MIN_BUFFER_FREE (3072 * 8 * 8)
#define AUDIO_BUFFER_SIZE (512 * 1024) ///< audio PES buffer default size
static AVPacket AudioAvPkt[1]; ///< audio a/v packet
int AudioDelay = 0;
//////////////////////////////////////////////////////////////////////////////
// Audio codec parser
@@ -244,10 +232,8 @@ static int MpegCheck(const uint8_t * data, int size)
break;
}
if (0) {
Debug(3,
"pesdemux: mpeg%s layer%d bitrate=%d samplerate=%d %d bytes\n",
mpeg25 ? "2.5" : mpeg2 ? "2" : "1", layer, bit_rate, sample_rate,
frame_size);
Debug(3, "pesdemux: mpeg%s layer%d bitrate=%d samplerate=%d %d bytes\n", mpeg25 ? "2.5" : mpeg2 ? "2" : "1",
layer, bit_rate, sample_rate, frame_size);
}
if (frame_size + 4 > size) {
@@ -592,8 +578,7 @@ static void PesInit(PesDemux * pesdx)
/// @param size number of payload data bytes
/// @param is_start flag, start of pes packet
///
static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
int is_start)
static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_start)
{
const uint8_t *p;
const uint8_t *q;
@@ -660,8 +645,8 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
q = pesdx->Buffer + pesdx->Skip;
n = pesdx->Index - pesdx->Skip;
while (n >= 5) {
int r;
unsigned codec_id;
int r = 0;
unsigned codec_id = AV_CODEC_ID_NONE;
// 4 bytes 0xFFExxxxx Mpeg audio
// 5 bytes 0x0B77xxxxxx AC-3 audio
@@ -670,8 +655,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
// 7/9 bytes 0xFFFxxxxxxxxxxx ADTS audio
// PCM audio can't be found
// FIXME: simple+faster detection, if codec already known
r = 0;
if (!r && FastMpegCheck(q)) {
if (FastMpegCheck(q)) {
r = MpegCheck(q, n);
codec_id = AV_CODEC_ID_MP2;
}
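The detection described in the comment above screens the buffer against well-known sync words before running the full header checks such as FastMpegCheck()/MpegCheck(). A hedged sketch of that first-byte screening; the helper is illustrative and not part of the demuxer:

#include <stdint.h>

static int looks_like_audio_sync(const uint8_t *q)
{
    if (q[0] == 0x0B && q[1] == 0x77)
        return 1;                       /* AC-3 / E-AC-3 sync word 0x0B77 */
    if (q[0] == 0xFF && (q[1] & 0xE0) == 0xE0)
        return 1;                       /* MPEG audio (0xFFE) or ADTS (0xFFF) */
    return 0;
}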
@@ -698,8 +682,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
// new codec id, close and open new
if (AudioCodecID != codec_id) {
Debug(3, "pesdemux: new codec %#06x -> %#06x\n",
AudioCodecID, codec_id);
Debug(3, "pesdemux: new codec %#06x -> %#06x\n", AudioCodecID, codec_id);
CodecAudioClose(MyAudioDecoder);
CodecAudioOpen(MyAudioDecoder, codec_id);
AudioCodecID = codec_id;
@@ -721,8 +704,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
if (AudioCodecID != AV_CODEC_ID_NONE) {
// shouldn't happen after we have a valid codec
// detected
Debug(4, "pesdemux: skip @%d %02x\n", pesdx->Skip,
q[0]);
Debug(4, "pesdemux: skip @%d %02x\n", pesdx->Skip, q[0]);
}
// try next byte
++pesdx->Skip;
@@ -795,21 +777,20 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
if ((pesdx->Header[7] & 0xC0) == 0x80) {
pts =
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 |
(data[11] & 0xFE) << 14 | data[12] << 7 | (data[13]
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7
| (data[13]
& 0xFE) >> 1;
pesdx->PTS = pts;
pesdx->DTS = AV_NOPTS_VALUE;
} else if ((pesdx->Header[7] & 0xC0) == 0xC0) {
pts =
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 |
(data[11] & 0xFE) << 14 | data[12] << 7 | (data[13]
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7
| (data[13]
& 0xFE) >> 1;
pesdx->PTS = pts;
dts =
(int64_t) (data[14] & 0x0E) << 29 | data[15] << 22
| (data[16] & 0xFE) << 14 | data[17] << 7 |
(data[18] & 0xFE) >> 1;
(int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] <<
7 | (data[18] & 0xFE) >> 1;
pesdx->DTS = dts;
Debug(4, "pesdemux: pts %#012" PRIx64 " %#012" PRIx64 "\n", pts, dts);
}
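The shifts and masks above unpack the 33-bit PTS/DTS that a PES header spreads across five bytes with interleaved marker bits. A small helper making the layout explicit; p points at the first timestamp byte (data[9] for the PTS, data[14] for the DTS), and the function is only a sketch, not code from the plugin:

#include <stdint.h>

static int64_t pes_get_timestamp(const uint8_t *p)
{
    return (int64_t)(p[0] & 0x0E) << 29 |   /* bits 32..30 */
           (int64_t) p[1]         << 22 |   /* bits 29..22 */
           (int64_t)(p[2] & 0xFE) << 14 |   /* bits 21..15 */
           (int64_t) p[3]         << 7  |   /* bits 14..7  */
           (int64_t)(p[4] & 0xFE) >> 1;     /* bits  6..0  */
}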
@@ -844,31 +825,24 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size,
if (AudioCodecID != AV_CODEC_ID_PCM_DVD) {
q = pesdx->Header;
Debug(3, "pesdemux: LPCM %d sr:%d bits:%d chan:%d\n",
q[0], q[5] >> 4, (((q[5] >> 6) & 0x3) + 4) * 4,
(q[5] & 0x7) + 1);
Debug(3, "pesdemux: LPCM %d sr:%d bits:%d chan:%d\n", q[0], q[5] >> 4,
(((q[5] >> 6) & 0x3) + 4) * 4, (q[5] & 0x7) + 1);
CodecAudioClose(MyAudioDecoder);
bits_per_sample = (((q[5] >> 6) & 0x3) + 4) * 4;
if (bits_per_sample != 16) {
Error(_
("softhddev: LPCM %d bits per sample aren't supported\n"),
bits_per_sample);
Error(_("softhddev: LPCM %d bits per sample aren't supported\n"), bits_per_sample);
// FIXME: handle unsupported formats.
}
samplerate = samplerates[q[5] >> 4];
channels = (q[5] & 0x7) + 1;
AudioSetup(&samplerate, &channels, 0);
if (samplerate != samplerates[q[5] >> 4]) {
Error(_
("softhddev: LPCM %d sample-rate is unsupported\n"),
samplerates[q[5] >> 4]);
Error(_("softhddev: LPCM %d sample-rate is unsupported\n"), samplerates[q[5] >> 4]);
// FIXME: support resample
}
if (channels != (q[5] & 0x7) + 1) {
Error(_
("softhddev: LPCM %d channels are unsupported\n"),
(q[5] & 0x7) + 1);
Error(_("softhddev: LPCM %d channels are unsupported\n"), (q[5] & 0x7) + 1);
// FIXME: support resample
}
//CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD);
@@ -962,8 +936,7 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size)
}
#ifdef DEBUG
pid = (p[1] & 0x1F) << 8 | p[2];
Debug(4, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "",
p[3] & 0x10 ? " payload" : "");
Debug(4, "tsdemux: PID: %#04x%s%s\n", pid, p[1] & 0x40 ? " start" : "", p[3] & 0x10 ? " payload" : "");
#endif
// skip adaptation field
switch (p[3] & 0x30) { // adaptation field
@@ -1027,6 +1000,12 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id)
if (StreamFreezed) { // stream freezed
return 0;
}
if (AudioDelay) {
Debug(3, "AudioDelay %dms\n", AudioDelay);
usleep(AudioDelay / 90);
AudioDelay = 0;
return 0;
}
if (NewAudioStream) {
// this clears the audio ringbuffer indirect, open and setup does it
CodecAudioClose(MyAudioDecoder);
@@ -1042,8 +1021,7 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id)
}
#ifdef USE_SOFTLIMIT
// soft limit buffer full
if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3
&& AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) {
if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3 && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) {
return 0;
}
#endif
@@ -1068,14 +1046,13 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id)
if (data[7] & 0x80 && n >= 5) {
AudioAvPkt->pts =
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] &
0xFE) << 14 | data[12] << 7 | (data[13] & 0xFE) >> 1;
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] &
0xFE) >> 1;
// Debug(3, "audio: pts %#012" PRIx64 "\n", AudioAvPkt->pts);
}
if (0) { // dts is unused
if (data[7] & 0x40) {
AudioAvPkt->dts =
(int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16]
AudioAvPkt->dts = (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16]
& 0xFE) << 14 | data[17] << 7 | (data[18] & 0xFE) >> 1;
Debug(3, "audio: dts %#012" PRIx64 "\n", AudioAvPkt->dts);
}
@@ -1105,16 +1082,13 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id)
int channels;
int bits_per_sample;
Debug(3, "[softhddev]%s: LPCM %d sr:%d bits:%d chan:%d\n",
__FUNCTION__, id, p[5] >> 4, (((p[5] >> 6) & 0x3) + 4) * 4,
(p[5] & 0x7) + 1);
Debug(3, "[softhddev]%s: LPCM %d sr:%d bits:%d chan:%d\n", __FUNCTION__, id, p[5] >> 4,
(((p[5] >> 6) & 0x3) + 4) * 4, (p[5] & 0x7) + 1);
CodecAudioClose(MyAudioDecoder);
bits_per_sample = (((p[5] >> 6) & 0x3) + 4) * 4;
if (bits_per_sample != 16) {
Error(_
("[softhddev] LPCM %d bits per sample aren't supported\n"),
bits_per_sample);
Error(_("[softhddev] LPCM %d bits per sample aren't supported\n"), bits_per_sample);
// FIXME: handle unsupported formats.
}
samplerate = samplerates[p[5] >> 4];
@@ -1124,13 +1098,11 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id)
AudioSetBufferTime(400);
AudioSetup(&samplerate, &channels, 0);
if (samplerate != samplerates[p[5] >> 4]) {
Error(_("[softhddev] LPCM %d sample-rate is unsupported\n"),
samplerates[p[5] >> 4]);
Error(_("[softhddev] LPCM %d sample-rate is unsupported\n"), samplerates[p[5] >> 4]);
// FIXME: support resample
}
if (channels != (p[5] & 0x7) + 1) {
Error(_("[softhddev] LPCM %d channels are unsupported\n"),
(p[5] & 0x7) + 1);
Error(_("[softhddev] LPCM %d channels are unsupported\n"), (p[5] & 0x7) + 1);
// FIXME: support resample
}
//CodecAudioOpen(MyAudioDecoder, AV_CODEC_ID_PCM_DVD);
@@ -1257,6 +1229,7 @@ int PlayTsAudio(const uint8_t * data, int size)
if (StreamFreezed) { // stream freezed
return 0;
}
if (NewAudioStream) {
// this clears the audio ringbuffer indirect, open and setup does it
CodecAudioClose(MyAudioDecoder);
@@ -1274,12 +1247,17 @@ int PlayTsAudio(const uint8_t * data, int size)
}
#ifdef USE_SOFTLIMIT
// soft limit buffer full
if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3
&& AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) {
if (AudioSyncStream && VideoGetBuffers(AudioSyncStream) > 3 && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) {
return 0;
}
#endif
if (AudioDelay) {
Debug(3, "AudioDelay %dms\n", AudioDelay);
usleep(AudioDelay * 1000);
AudioDelay = 0;
// TsDemuxer(tsdx, data, size); // insert dummy audio
}
return TsDemuxer(tsdx, data, size);
}
@@ -1348,6 +1326,7 @@ static VideoStream MyVideoStream[1]; ///< normal video stream
#ifdef USE_PIP
static VideoStream PipVideoStream[1]; ///< pip video stream
static int PiPActive = 0, mwx, mwy, mww, mwh; ///< main window frame for PiP
#endif
#ifdef DEBUG
@@ -1399,7 +1378,7 @@ static void VideoPacketExit(VideoStream * stream)
atomic_set(&stream->PacketsFilled, 0);
for (i = 0; i < VIDEO_PACKET_MAX; ++i) {
av_free_packet(&stream->PacketRb[i]);
av_packet_unref(&stream->PacketRb[i]);
}
}
@@ -1411,8 +1390,7 @@ static void VideoPacketExit(VideoStream * stream)
** @param data data of pes packet
** @param size size of pes packet
*/
static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const void *data,
int size)
static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const void *data, int size)
{
AVPacket *avpkt;
@@ -1512,7 +1490,7 @@ static void VideoNextPacket(VideoStream * stream, int codec_id)
VideoResetPacket(stream);
}
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
/**
** Place mpeg video data in packet ringbuffer.
@@ -1528,8 +1506,7 @@ static void VideoNextPacket(VideoStream * stream, int codec_id)
** @param data data of pes packet
** @param size size of pes packet
*/
static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts,
const uint8_t * data, int size)
static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const uint8_t * data, int size)
{
static const char startcode[3] = { 0x00, 0x00, 0x01 };
const uint8_t *p;
@@ -1687,6 +1664,8 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts,
**
** @param avpkt ffmpeg a/v packet
*/
#ifndef USE_PIP
static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt)
{
uint8_t *p;
@@ -1723,8 +1702,8 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt)
tmp->size = p - tmp->data;
#if STILL_DEBUG>1
if (InStillPicture) {
fprintf(stderr, "\nfix:%9d,%02x %02x %02x %02x\n", tmp->size,
tmp->data[0], tmp->data[1], tmp->data[2], tmp->data[3]);
fprintf(stderr, "\nfix:%9d,%02x %02x %02x %02x\n", tmp->size, tmp->data[0], tmp->data[1], tmp->data[2],
tmp->data[3]);
}
#endif
CodecVideoDecode(vdecoder, tmp);
@@ -1740,14 +1719,13 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt)
#if STILL_DEBUG>1
if (InStillPicture) {
fprintf(stderr, "\nfix:%9d.%02x %02x %02x %02x\n", tmp->size,
tmp->data[0], tmp->data[1], tmp->data[2], tmp->data[3]);
fprintf(stderr, "\nfix:%9d.%02x %02x %02x %02x\n", tmp->size, tmp->data[0], tmp->data[1], tmp->data[2],
tmp->data[3]);
}
#endif
CodecVideoDecode(vdecoder, tmp);
}
#endif
/**
** Open video stream.
@@ -1780,6 +1758,7 @@ static void VideoStreamClose(VideoStream * stream, int delhw)
stream->SkipStream = 1;
if (stream->Decoder) {
VideoDecoder *decoder;
Debug(3, "VideoStreamClose");
decoder = stream->Decoder;
// FIXME: remove this lock for main stream close
@@ -1853,7 +1832,7 @@ int VideoPollInput(VideoStream * stream)
** @retval 1 stream paused
** @retval -1 empty stream
*/
int VideoDecodeInput(VideoStream * stream)
int VideoDecodeInput(VideoStream * stream, int trick)
{
int filled;
AVPacket *avpkt;
@@ -1871,6 +1850,9 @@ int VideoDecodeInput(VideoStream * stream)
stream->Close = 0;
return 1;
}
if (stream->ClearBuffers && trick)
stream->ClearBuffers = 0;
if (stream->ClearBuffers) { // clear buffer request
atomic_set(&stream->PacketsFilled, 0);
stream->PacketRead = stream->PacketWrite;
@@ -1895,20 +1877,18 @@ int VideoDecodeInput(VideoStream * stream)
}
#if 0
// clearing for normal channel switch has no advantage
if (stream->ClearClose /*|| stream->ClosingStream */ ) {
if (stream->ClearClose || stream->ClosingStream) {
int f;
// FIXME: during replay all packets are always checked
// flush buffers, if close is in the queue
for (f = 0; f < filled; ++f) {
if (stream->CodecIDRb[(stream->PacketRead + f) % VIDEO_PACKET_MAX]
== AV_CODEC_ID_NONE) {
if (stream->CodecIDRb[(stream->PacketRead + f) % VIDEO_PACKET_MAX] == AV_CODEC_ID_NONE) {
if (f) {
Debug(3, "video: cleared upto close\n");
atomic_sub(f, &stream->PacketsFilled);
stream->PacketRead =
(stream->PacketRead + f) % VIDEO_PACKET_MAX;
stream->PacketRead = (stream->PacketRead + f) % VIDEO_PACKET_MAX;
stream->ClearClose = 0;
}
break;
@@ -1963,7 +1943,7 @@ int VideoDecodeInput(VideoStream * stream)
avpkt->size = avpkt->stream_index;
avpkt->stream_index = 0;
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
// fprintf(stderr, "[");
// DumpMpeg(avpkt->data, avpkt->size);
#ifdef STILL_DEBUG
@@ -2127,8 +2107,7 @@ static int ValidateMpeg(const uint8_t * data, int size)
return -1;
}
if (data[0] || data[1] || data[2] != 0x01) {
printf("%02x: %02x %02x %02x %02x %02x\n", data[-1], data[0],
data[1], data[2], data[3], data[4]);
printf("%02x: %02x %02x %02x %02x %02x\n", data[-1], data[0], data[1], data[2], data[3], data[4]);
return -1;
}
@@ -2166,6 +2145,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
int n;
int z;
int l;
if (!stream->Decoder) { // no x11 video started
return size;
}
@@ -2196,8 +2176,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
}
if (stream->InvalidPesCounter) {
if (stream->InvalidPesCounter > 1) {
Error(_("[softhddev] %d invalid PES video packet(s)\n"),
stream->InvalidPesCounter);
Error(_("[softhddev] %d invalid PES video packet(s)\n"), stream->InvalidPesCounter);
}
stream->InvalidPesCounter = 0;
}
@@ -2217,12 +2196,13 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
}
// hard limit buffer full: needed for replay
if (atomic_read(&stream->PacketsFilled) >= VIDEO_PACKET_MAX - 10) {
Debug(3, "video: video buffer full\n");
// Debug(3, "video: video buffer full\n");
return 0;
}
#ifdef USE_SOFTLIMIT
// soft limit buffer full
if (AudioSyncStream == stream && atomic_read(&stream->PacketsFilled) > 3 && AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) {
if (AudioSyncStream == stream && atomic_read(&stream->PacketsFilled) > 3
&& AudioUsedBytes() > AUDIO_MIN_BUFFER_FREE * 2) {
return 0;
}
#endif
@@ -2230,14 +2210,17 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
pts = AV_NOPTS_VALUE;
dts = AV_NOPTS_VALUE;
if ((data[7] & 0xc0) == 0x80) {
pts = (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] &
0xFE) << 14 | data[12] << 7 | (data[13] & 0xFE) >> 1;
pts =
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] &
0xFE) >> 1;
}
if ((data[7] & 0xC0) == 0xc0) {
pts = (int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] &
0xFE) << 14 | data[12] << 7 | (data[13] & 0xFE) >> 1;
dts = (int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] &
0xFE) << 14 | data[17] << 7 | (data[18] & 0xFE) >> 1;
pts =
(int64_t) (data[9] & 0x0E) << 29 | data[10] << 22 | (data[11] & 0xFE) << 14 | data[12] << 7 | (data[13] &
0xFE) >> 1;
dts =
(int64_t) (data[14] & 0x0E) << 29 | data[15] << 22 | (data[16] & 0xFE) << 14 | data[17] << 7 | (data[18] &
0xFE) >> 1;
}
check = data + 9 + n;
@@ -2265,11 +2248,8 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
int fd;
static int FrameCounter;
snprintf(buf, sizeof(buf), "frame_%06d_%08d.raw", getpid(),
FrameCounter++);
if ((fd =
open(buf, O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC,
0666)) >= 0) {
snprintf(buf, sizeof(buf), "frame_%06d_%08d.raw", getpid(), FrameCounter++);
if ((fd = open(buf, O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0666)) >= 0) {
if (write(fd, data + 9 + n, size - 9 - n)) {
// this construct is to remove the annoying warning
}
@@ -2325,7 +2305,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
}
// SKIP PES header, begin of start code
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
VideoMpegEnqueue(stream, pts, dts, check - 2, l + 2);
#else
VideoEnqueue(stream, pts, dts, check - 2, l + 2);
@@ -2338,7 +2318,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
return size;
}
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
if (stream->CodecID == AV_CODEC_ID_MPEG2VIDEO) {
// SKIP PES header
VideoMpegEnqueue(stream, pts, dts, data + 9 + n, size - 9 - n);
@@ -2412,8 +2392,7 @@ extern uint8_t *CreateJpeg(uint8_t *, int *, int, int, int);
**
** @returns allocated jpeg image.
*/
uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality,
int width, int height)
uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, int width, int height)
{
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
@@ -2568,8 +2547,7 @@ void GetVideoSize(int *width, int *height, double *aspect)
int aspect_den;
if (MyVideoStream->HwDecoder) {
VideoGetVideoSize(MyVideoStream->HwDecoder, width, height, &aspect_num,
&aspect_den);
VideoGetVideoSize(MyVideoStream->HwDecoder, width, height, &aspect_num, &aspect_den);
*aspect = (double)aspect_num / (double)aspect_den;
} else {
*width = 0;
@@ -2579,8 +2557,7 @@ void GetVideoSize(int *width, int *height, double *aspect)
#ifdef DEBUG
if (done_width != *width || done_height != *height) {
Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height,
*aspect);
Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height, *aspect);
done_width = *width;
done_height = *height;
}
@@ -2628,8 +2605,7 @@ void Clear(void)
for (i = 0; MyVideoStream->ClearBuffers && i < 20; ++i) {
usleep(1 * 100);
}
Debug(3, "[softhddev]%s: %dms buffers %d\n", __FUNCTION__, i,
VideoGetBuffers(MyVideoStream));
Debug(3, "[softhddev]%s: %dms buffers %d\n", __FUNCTION__, i, VideoGetBuffers(MyVideoStream));
}
/**
@@ -2676,7 +2652,6 @@ void StillPicture(const uint8_t * data, int size)
// H265 NAL End of Sequence
static uint8_t seq_end_h265[] = { 0x00, 0x00, 0x00, 0x01, 0x48, 0x01 }; //0x48 = end of seq 0x4a = end of stream
int i;
int old_video_hardware_decoder;
// might be called in Suspended Mode
if (!MyVideoStream->Decoder || MyVideoStream->SkipStream) {
@@ -2695,7 +2670,6 @@ void StillPicture(const uint8_t * data, int size)
VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream
if (MyVideoStream->CodecID == AV_CODEC_ID_NONE) {
// FIXME: should detect codec, see PlayVideo
Error(_("[softhddev] no codec known for still picture\n"));
@@ -2705,12 +2679,14 @@ void StillPicture(const uint8_t * data, int size)
#ifdef STILL_DEBUG
fprintf(stderr, "still-picture\n");
#endif
for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 8 : 8); ++i) {
for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 12 : 12); ++i) {
const uint8_t *split;
int n;
// FIXME: vdr pes recordings send mixed audio/video
if ((data[3] & 0xF0) == 0xE0) { // PES packet
split = data;
n = size;
// split the I-frame into single pes packets
@@ -2744,7 +2720,7 @@ void StillPicture(const uint8_t * data, int size)
VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet
} else { // ES packet
if (MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) {
if (0 && MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) {
VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream
MyVideoStream->CodecID = AV_CODEC_ID_MPEG2VIDEO;
}
@@ -2802,8 +2778,7 @@ int Poll(int timeout)
filled = atomic_read(&MyVideoStream->PacketsFilled);
// soft limit + hard limit
full = (used > AUDIO_MIN_BUFFER_FREE && filled > 3)
|| AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE
|| filled >= VIDEO_PACKET_MAX - 10;
|| AudioFreeBytes() < AUDIO_MIN_BUFFER_FREE || filled >= VIDEO_PACKET_MAX - 10;
if (!full || !timeout) {
return !full;
@@ -2857,8 +2832,7 @@ void GetOsdSize(int *width, int *height, double *aspect)
#ifdef DEBUG
if (done_width != *width || done_height != *height) {
Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height,
*aspect);
Debug(3, "[softhddev]%s: %dx%d %g\n", __FUNCTION__, *width, *height, *aspect);
done_width = *width;
done_height = *height;
}
@@ -2885,8 +2859,7 @@ void OsdClose(void)
** @param x x-coordinate on screen of argb image
** @param y y-coordinate on screen of argb image
*/
void OsdDrawARGB(int xi, int yi, int height, int width, int pitch,
const uint8_t * argb, int x, int y)
void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t * argb, int x, int y)
{
// wakeup display for showing remote learning dialog
VideoDisplayWakeup();
@@ -2902,15 +2875,14 @@ const char *CommandLineHelp(void)
{
return " -a device\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n"
" -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n"
" -c channel\taudio mixer channel name (fe. PCM)\n"
" -d display\tdisplay of x11 server (fe. :0.0)\n"
" -c channel\taudio mixer channel name (fe. PCM)\n" " -d display\tdisplay of x11 server (fe. :0.0)\n"
" -f\t\tstart with fullscreen window (only with window manager)\n"
" -g geometry\tx11 window geometry wxh+x+y\n"
" -v device\tvideo driver device (va-api, vdpau, noop)\n"
" -s\t\tstart in suspended mode\n"
" -g geometry\tx11 window geometry wxh+x+y\n" " -r Refresh\tRefreshrate for DRM (default is 50 Hz)\n"
" -C Connector\tConnector for DRM (default is current Connector)\n"
" -S shader\tShader to use.\n\t\tOnly with placebo. Can be repeated for more shaders\n"
" -v device\tvideo driver device (cuvid)\n" " -s\t\tstart in suspended mode\n"
" -x\t\tstart x11 server, with -xx try to connect, if this fails\n"
" -X args\tX11 server arguments (f.e. -nocursor)\n"
" -w workaround\tenable/disable workarounds\n"
" -X args\tX11 server arguments (f.e. -nocursor)\n" " -w workaround\tenable/disable workarounds\n"
"\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n"
"\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n"
"\tstill-hw-decoder\tenable hardware decoder for still-pictures\n"
@@ -2919,8 +2891,7 @@ const char *CommandLineHelp(void)
"\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n"
"\talsa-close-open-delay\tenable close open delay to fix no sound bug\n"
"\tignore-repeat-pict\tdisable repeat pict message\n"
"\tuse-possible-defect-frames prefer faster channel switch\n"
" -D\t\tstart in detached mode\n";
"\tuse-possible-defect-frames prefer faster channel switch\n" " -D\t\tstart in detached mode\n";
}
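The help text above documents -S as repeatable, one shader per occurrence, with an upper bound apparently enforced by VideoSetShader() returning a negative value. A minimal sketch of collecting repeated -S options with getopt(); MAX_SHADERS and the shaders[] array are hypothetical stand-ins for whatever the video backend keeps internally:

#include <stdio.h>
#include <unistd.h>

#define MAX_SHADERS 8

static const char *shaders[MAX_SHADERS];
static int num_shaders;

static int add_shader(const char *name)
{
    if (num_shaders >= MAX_SHADERS)
        return -1;                      /* mirrors VideoSetShader() < 0 */
    shaders[num_shaders++] = name;
    return 0;
}

int main(int argc, char *const argv[])
{
    int c;

    while ((c = getopt(argc, argv, "S:")) != -1) {
        if (c == 'S' && add_shader(optarg) < 0) {
            fprintf(stderr, "Too many shaders defined\n");
            return 1;
        }
    }
    return 0;
}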
/**
@@ -2942,13 +2913,25 @@ int ProcessArgs(int argc, char *const argv[])
#endif
for (;;) {
switch (getopt(argc, argv, "-a:c:d:fg:p:sv:w:xDX:")) {
switch (getopt(argc, argv, "-a:c:C:r:d:fg:p:S:sv:w:xDX:")) {
case 'a': // audio device for pcm
AudioSetDevice(optarg);
continue;
case 'c': // channel of audio mixer
AudioSetChannel(optarg);
continue;
+case 'C': // Connector for DRM
+VideoSetConnector(optarg);
+continue;
+case 'r': // Refresh rate for DRM
+VideoSetRefresh(optarg);
+continue;
+case 'S': // Shader
+if (VideoSetShader(optarg) < 0) {
+fprintf(stderr, _("Too many shaders defined\n"));
+return 0;
+}
+continue;
case 'p': // pass-through audio device
AudioSetPassthroughDevice(optarg);
continue;
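
The new -C, -r and -S options are parsed like the existing ones, with -S allowed more than once so several shaders can be stacked. A rough, self-contained sketch of such a repeatable-option collector follows; MAX_SHADERS, Shaders[] and CollectShader() are illustrative names, not the plugin's actual implementation behind VideoSetShader().

    #include <stdio.h>
    #include <unistd.h>

    #define MAX_SHADERS 10              // illustrative limit

    static char *Shaders[MAX_SHADERS];  // collected -S arguments
    static int NumShaders;              // number of shaders seen so far

    static int CollectShader(char *name)
    {
        if (NumShaders >= MAX_SHADERS) {
            return -1;                  // too many shaders given
        }
        Shaders[NumShaders++] = name;   // optarg stays valid for the life of argv
        return 0;
    }

    int main(int argc, char *const argv[])
    {
        for (;;) {
            switch (getopt(argc, argv, "S:")) {
                case 'S':               // -S may be repeated
                    if (CollectShader(optarg) < 0) {
                        fprintf(stderr, "Too many shaders defined\n");
                        return 1;
                    }
                    continue;
                case -1:                // all options consumed
                    break;
                default:                // unknown option or missing argument
                    continue;
            }
            break;
        }
        for (int i = 0; i < NumShaders; ++i) {
            printf("shader %d: %s\n", i, Shaders[i]);
        }
        return 0;
    }
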
@@ -2961,8 +2944,7 @@ int ProcessArgs(int argc, char *const argv[])
case 'g': // geometry
if (VideoSetGeometry(optarg) < 0) {
fprintf(stderr,
-_
-("Bad formated geometry please use: [=][<width>{xX}<height>][{+-}<xoffset>{+-}<yoffset>]\n"));
+_("Bad formated geometry please use: [=][<width>{xX}<height>][{+-}<xoffset>{+-}<yoffset>]\n"));
return 0;
}
continue;
@@ -3004,8 +2986,7 @@ int ProcessArgs(int argc, char *const argv[])
} else if (!strcasecmp("use-possible-defect-frames", optarg)) {
CodecUsePossibleDefectFrames = 1;
} else {
fprintf(stderr, _("Workaround '%s' unsupported\n"),
optarg);
fprintf(stderr, _("Workaround '%s' unsupported\n"), optarg);
return 0;
}
continue;
@@ -3015,8 +2996,7 @@ int ProcessArgs(int argc, char *const argv[])
fprintf(stderr, _("We need no long options\n"));
return 0;
case ':':
fprintf(stderr, _("Missing argument for option '%c'\n"),
optopt);
fprintf(stderr, _("Missing argument for option '%c'\n"), optopt);
return 0;
default:
fprintf(stderr, _("Unknown option '%c'\n"), optopt);
@@ -3116,8 +3096,7 @@ static void StartXServer(void)
usr1.sa_handler = Usr1Handler;
sigaction(SIGUSR1, &usr1, NULL);
Debug(3, "x-setup: Starting X server '%s' '%s'\n", args[0],
X11ServerArguments);
Debug(3, "x-setup: Starting X server '%s' '%s'\n", args[0], X11ServerArguments);
// fork
if ((pid = fork())) { // parent
@@ -3158,7 +3137,7 @@ void SoftHdDeviceExit(void)
MyAudioDecoder = NULL;
}
NewAudioStream = 0;
-av_free_packet(AudioAvPkt);
+av_packet_unref(AudioAvPkt);
StopVideo();
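
Replacing av_free_packet() with av_packet_unref() follows the FFmpeg API change: av_free_packet() is deprecated and removed in newer libavcodec releases, while av_packet_unref() drops the packet's payload but leaves the AVPacket itself reusable. A minimal sketch of the modern lifecycle, assuming a heap-allocated packet rather than the plugin's own AudioAvPkt storage:

    #include <libavcodec/avcodec.h>

    /* Sketch only: modern FFmpeg packet handling with a heap-allocated packet. */
    static void PacketLifecycle(void)
    {
        AVPacket *pkt = av_packet_alloc();      // allocate and zero-initialize
        if (!pkt) {
            return;                             // allocation failed
        }
        /* ... fill pkt->data / pkt->size and hand it to a decoder ... */
        av_packet_unref(pkt);                   // release the payload, keep the struct
        av_packet_free(&pkt);                   // finally free the struct itself
    }
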
@@ -3189,12 +3168,10 @@ void SoftHdDeviceExit(void)
kill(X11ServerPid, SIGKILL);
} while (waittime < timeout);
if (wpid && WIFEXITED(status)) {
Debug(3, "x-setup: x11 server exited (%d)\n",
WEXITSTATUS(status));
Debug(3, "x-setup: x11 server exited (%d)\n", WEXITSTATUS(status));
}
if (wpid && WIFSIGNALED(status)) {
Debug(3, "x-setup: x11 server killed (%d)\n",
WTERMSIG(status));
Debug(3, "x-setup: x11 server killed (%d)\n", WTERMSIG(status));
}
}
}
@@ -3247,8 +3224,7 @@ int Start(void)
PesInit(PesDemuxAudio);
#endif
Info(_("[softhddev] ready%s\n"),
-ConfigStartSuspended ? ConfigStartSuspended ==
--1 ? " detached" : " suspended" : "");
+ConfigStartSuspended ? ConfigStartSuspended == -1 ? " detached" : " suspended" : "");
return ConfigStartSuspended;
}
@@ -3281,12 +3257,10 @@ void Housekeeping(void)
wpid = waitpid(X11ServerPid, &status, WNOHANG);
if (wpid) {
if (WIFEXITED(status)) {
Debug(3, "x-setup: x11 server exited (%d)\n",
WEXITSTATUS(status));
Debug(3, "x-setup: x11 server exited (%d)\n", WEXITSTATUS(status));
}
if (WIFSIGNALED(status)) {
Debug(3, "x-setup: x11 server killed (%d)\n",
WTERMSIG(status));
Debug(3, "x-setup: x11 server killed (%d)\n", WTERMSIG(status));
}
X11ServerPid = 0;
// video not running
@@ -3352,7 +3326,7 @@ void Suspend(int video, int audio, int dox11)
MyAudioDecoder = NULL;
}
NewAudioStream = 0;
-av_free_packet(AudioAvPkt);
+av_packet_unref(AudioAvPkt);
}
if (video) {
StopVideo();
@@ -3407,15 +3381,21 @@ void Resume(void)
** @param[out] dropped dropped frames
** @param[out] count number of decoded frames
*/
-void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime)
+void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime, int *width, int *height,
+int *color, int *eotf)
{
*missed = 0;
*duped = 0;
*dropped = 0;
*counter = 0;
*frametime = 0.0f;
+*width = 0;
+*height = 0;
+*color = 0;
+*eotf = 0;
if (MyVideoStream->HwDecoder) {
-VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime);
+VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime, width, height, color,
+eotf);
}
}
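
GetStats() now also reports the stream geometry plus colorimetry and EOTF identifiers from the hardware decoder. A hedged sketch of how a caller might consume the widened interface; the printf layout is illustrative and the frametime unit is whatever the video layer reports.

    #include <stdio.h>

    /* Mirrors the extended prototype from softhddev.h. */
    extern void GetStats(int *, int *, int *, int *, float *, int *, int *, int *, int *);

    static void PrintDecoderStats(void)
    {
        int missed, duped, dropped, counter, width, height, color, eotf;
        float frametime;

        GetStats(&missed, &duped, &dropped, &counter, &frametime, &width, &height, &color, &eotf);
        printf("frames %d (missed %d duped %d dropped %d) %dx%d frametime %g color %d eotf %d\n",
            counter, missed, duped, dropped, width, height, frametime, color, eotf);
    }
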
@@ -3429,6 +3409,12 @@ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *framet
*/
void ScaleVideo(int x, int y, int width, int height)
{
+#ifdef USE_PIP
+if (PiPActive && !(x & y & width & height)) {
+Info("[softhddev]%s: fullscreen with PiP active.\n", __FUNCTION__);
+x = mwx; y = mwy; width = mww; height = mwh;
+}
+#endif
if (MyVideoStream->HwDecoder) {
VideoSetOutputPosition(MyVideoStream->HwDecoder, x, y, width, height);
}
@@ -3452,8 +3438,7 @@ void ScaleVideo(int x, int y, int width, int height)
** @param pip_width pip window width OSD relative
** @param pip_height pip window height OSD relative
*/
-void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y,
-int pip_width, int pip_height)
+void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height)
{
if (!MyVideoStream->HwDecoder) { // video not running
return;
@@ -3463,8 +3448,7 @@ void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y,
if (!PipVideoStream->HwDecoder) { // pip not running
return;
}
-VideoSetOutputPosition(PipVideoStream->HwDecoder, pip_x, pip_y, pip_width,
-pip_height);
+VideoSetOutputPosition(PipVideoStream->HwDecoder, pip_x, pip_y, pip_width, pip_height);
}
/**
@@ -3479,8 +3463,7 @@ void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y,
** @param pip_width pip window width OSD relative
** @param pip_height pip window height OSD relative
*/
-void PipStart(int x, int y, int width, int height, int pip_x, int pip_y,
-int pip_width, int pip_height)
+void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height)
{
if (!MyVideoStream->HwDecoder) { // video not running
return;
@@ -3490,6 +3473,8 @@ void PipStart(int x, int y, int width, int height, int pip_x, int pip_y,
VideoStreamOpen(PipVideoStream);
}
PipSetPosition(x, y, width, height, pip_x, pip_y, pip_width, pip_height);
+mwx = x; mwy = y; mww = width; mwh = height;
+PiPActive = 1;
}
/**
@@ -3503,6 +3488,8 @@ void PipStop(void)
return;
}
+PiPActive = 0;
+mwx = 0; mwy = 0; mww = 0; mwh = 0;
ScaleVideo(0, 0, 0, 0);
PipVideoStream->Close = 1;
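
PipStart() now remembers the scaled-down main-window geometry (mwx/mwy/mww/mwh) and raises PiPActive, so that a fullscreen request via ScaleVideo(0, 0, 0, 0) is redirected back to that saved rectangle while PiP is on screen; PipStop() clears both again. A rough caller-side sketch of that lifecycle, with purely illustrative OSD-relative coordinates:

    /* Prototypes mirroring the definitions shown above. */
    extern void ScaleVideo(int, int, int, int);
    extern void PipStart(int, int, int, int, int, int, int, int);
    extern void PipStop(void);

    static void ShowChannelWithPip(void)
    {
        // main video in the upper-left quadrant, PiP window in the lower right
        PipStart(0, 0, 960, 540, 1280, 720, 640, 360);

        // while PiPActive is set, a fullscreen request falls back to the saved 960x540 rectangle
        ScaleVideo(0, 0, 0, 0);

        PipStop();                      // clears PiPActive and the saved geometry
        ScaleVideo(0, 0, 0, 0);         // now really restores fullscreen output
    }
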

softhddev.h

@@ -25,8 +25,7 @@ extern "C"
{
#endif
/// C callback feed key press
-extern void FeedKeyPress(const char *, const char *, int, int,
-const char *);
+extern void FeedKeyPress(const char *, const char *, int, int, const char *);
/// C plugin get osd size and aspect
extern void GetOsdSize(int *, int *, double *);
@@ -34,8 +33,7 @@ extern "C"
/// C plugin close osd
extern void OsdClose(void);
/// C plugin draw osd pixmap
-extern void OsdDrawARGB(int, int, int, int, int, const uint8_t *, int,
-int);
+extern void OsdDrawARGB(int, int, int, int, int, const uint8_t *, int, int);
/// C plugin play audio packet
extern int PlayAudio(const uint8_t *, int, uint8_t);
@@ -98,7 +96,7 @@ extern "C"
extern void Resume(void);
/// Get decoder statistics
-extern void GetStats(int *, int *, int *, int *, float *);
+extern void GetStats(int *, int *, int *, int *, float *, int *, int *, int *, int *);
/// C plugin scale video
extern void ScaleVideo(int, int, int, int);

video.c: 4483 changed lines (diff suppressed because it is too large)

video.h: 37 changed lines

@@ -44,7 +44,7 @@ extern signed char VideoHardwareDecoder; ///< flag use hardware decoder
extern char VideoIgnoreRepeatPict; ///< disable repeat pict warning
extern int VideoAudioDelay; ///< audio/video delay
extern char ConfigStartX11Server; ///< flag start the x11 server
+extern char MyConfigDir[];
//----------------------------------------------------------------------------
// Prototypes
//----------------------------------------------------------------------------
@@ -55,7 +55,6 @@ extern VideoHwDecoder *VideoNewHwDecoder(VideoStream *);
/// Deallocate video hardware decoder.
extern void VideoDelHwDecoder(VideoHwDecoder *);
#ifdef LIBAVCODEC_VERSION
/// Get and allocate a video hardware surface.
extern unsigned VideoGetSurface(VideoHwDecoder *, const AVCodecContext *);
@@ -63,21 +62,17 @@ extern unsigned VideoGetSurface(VideoHwDecoder *, const AVCodecContext *);
extern void VideoReleaseSurface(VideoHwDecoder *, unsigned);
/// Callback to negotiate the PixelFormat.
-extern enum AVPixelFormat Video_get_format(VideoHwDecoder *, AVCodecContext *,
-const enum AVPixelFormat *);
+extern enum AVPixelFormat Video_get_format(VideoHwDecoder *, AVCodecContext *, const enum AVPixelFormat *);
/// Render a ffmpeg frame.
-extern void VideoRenderFrame(VideoHwDecoder *, const AVCodecContext *,
-const AVFrame *);
+extern void VideoRenderFrame(VideoHwDecoder *, const AVCodecContext *, const AVFrame *);
/// Get hwaccel context for ffmpeg.
extern void *VideoGetHwAccelContext(VideoHwDecoder *);
#ifdef AVCODEC_VDPAU_H
/// Draw vdpau render state.
-extern void VideoDrawRenderState(VideoHwDecoder *,
-struct vdpau_render_state *);
+extern void VideoDrawRenderState(VideoHwDecoder *, struct vdpau_render_state *);
#endif
#endif
#ifdef USE_OPENGLOSD
@@ -118,7 +113,7 @@ extern void VideoSetContrast(int);
/// Set saturation adjustment.
extern void VideoSetSaturation(int);
-/// Set Gamm.
+/// Set Gamma.
extern void VideoSetGamma(int);
/// Set ColorSpace.
@@ -184,15 +179,11 @@ extern void VideoSetBackground(uint32_t);
/// Set audio delay.
extern void VideoSetAudioDelay(int);
-/// Set auto-crop parameters.
-extern void VideoSetAutoCrop(int, int, int);
/// Clear OSD.
extern void VideoOsdClear(void);
/// Draw an OSD ARGB image.
-extern void VideoOsdDrawARGB(int, int, int, int, int, const uint8_t *, int,
-int);
+extern void VideoOsdDrawARGB(int, int, int, int, int, const uint8_t *, int, int);
/// Get OSD size.
extern void VideoGetOsdSize(int *, int *);
@@ -225,7 +216,7 @@ extern uint8_t *VideoGrab(int *, int *, int *, int);
extern uint8_t *VideoGrabService(int *, int *, int *);
/// Get decoder statistics.
-extern void VideoGetStats(VideoHwDecoder *, int *, int *, int *, int *, float *);
+extern void VideoGetStats(VideoHwDecoder *, int *, int *, int *, int *, float *, int *, int *, int *, int *);
/// Get video stream size
extern void VideoGetVideoSize(VideoHwDecoder *, int *, int *, int *, int *);
@@ -240,7 +231,7 @@ extern void VideoExit(void); ///< Cleanup and exit video module.
extern int VideoPollInput(VideoStream *);
/// Decode video input buffers.
-extern int VideoDecodeInput(VideoStream *);
+extern int VideoDecodeInput(VideoStream *, int);
/// Get number of input buffers.
extern int VideoGetBuffers(const VideoStream *);
@@ -250,9 +241,21 @@ extern void SetDPMSatBlackScreen(int);
/// Raise the frontend window
extern int VideoRaiseWindow(void);
+/// Set Shaders
+extern int VideoSetShader(char *);
#ifdef USE_OPENGLOSD
extern void ActivateOsd(GLuint, int, int, int, int);
#endif
+#ifdef GAMMA
+extern void Init_Gamma();
+extern void Exit_Gamma();
+extern void Set_Gamma(float, int);
+extern void Get_Gamma();
+#endif
#if 0
long int gettid()
{