28 Commits

Author SHA1 Message Date
jojo61
d553a8108d Fix AC3 downmix 2021-01-26 09:17:39 +01:00
jojo61
cb4515f6b7 Minor updates 2021-01-14 10:40:29 +01:00
jojo61
277d7fbd86 More shader samples 2021-01-11 17:22:47 +01:00
jojo61
9347f2a502 Fix CUVID without placebo 2021-01-11 16:13:04 +01:00
jojo61
6dfd2d96aa Remove local cuda dependency 2021-01-11 08:28:44 +01:00
jojo61
a7471e8800 Support for libplacebo API Version 106
Support for LIBPLACEBO with opengl -> needs API >= 106
2021-01-10 13:55:09 +01:00
jojo61
184cc1aa05 Fix playback for old PES recordings with vaapi 2020-08-18 11:35:11 +02:00
jojo61
072e1d6847 Fix vaapi for libplacebo > 87 2020-08-14 12:38:27 +02:00
jojo61
6c13195fda Remove hbbtv changes - not needed 2020-06-23 11:22:24 +02:00
jojo61
6a31404aa0 Optimize for hbbtv plugin 2020-06-22 16:35:37 +02:00
jojo61
a424a57036 Mangle with C and C++ 2020-06-16 08:49:03 +02:00
jojo61
05c2585238 Fix shaders for vaapi Version 2020-06-16 07:46:27 +02:00
jojo61
a28e368c1b Fix jumps between recording marks 2020-06-09 09:23:38 +02:00
jojo61
e4115f348b Update README 2020-06-07 12:56:51 +02:00
jojo61
03b770ce47 Support for mpv user shaders with libplacebo 2020-06-07 12:31:18 +02:00
jojo61
c7c4cb06a6 Fix aspect change within SD channels
Should fix #48
2020-06-01 17:15:07 +02:00
jojo61
a41f6b22fd Update for latest libplacebo 2020-05-15 16:37:06 +02:00
jojo61
628bad5006 Fix Build without PIP 2020-05-07 13:21:26 +02:00
jojo61
cb466dd894 No root for softhddrm needed anymore 2020-05-07 10:53:51 +02:00
jojo61
3578e3212d Merge pull request #54 from REELcoder/master
Fix PiP window size handling
2020-05-04 08:37:09 +02:00
jojo61
78337f5933 Update Minor Version 2020-05-01 12:08:12 +02:00
jojo61
2fea2ee69f Fix default Gamma in Initial Setup
Prepare for opengl placebo (not working yet)
2020-05-01 12:06:04 +02:00
jojo61
c1c345dd4d Merge pull request #53 from dnehring7/master
Remove unsupported auto-crop feature from source
2020-05-01 11:20:04 +02:00
Dirk Nehring
a3eedbff0c Remove unsupported auto-crop feature from source 2020-04-16 21:15:19 +02:00
jojo61
309ad1c90e Merge pull request #45 from dnehring7/master
Reindent all sources to common coding style. Reworked aspect function.
2020-04-14 08:18:20 +00:00
Dirk Nehring
23651104f2 Reworked the aspect function. Now we calculate the display aspect from the pixel width and
pixel height (and not from the physical size, which was the root cause of the rounding errors).
The cropping calculation is reworked, and the rounding is now correct. A new aspect mode
"original" is introduced, which displays the output at its original size (but with the
pixel aspect ratio corrected). Tested with vaapi(X11) and vaapi(DRM) with and without libplacebo.
2020-04-13 18:04:57 +02:00
Dirk Nehring
36c208967e - Reindent all sources to common coding style again.
- Fix compile bug that occurred with gcc10 (-fno-common is now the default)
2020-04-10 16:17:23 +02:00
REELcoder
d1a1329beb Fix PiP window size handling 2020-04-06 20:45:01 +02:00
17 changed files with 2625 additions and 1490 deletions

Makefile

@@ -9,7 +9,7 @@
### Configuration (edit this for your needs)
# config as needed
# comment out if not needed
# what kind of decoder do we make -
# if VAAPI is enabled the pluginname is softhdvaapi
@@ -20,25 +20,34 @@ CUVID ?= 0
# if you enable DRM then the plugin will only run without X server
# only valid for VAAPI
# does not work with libplacebo
DRM ?= 0
# use libplacebo - available for both decoders but not for DRM
# use libplacebo -
# available for all decoders but for DRM you need LIBPLACEBO_GL
LIBPLACEBO ?= 1
LIBPLACEBO_GL ?= 0
# use YADIF deint - only available with cuvid
#YADIF=1
# use gamma correction
#GAMMA ?= 0
CONFIG := #-DDEBUG # remove # to enable debug output
#--------------------- no more config needed past this point--------------------------------
# sanitize selections --------
ifneq "$(MAKECMDGOALS)" "clean"
ifneq "$(MAKECMDGOALS)" "indent"
ifeq ($(VAAPI),0)
ifeq ($(CUVID),0)
@@ -65,9 +74,12 @@ exit 1;
endif
endif
endif
endif # MAKECMDGOALS!=indent
endif # MAKECMDGOALS!=clean
#--------------------------
PLUGIN = softhdcuvid
# support OPENGLOSD always needed
@@ -144,33 +156,7 @@ APIVERSION = $(call PKGCFG,apiversion)
### Parse config
ifeq ($(VAAPI),1)
CONFIG += -DVAAPI
#LIBPLACEBO=1
PLUGIN = softhdvaapi
LIBS += -lEGL
endif
ifeq ($(DRM),1)
PLUGIN = softhddrm
CONFIG += -DUSE_DRM -DVAAPI
LIBPLACEBO=0
_CFLAGS += $(shell pkg-config --cflags libdrm)
LIBS += -lgbm -ldrm
LIBS += -lEGL
endif
ifeq ($(CUVID),1)
CONFIG += -DUSE_PIP # PIP support
CONFIG += -DCUVID # enable CUVID decoder
LIBS += -lEGL -lGL
ifeq ($(YADIF),1)
CONFIG += -DYADIF # Yadif only with CUVID
endif
endif
### Parse softhddevice config
ifeq ($(ALSA),1)
CONFIG += -DUSE_ALSA
@@ -199,8 +185,45 @@ _CFLAGS += $(shell pkg-config --cflags freetype2)
LIBS += $(shell pkg-config --libs freetype2)
endif
ifeq ($(VAAPI),1)
CONFIG += -DVAAPI
#LIBPLACEBO=1
PLUGIN = softhdvaapi
endif
ifeq ($(LIBPLACEBO_GL),1)
CONFIG += -DPLACEBO_GL -DPLACEBO
LIBS += -lepoxy
LIBS += -lplacebo
else
LIBS += -lEGL
endif
ifeq ($(LIBPLACEBO),1)
CONFIG += -DPLACEBO
LIBS += -lEGL
LIBS += -lplacebo
endif
ifeq ($(DRM),1)
PLUGIN = softhddrm
CONFIG += -DUSE_DRM -DVAAPI
_CFLAGS += $(shell pkg-config --cflags libdrm)
LIBS += -lgbm -ldrm -lEGL
endif
ifeq ($(CUVID),1)
CONFIG += -DUSE_PIP # PIP support
CONFIG += -DCUVID # enable CUVID decoder
LIBS += -lEGL -lGL
ifeq ($(YADIF),1)
CONFIG += -DYADIF # Yadif only with CUVID
endif
endif
ifeq ($(GAMMA),1)
CONFIG += -DGAMMA
endif
@@ -280,9 +303,6 @@ _CFLAGS += -I./opengl -I./
LIBS += -L/usr/lib64
ifeq ($(LIBPLACEBO),1)
LIBS += -lplacebo
endif
ifeq ($(CUVID),1)
LIBS += -lcuda -lnvcuvid
@@ -306,9 +326,14 @@ override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \
### The object files (add further files here):
OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o
ifeq ($(OPENGLOSD),1)
OBJS += openglosd.o
OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o openglosd.o
ifeq ($(GAMMA),1)
OBJS += colorramp.o
ifeq ($(DRM),1)
OBJS += gamma-drm.o
else
OBJS += gamma-vidmode.o
endif
endif
SRCS = $(wildcard $(OBJS:.o=.c)) *.cpp
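Since the build switches shown in this Makefile diff (VAAPI, CUVID, DRM, LIBPLACEBO, LIBPLACEBO_GL, GAMMA) are ordinary make variables, most of them assigned with ?=, they can also be set on the make command line instead of editing the file. A hedged sketch of typical invocations, with the flag combinations assumed from the comments above (adjust to your hardware):

    make VAAPI=1 CUVID=0 LIBPLACEBO=1                 # builds the softhdvaapi plugin with libplacebo
    make CUVID=1 LIBPLACEBO=1                         # builds softhdcuvid (NVIDIA, links cuda/nvcuvid)
    make VAAPI=1 DRM=1 LIBPLACEBO=0 LIBPLACEBO_GL=1   # builds softhddrm; per the comments, DRM output needs the LIBPLACEBO_GL path

Command-line assignments override the ?= defaults, so no Makefile edit is strictly required for these combinations.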

README

@@ -50,10 +50,9 @@ Only via DisplayPort you can get 10 Bit output to a compatible screen. This is a
Current Status with VAAPI:
I tested it with Intel VAAPI. If you have problems with the shaders, then copy the drirc file into your home directory as .drirc
AMD VAAPI is broken by AMD and will not work currently.
You have to adapt the Makefile to your needs. I use FFMPEG 4.0
The Makefile expects the CUDA SDK in /usr/local/cuda. Currently it is tested with CUDA 10
This Version supports building with libplacebo. https://github.com/haasn/libplacebo
You have to enable it in the Makefile and install libplacebo yourself.
@@ -73,13 +72,13 @@ Quickstart:
You have to adapt the Makefile. There are 3 possible versions that you can build:
softhdcuvid
This is for NVIDIA cards and uses cuvid as decoder. It uses xcb for output and needs an X server to run.
softhdvaapi
This is for INTEL cards and uses Vaapi as decoder. It uses xcb for output and needs an X server to run.
softhddrm
This is for INTEL cards and also uses Vaapi as decoder. It uses the DRM API for output and
runs without an X server. There are several commandline options to select the resolution and refresh rate.
Install:
@@ -98,27 +97,32 @@ Install:
Beginners Guide for libplacebo:
-------------------------------
When using libplacebo you will find several config options.
First of all you need to set the right scaler for each resolution:
It is best to begin by setting all of them to "bilinear". If that works ok for you, you can try to change them
to more advanced scalers. I use ewa_robidouxsharp on my GTX1050, but your mileage may vary.
Unfortunately, on INTEL not all scalers may work, or they may crash.
You can enable a Scaler Test feature. When enabled, the screen is split. On the left half you will
see the scaler defined by Scaler Test and on the right side you will see the scaler defined at the
Resolution setting. There is a small black line between the halves to remind you that Scaler Test
is active.
Then you should set the Monitor Colorspace to "sRGB". This guarantees you the best colors on your screen.
At the moment all calculations are done internally in RGB space and all cards also output RGB.
If you are colorblind you could try to remedy this with the Colorblind Settings. Really only needed
in rare cases.
All other settings can be left in their default state.
Beginning with libplacebo API 58, user shaders from mpv are supported. Use the -S parameter to set the shader.
The plugin searches for the shaders in $ConfigDir/plugins/shaders. One example shader is
provided in the shader subdirectory. Copy it to e.g. /etc/vdr/plugins/shaders and then start
vdr -P 'softhdcuvid -S filmgrain.glsl ...'
Setup: environment
------
@@ -232,18 +236,6 @@ Setup: /etc/vdr/setup.conf
0 = default (336 ms)
1 - 1000 = size of the buffer in ms
softhddevice.AutoCrop.Interval = 0
0 disables auto-crop
n each 'n' frames auto-crop is checked.
softhddevice.AutoCrop.Delay = 0
if auto-crop is over 'n' intervals the same, the cropping is
used.
softhddevice.AutoCrop.Tolerance = 0
if detected crop area is too small, cut max 'n' pixels at top and
bottom.
softhddevice.Background = 0
32bit RGBA background color
(Red * 16777216 + Green * 65536 + Blue * 256 + Alpha)
@@ -283,11 +275,13 @@ Setup: /etc/vdr/setup.conf
0 pan and scan
1 letter box
2 center cut-out
3 original
softhddevice.VideoOtherDisplayFormat = 1
0 pan and scan
1 pillar box
2 center cut-out
3 original
softhddevice.pip.X = 79
softhddevice.pip.Y = 78
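The value 3 added in the hunk above is the "original" aspect mode introduced by the aspect-function rework. As an illustration only (the key name is taken from the excerpt above, the chosen value is an example, not a recommendation), a setup.conf entry selecting it would look like:

    softhddevice.VideoOtherDisplayFormat = 3

which, per the commit message in the list above, displays such material at its original size with only the pixel aspect ratio corrected.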
@@ -314,22 +308,6 @@ Setup: /etc/vdr/setup.conf
PIP alternative video window position and size in percent.
Setup: /etc/vdr/remote.conf
------
Add "XKeySym." definitions to /etc/vdr/remote.conf to control
the vdr and plugin with the connected input device.
fe.
XKeySym.Up Up
XKeySym.Down Down
...
Additional to the x11 input sends the window close button "Close".
fe.
XKeySym.Power Close
Commandline:
------------
@@ -378,6 +356,3 @@ Running:
Known Bugs:
-----------
SD Streams not working very well on vaapi

audio.c

@@ -146,7 +146,6 @@ static volatile char AudioRunning; ///< thread running / stopped
static volatile char AudioPaused; ///< audio paused
static volatile char AudioVideoIsReady; ///< video ready start early
static int AudioSkip; ///< skip audio to sync to video
int AudioDelay; /// delay audio to sync to video
static const int AudioBytesProSample = 2; ///< number of bytes per sample
@@ -2005,7 +2004,7 @@ static void *AudioPlayHandlerThread(void *dummy)
{
Debug(3, "audio: play thread started\n");
prctl(PR_SET_NAME, "cuvid audio", 0, 0, 0);
for (;;) {
// check if we should stop the thread
if (AudioThreadStop) {
@@ -2024,7 +2023,8 @@ static void *AudioPlayHandlerThread(void *dummy)
Debug(3, "audio: ----> %dms %d start\n", (AudioUsedBytes() * 1000)
/ (!AudioRing[AudioRingWrite].HwSampleRate + !AudioRing[AudioRingWrite].HwChannels +
AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),AudioUsedBytes());
AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
AudioUsedBytes());
do {
int filled;
@@ -2252,7 +2252,7 @@ void AudioEnqueue(const void *samples, int count)
AudioNormalizer(buffer, count);
}
}
n = RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, buffer, count);
if (n != (size_t)count) {
Error(_("audio: can't place %d samples in ring buffer\n"), count);
@@ -2291,7 +2291,7 @@ void AudioEnqueue(const void *samples, int count)
// no lock needed, can wakeup next time
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
Debug(3, "Start on AudioEnque Threshold %d n %d\n",AudioStartThreshold,n);
Debug(3, "Start on AudioEnque Threshold %d n %d\n", AudioStartThreshold, n);
}
}
// Update audio clock (stupid gcc developers thinks INT64_C is unsigned)
@@ -2325,7 +2325,7 @@ void AudioVideoReady(int64_t pts)
// Audio.PTS = next written sample time stamp
used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
audio_pts =
audio_pts =
AudioRing[AudioRingWrite].PTS -
(used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
AudioBytesProSample);
@@ -2337,11 +2337,12 @@ void AudioVideoReady(int64_t pts)
if (!AudioRunning) {
int skip;
// buffer ~15 video frames
// FIXME: HDTV can use smaller video buffer
skip = pts - 0 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay;
#ifdef DEBUG
// fprintf(stderr, "a/v-diff %dms a/v-delay %dms skip %dms Audiobuffer %d\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90,AudioBufferTime);
// fprintf(stderr, "a/v-diff %dms a/v-delay %dms skip %dms Audiobuffer %d\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90,AudioBufferTime);
#endif
// guard against old PTS
if (skip > 0 && skip < 4000 * 90) {
@@ -2479,7 +2480,7 @@ int64_t AudioGetDelay(void)
pts += ((int64_t) RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)
* 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels *
AudioBytesProSample);
Debug(4,"audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer),
Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer),
pts / 90);
return pts;
@@ -2496,7 +2497,7 @@ void AudioSetClock(int64_t pts)
Debug(4, "audio: set clock %s -> %s pts\n", Timestamp2String(AudioRing[AudioRingWrite].PTS),
Timestamp2String(pts));
}
// printf("Audiosetclock pts %#012" PRIx64 " %d\n",pts,RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer));
// printf("Audiosetclock pts %#012" PRIx64 " %d\n",pts,RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer));
AudioRing[AudioRingWrite].PTS = pts;
}
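The start-threshold and delay arithmetic in this file (see AudioVideoReady() and AudioGetDelay() above) converts the bytes still queued in the ring buffer into 90 kHz PTS ticks. A minimal, self-contained sketch of just that conversion, using illustrative names rather than the plugin's own:

    #include <stdint.h>

    /* 90 kHz ticks represented by 'used' bytes of interleaved PCM:
     * used / (rate * channels * bytes_per_sample) seconds, times 90000
     * ticks per second (written as 90 * 1000 in the source above). */
    static int64_t buffered_ticks(int64_t used, int rate, int channels, int bytes_per_sample)
    {
        return (used * 90 * 1000) / ((int64_t)rate * channels * bytes_per_sample);
    }

    /* Example: 48 kHz stereo with 2 bytes per sample (AudioBytesProSample):
     * 19200 buffered bytes -> 0.1 s -> buffered_ticks(...) == 9000. */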

codec.c

@@ -96,6 +96,7 @@ static pthread_mutex_t CodecLockMutex;
/// Flag prefer fast channel switch
char CodecUsePossibleDefectFrames;
AVBufferRef *hw_device_ctx;
//----------------------------------------------------------------------------
// Video
//----------------------------------------------------------------------------
@@ -255,18 +256,18 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
#endif
#ifdef RASPI
switch (codec_id) {
case AV_CODEC_ID_MPEG2VIDEO:
name = "mpeg2_v4l2m2m";
break;
case AV_CODEC_ID_H264:
name = "h264_v4l2m2m";
case AV_CODEC_ID_MPEG2VIDEO:
name = "mpeg2_v4l2m2m";
break;
case AV_CODEC_ID_H264:
name = "h264_v4l2m2m";
// name = "h264_mmal";
break;
case AV_CODEC_ID_HEVC:
name = "hevc_v4l2m2m";
break;
break;
case AV_CODEC_ID_HEVC:
name = "hevc_v4l2m2m";
break;
}
#endif
#endif
if (name && (video_codec = avcodec_find_decoder_by_name(name))) {
Debug(3, "codec: decoder found\n");
} else if ((video_codec = avcodec_find_decoder(codec_id)) == NULL) {
@@ -281,15 +282,15 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
if (!(decoder->VideoCtx = avcodec_alloc_context3(video_codec))) {
Fatal(_("codec: can't allocate video codec context\n"));
}
#ifndef RASPI
if (!HwDeviceContext) {
Fatal("codec: no hw device context to be used");
}
decoder->VideoCtx->hw_device_ctx = av_buffer_ref(HwDeviceContext);
#else
decoder->VideoCtx->pix_fmt = AV_PIX_FMT_DRM_PRIME; /* request a DRM frame
// decoder->VideoCtx->pix_fmt = AV_PIX_FMT_MMAL; /* request a DRM frame */
decoder->VideoCtx->pix_fmt = AV_PIX_FMT_DRM_PRIME; /* request a DRM frame */
// decoder->VideoCtx->pix_fmt = AV_PIX_FMT_MMAL; /* request a DRM frame */
#endif
// FIXME: for software decoder use all cpus, otherwise 1
@@ -310,7 +311,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
if (video_codec->capabilities & (AV_CODEC_CAP_AUTO_THREADS)) {
Debug(3, "codec: auto threads enabled");
// decoder->VideoCtx->thread_count = 0;
}
}
if (video_codec->capabilities & AV_CODEC_CAP_TRUNCATED) {
Debug(3, "codec: supports truncated packets");
@@ -333,9 +334,9 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
// if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0)
// Fatal(_("VAAPI Refcounts invalid\n"));
decoder->VideoCtx->thread_safe_callbacks = 0;
#endif
#ifdef RASPI
decoder->VideoCtx->codec_id = codec_id;
decoder->VideoCtx->flags |= AV_CODEC_FLAG_BITEXACT;
@@ -508,18 +509,18 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
int ret;
AVPacket pkt[1];
AVFrame *frame;
*pkt = *avpkt; // use copy
ret = avcodec_send_packet(video_ctx, pkt);
if (ret < 0) {
return;
}
}
if (!CuvidTestSurfaces())
usleep(1000);
ret = 0;
while (ret >= 0 && CuvidTestSurfaces()) {
while (ret >= 0 && CuvidTestSurfaces()) {
frame = av_frame_alloc();
ret = avcodec_receive_frame(video_ctx, frame);
@@ -549,7 +550,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
av_frame_free(&frame);
return;
}
}
}
}
}
#endif
@@ -596,9 +597,9 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
} else {
got_frame = 0;
}
// printf("got %s packet from decoder\n",got_frame?"1":"no");
// printf("got %s packet from decoder\n",got_frame?"1":"no");
if (got_frame) { // frame completed
// printf("video frame pts %#012" PRIx64 " %dms\n",frame->pts,(int)(apts - frame->pts) / 90);
// printf("video frame pts %#012" PRIx64 " %dms\n",frame->pts,(int)(apts - frame->pts) / 90);
#ifdef YADIF
if (decoder->filter) {
if (decoder->filter == 1) {
@@ -647,7 +648,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
void CodecVideoFlushBuffers(VideoDecoder * decoder)
{
if (decoder->VideoCtx) {
avcodec_flush_buffers(decoder->VideoCtx);
avcodec_flush_buffers(decoder->VideoCtx);
}
}
@@ -783,7 +784,7 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id)
}
if (CodecDownmix) {
audio_decoder->AudioCtx->request_channel_layout = AV_CH_LAYOUT_STEREO_DOWNMIX;
audio_decoder->AudioCtx->request_channel_layout = AV_CH_LAYOUT_STEREO;
}
pthread_mutex_lock(&CodecLockMutex);
// open codec
@@ -819,7 +820,7 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id)
void CodecAudioClose(AudioDecoder * audio_decoder)
{
// FIXME: output any buffered data
#ifdef USE_SWRESAMPLE
if (audio_decoder->Resample) {
swr_free(&audio_decoder->Resample);
@@ -1111,8 +1112,6 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac
return 0;
}
#if defined(USE_SWRESAMPLE) || defined(USE_AVRESAMPLE)
/**
@@ -1346,6 +1345,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
if (audio_decoder->Resample) {
uint8_t outbuf[8192 * 2 * 8];
uint8_t *out[1];
out[0] = outbuf;
ret =
swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels),
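The video decode loop in the hunks above follows FFmpeg's send/receive API. A stripped-down sketch of that pattern, leaving out the plugin-specific CuvidTestSurfaces() gating and the hand-off of decoded frames to the renderer (decode_packet is an illustrative name, not a function of this plugin):

    #include <libavcodec/avcodec.h>

    static void decode_packet(AVCodecContext *video_ctx, const AVPacket *pkt)
    {
        // feed one compressed packet to the decoder
        if (avcodec_send_packet(video_ctx, pkt) < 0)
            return;                         // packet rejected, nothing to drain

        // drain every frame the decoder can produce right now
        for (;;) {
            AVFrame *frame = av_frame_alloc();
            int ret = avcodec_receive_frame(video_ctx, frame);

            if (ret < 0) {                  // AVERROR(EAGAIN): needs more input,
                av_frame_free(&frame);      // AVERROR_EOF: decoder fully drained
                break;
            }
            /* a real consumer (like the loop above) would queue or render the
             * frame here instead of freeing it immediately */
            av_frame_free(&frame);
        }
    }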

drm.c

@@ -18,13 +18,13 @@ struct _Drm_Render_
int fd_drm;
drmModeModeInfo mode;
drmModeCrtc *saved_crtc;
// drmEventContext ev;
// drmEventContext ev;
int bpp;
uint32_t connector_id, crtc_id, video_plane;
uint32_t hdr_metadata;
uint32_t mmWidth,mmHeight; // Size in mm
uint32_t hdr_blob_id;
};
typedef struct _Drm_Render_ VideoRender;
@@ -193,12 +193,12 @@ void set_video_mode(int width, int height)
return;
connector = drmModeGetConnector(render->fd_drm, render->connector_id);
for (ii = 0; ii < connector->count_modes; ii++) {
mode = &connector->modes[ii];
mode = &connector->modes[ii];
printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh);
if (width == mode->hdisplay &&
height == mode->vdisplay &&
if (width == mode->hdisplay &&
height == mode->vdisplay &&
mode->vrefresh == DRMRefresh &&
render->mode.hdisplay != width &&
render->mode.hdisplay != width &&
render->mode.vdisplay != height &&
!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
@@ -211,7 +211,7 @@ void set_video_mode(int width, int height)
CuvidSetVideoMode();
Debug(3,"Set new mode %d:%d\n",mode->hdisplay,mode->vdisplay);
break;
}
}
}
}
@@ -240,11 +240,31 @@ static int FindDevice(VideoRender * render)
fprintf(stderr, "FindDevice: cannot open /dev/dri/card0: %m\n");
return -errno;
}
drmSetMaster(render->fd_drm);
int ret = drmSetMaster(render->fd_drm);
if (ret < 0)
{
drm_magic_t magic;
ret = drmGetMagic(render->fd_drm, &magic);
if (ret < 0)
{
Debug(3, "drm:%s - failed to get drm magic: %s\n", __FUNCTION__, strerror(errno));
return -1;
}
ret = drmAuthMagic(render->fd_drm, magic);
if (ret < 0)
{
Debug(3, "drm:%s - failed to authorize drm magic: %s\n", __FUNCTION__, strerror(errno));
return -1;
}
}
version = drmGetVersion(render->fd_drm);
fprintf(stderr, "FindDevice: open /dev/dri/card0: %s\n", version->name);
// check capability
if (drmGetCap(render->fd_drm, DRM_CAP_DUMB_BUFFER, &has_dumb) < 0 || has_dumb == 0)
fprintf(stderr, "FindDevice: drmGetCap DRM_CAP_DUMB_BUFFER failed or doesn't have dumb buffer\n");
@@ -263,7 +283,7 @@ static int FindDevice(VideoRender * render)
if (drmGetCap(render->fd_drm, DRM_PRIME_CAP_IMPORT, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_PRIME_CAP_IMPORT not available.\n");
if ((resources = drmModeGetResources(render->fd_drm)) == NULL){
fprintf(stderr, "FindDevice: cannot retrieve DRM resources (%d): %m\n", errno);
return -errno;
@@ -282,18 +302,18 @@ static int FindDevice(VideoRender * render)
fprintf(stderr, "FindDevice: cannot retrieve DRM connector (%d): %m\n", errno);
return -errno;
}
sprintf(connectorstr,"%s-%u",util_lookup_connector_type_name(connector->connector_type),connector->connector_type_id);
printf("Connector >%s< is %sconnected\n",connectorstr,connector->connection == DRM_MODE_CONNECTED?"":"not ");
if (DRMConnector && strcmp(DRMConnector,connectorstr))
continue;
if (connector->connection == DRM_MODE_CONNECTED && connector->count_modes > 0) {
float aspect = (float)connector->mmWidth / (float)connector->mmHeight;
if ((aspect > 1.70) && (aspect < 1.85)) {
render->mmHeight = 90;
render->mmWidth = 160;
} else {
} else {
render->mmHeight = connector->mmHeight;
render->mmWidth = connector->mmWidth;
}
@@ -304,27 +324,27 @@ static int FindDevice(VideoRender * render)
return -errno;
}
render->crtc_id = encoder->crtc_id;
render->hdr_metadata = GetPropertyID(render->fd_drm, connector->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "HDR_OUTPUT_METADATA");
DRM_MODE_OBJECT_CONNECTOR, "HDR_OUTPUT_METADATA");
printf("ID %d of METADATA in Connector %d connected %d\n",render->hdr_metadata,connector->connector_id,connector->connection);
memcpy(&render->mode, &connector->modes[0], sizeof(drmModeModeInfo)); // set fallback
// search Modes for Connector
for (ii = 0; ii < connector->count_modes; ii++) {
mode = &connector->modes[ii];
printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh);
if (VideoWindowWidth && VideoWindowHeight) { // preset by command line
if (VideoWindowWidth == mode->hdisplay &&
VideoWindowHeight == mode->vdisplay &&
if (VideoWindowWidth && VideoWindowHeight) { // preset by command line
if (VideoWindowWidth == mode->hdisplay &&
VideoWindowHeight == mode->vdisplay &&
mode->vrefresh == DRMRefresh &&
!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
break;
}
}
}
else {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
@@ -335,7 +355,7 @@ static int FindDevice(VideoRender * render)
}
}
found = 1;
i = resources->count_connectors; // uuuuhh
i = resources->count_connectors; // uuuuhh
}
VideoWindowWidth = render->mode.hdisplay;
VideoWindowHeight = render->mode.vdisplay;
@@ -348,7 +368,7 @@ static int FindDevice(VideoRender * render)
printf("Requested Connector not found or not connected\n");
return -1;
}
// find first plane
if ((plane_res = drmModeGetPlaneResources(render->fd_drm)) == NULL)
fprintf(stderr, "FindDevice: cannot retrieve PlaneResources (%d): %m\n", errno);
@@ -366,7 +386,7 @@ static int FindDevice(VideoRender * render)
uint64_t type = GetPropertyValue(render->fd_drm, plane_res->planes[j],
DRM_MODE_OBJECT_PLANE, "type");
uint64_t zpos = 0;
uint64_t zpos = 0;
#ifdef DRM_DEBUG // If more then 2 crtcs this must rewriten!!!
printf("[FindDevice] Plane id %i crtc_id %i possible_crtcs %i possible CRTC %i type %s\n",
@@ -384,7 +404,7 @@ static int FindDevice(VideoRender * render)
case DRM_FORMAT_ARGB8888:
#else
case DRM_FORMAT_XRGB2101010:
#endif
#endif
if (!render->video_plane) {
render->video_plane = plane->plane_id;
}
@@ -419,25 +439,25 @@ static int FindDevice(VideoRender * render)
void VideoInitDrm()
{
int i;
if (!(render = calloc(1, sizeof(*render)))) {
Fatal(_("video/DRM: out of memory\n"));
return;
}
if (FindDevice(render)){
Fatal(_( "VideoInit: FindDevice() failed\n"));
}
gbm.dev = gbm_create_device (render->fd_drm);
assert (gbm.dev != NULL);
PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display = NULL;
get_platform_display =
(void *) eglGetProcAddress("eglGetPlatformDisplay");
assert(get_platform_display != NULL);
eglDisplay = get_platform_display(EGL_PLATFORM_GBM_KHR, gbm.dev, NULL);
assert (eglDisplay != NULL);
@@ -462,22 +482,21 @@ void VideoInitDrm()
DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 1);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
if (drmModeDestroyPropertyBlob(render->fd_drm, modeID) != 0)
fprintf(stderr, "cannot destroy property blob (%d): %m\n", errno);
drmModeAtomicFree(ModeReq);
}
void get_drm_aspect(int *num,int *den)
{
Debug(3,"mmHeight %d mmWidth %d VideoHeight %d VideoWidth %d\n",render->mmHeight,render->mmWidth,VideoWindowHeight,VideoWindowWidth);
*num = VideoWindowWidth * render->mmHeight;
*den = VideoWindowHeight * render->mmWidth;
*num = VideoWindowWidth;
*den = VideoWindowHeight;
}
struct gbm_bo *bo = NULL, *next_bo=NULL;
@@ -488,8 +507,8 @@ static int old_color=-1,old_trc=-1;
void InitBo(int bpp) {
// create the GBM and EGL surface
render->bpp = bpp;
gbm.surface = gbm_surface_create (gbm.dev, VideoWindowWidth,VideoWindowHeight,
bpp==10?GBM_FORMAT_XRGB2101010:GBM_FORMAT_ARGB8888,
gbm.surface = gbm_surface_create (gbm.dev, VideoWindowWidth,VideoWindowHeight,
bpp==10?GBM_FORMAT_XRGB2101010:GBM_FORMAT_ARGB8888,
GBM_BO_USE_SCANOUT|GBM_BO_USE_RENDERING);
assert(gbm.surface != NULL);
eglSurface = eglCreateWindowSurface (eglDisplay, eglConfig, gbm.surface, NULL);
@@ -501,10 +520,10 @@ static struct gbm_bo *previous_bo = NULL;
static uint32_t previous_fb;
static void drm_swap_buffers () {
uint32_t fb;
eglSwapBuffers (eglDisplay, eglSurface);
eglSwapBuffers (eglDisplay, eglSurface);
struct gbm_bo *bo = gbm_surface_lock_front_buffer (gbm.surface);
#if 1
if (bo == NULL)
@@ -517,7 +536,7 @@ static void drm_swap_buffers () {
drmModeAddFB (render->fd_drm, VideoWindowWidth,VideoWindowHeight,render->bpp==10? 30:24, 32, pitch, handle, &fb);
// drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode);
if (m_need_modeset) {
drmModeAtomicReqPtr ModeReq;
const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET;
@@ -531,14 +550,14 @@ static void drm_swap_buffers () {
fprintf(stderr, "cannot allocate atomic request (%d): %m\n", errno);
return;
}
// Need to disable the CRTC in order to submit the HDR data....
// Need to disable the CRTC in order to submit the HDR data....
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 0);
DRM_MODE_OBJECT_CRTC, "ACTIVE", 0);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
sleep(2);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "Colorspace",old_color==AVCOL_PRI_BT2020?9:2 );
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
@@ -547,7 +566,7 @@ static void drm_swap_buffers () {
DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 1);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
@@ -558,7 +577,7 @@ static void drm_swap_buffers () {
m_need_modeset = 0;
}
drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode);
if (previous_bo) {
drmModeRmFB (render->fd_drm, previous_fb);
gbm_surface_release_buffer (gbm.surface, previous_bo);
@@ -579,15 +598,15 @@ static void drm_clean_up () {
drmModeRmFB (render->fd_drm, previous_fb);
gbm_surface_release_buffer (gbm.surface, previous_bo);
}
drmModeSetCrtc (render->fd_drm, render->saved_crtc->crtc_id, render->saved_crtc->buffer_id,
render->saved_crtc->x, render->saved_crtc->y, &render->connector_id, 1, &render->saved_crtc->mode);
drmModeFreeCrtc (render->saved_crtc);
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
render->hdr_blob_id = 0;
eglDestroySurface (eglDisplay, eglSurface);
EglCheck();
gbm_surface_destroy (gbm.surface);
@@ -597,9 +616,9 @@ static void drm_clean_up () {
EglCheck();
eglSharedContext = NULL;
eglTerminate (eglDisplay);
eglTerminate (eglDisplay);
EglCheck();
gbm_device_destroy (gbm.dev);
drmDropMaster(render->fd_drm);
close (render->fd_drm);
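A note on the get_drm_aspect() change above: the display aspect is now taken directly from the pixel dimensions of the current mode, so for a 1920x1080 mode it reports 1920:1080, i.e. 16:9, whereas the old code scaled by the connector's mmWidth/mmHeight and, per the commit message in the list above, the physical size was the root cause of rounding errors. This is the DRM side of the aspect-function rework.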

hdr.c

@@ -334,11 +334,11 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
int max_lum=4000,min_lum=0050;
struct AVMasteringDisplayMetadata *md = NULL;
struct AVContentLightMetadata *ld = NULL;
if (render->hdr_metadata == -1) { // Metadata not supported
return;
}
// clean up FFMEPG stuff
if (trc == AVCOL_TRC_BT2020_10)
trc = AVCOL_TRC_ARIB_STD_B67;
@@ -346,16 +346,16 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
trc = AVCOL_TRC_BT709;
if (color == AVCOL_PRI_UNSPECIFIED)
color = AVCOL_PRI_BT709;
if ((old_color == color && old_trc == trc && !sd1 && !sd2) || !render->hdr_metadata)
return; // nothing to do
if (sd1)
md = sd1->data;
if (sd2)
ld = sd2->data;
if (md && !memcmp(md,&md_save,sizeof(md_save)))
if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) {
return;
@@ -363,23 +363,23 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
else if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) {
return;
}
if (ld)
memcpy(&ld_save,ld,sizeof(ld_save));
if (md)
memcpy(&md_save,md,sizeof(md_save));
Debug(3,"Update HDR to TRC %d color %d\n",trc,color);
if (trc == AVCOL_TRC_BT2020_10)
trc = AVCOL_TRC_ARIB_STD_B67;
old_color = color;
old_trc = trc;
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
switch(trc) {
case AVCOL_TRC_BT709: // 1
case AVCOL_TRC_UNSPECIFIED: // 2
@@ -387,7 +387,7 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
break;
case AVCOL_TRC_BT2020_10: // 14
case AVCOL_TRC_BT2020_12:
case AVCOL_TRC_ARIB_STD_B67: // 18 HLG
case AVCOL_TRC_ARIB_STD_B67: // 18 HLG
eotf = EOTF_HLG;
break;
case AVCOL_TRC_SMPTE2084: // 16
@@ -397,7 +397,7 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
eotf = EOTF_TRADITIONAL_GAMMA_SDR;
break;
}
switch (color) {
case AVCOL_PRI_BT709: // 1
case AVCOL_PRI_UNSPECIFIED: // 2
@@ -413,7 +413,7 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
cs = weston_colorspace_lookup("BT.709");
break;
}
if (md) { // we got Metadata
if (md->has_primaries) {
Debug(3,"Mastering Display Metadata,\n has_primaries:%d has_luminance:%d \n"
@@ -465,9 +465,9 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
MaxCLL, // Maximum Content Light Level (MaxCLL)
MaxFALL, // Maximum Frame-Average Light Level (MaxFALL)
eotf);
ret = drmModeCreatePropertyBlob(render->fd_drm, &data, sizeof(data), &render->hdr_blob_id);
if (ret) {
printf("DRM: HDR metadata: failed blob create \n");
@@ -479,15 +479,14 @@ static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSid
render->hdr_metadata, render->hdr_blob_id);
if (ret) {
printf("DRM: HDR metadata: failed property set %d\n",ret);
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
render->hdr_blob_id = 0;
return;
}
m_need_modeset = 1;
Debug(3,"DRM: HDR metadata: prop set\n");
}
Debug(3,"DRM: HDR metadata: prop set\n");
}

File diff suppressed because it is too large.

po/de_DE.po

@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: VDR \n"
"Report-Msgid-Bugs-To: <see README>\n"
"POT-Creation-Date: 2019-10-26 18:41+0200\n"
"POT-Creation-Date: 2020-04-15 18:57+0200\n"
"PO-Revision-Date: blabla\n"
"Last-Translator: blabla\n"
"Language-Team: blabla\n"
@@ -263,9 +263,6 @@ msgstr ""
msgid "codec: can't allocate video codec context\n"
msgstr ""
msgid "VAAPI Refcounts invalid\n"
msgstr ""
msgid "codec: can't set option deint to video codec!\n"
msgstr ""
@@ -306,24 +303,6 @@ msgstr ""
msgid "codec/audio: decoded data smaller than encoded\n"
msgstr ""
msgid "codec/audio: resample setup error\n"
msgstr ""
msgid "codec/audio: overwrite resample\n"
msgstr ""
msgid "codec/audio: AvResample setup error\n"
msgstr ""
msgid "codec: latm\n"
msgstr ""
msgid "codec: error audio data\n"
msgstr ""
msgid "codec: error more than one frame data\n"
msgstr ""
msgid "codec/audio: can't setup resample\n"
msgstr ""
@@ -471,18 +450,6 @@ msgstr "Schneide oben und unten ab (Pixel)"
msgid "Cut left and right (pixel)"
msgstr "Schneide links und rechts ab (Pixel)"
msgid "Auto-crop"
msgstr ""
msgid "Autocrop interval (frames)"
msgstr ""
msgid "Autocrop delay (n * interval)"
msgstr ""
msgid "Autocrop tolerance (pixel)"
msgstr ""
msgid "Audio"
msgstr "Audio"
@@ -642,7 +609,7 @@ msgid " Frames missed(%d) duped(%d) dropped(%d) total(%d)"
msgstr " Frames verloren(%d) verdoppelt(%d) übersprungen(%d) Gesamt(%d)"
#, c-format
msgid " Frame Process time %2.2fms"
msgid " Video %dx%d Color: %s Gamma: %s"
msgstr ""
msgid "pass-through disabled"
@@ -661,12 +628,6 @@ msgstr ""
msgid "surround downmix disabled"
msgstr ""
msgid "auto-crop disabled and freezed"
msgstr ""
msgid "auto-crop enabled"
msgstr ""
#, c-format
msgid "[softhddev]: hot key %d is not supported\n"
msgstr ""
@@ -807,10 +768,6 @@ msgstr ""
msgid "video/glx: no GLX support\n"
msgstr ""
#, c-format
msgid "video/glx: glx version %d.%d\n"
msgstr ""
msgid "did not get FBconfig"
msgstr ""
@@ -898,11 +855,10 @@ msgid "Failed rendering frame!\n"
msgstr ""
#, c-format
msgid "video/vdpau: output buffer full, dropping frame (%d/%d)\n"
msgid "video/cuvid: output buffer full, dropping frame (%d/%d)\n"
msgstr ""
#, c-format
msgid "video/vdpau: pixel format %d not supported\n"
msgid "Could not dynamically load CUDA\n"
msgstr ""
msgid "Kein Cuda device gefunden"
@@ -943,12 +899,6 @@ msgstr ""
msgid "video/egl: can't create thread egl context\n"
msgstr ""
msgid "video: can't queue cancel video display thread\n"
msgstr ""
msgid "video: can't cancel video display thread\n"
msgstr ""
#, c-format
msgid "video: repeated pict %d found, but not handled\n"
msgstr ""

shaders.h

@@ -11,7 +11,6 @@ const char *gl_version = "#version 300 es ";
#endif
#endif
/* Color conversion matrix: RGB = m * YUV + c
* m is in row-major matrix, with m[row][col], e.g.:
* [ a11 a12 a13 ] float m[3][3] = { { a11, a12, a13 },
@@ -39,48 +38,48 @@ struct mp_mat
// YUV input limited range (16-235 for luma, 16-240 for chroma)
// ITU-R BT.601 (SD)
struct mp_cmat yuv_bt601 = { {{1.164384, 1.164384, 1.164384},
{0.00000, -0.391762, 2.017232},
{1.596027, -0.812968, 0.000000}},
{-0.874202, 0.531668, -1.085631}
{0.00000, -0.391762, 2.017232},
{1.596027, -0.812968, 0.000000}},
{-0.874202, 0.531668, -1.085631}
};
// ITU-R BT.709 (HD)
struct mp_cmat yuv_bt709 = { {{1.164384, 1.164384, 1.164384},
{0.00000, -0.213249, 2.112402},
{1.792741, -0.532909, 0.000000}},
{-0.972945, 0.301483, -1.133402}
{0.00000, -0.213249, 2.112402},
{1.792741, -0.532909, 0.000000}},
{-0.972945, 0.301483, -1.133402}
};
// ITU-R BT.2020 non-constant luminance system
struct mp_cmat yuv_bt2020ncl = { {{1.164384, 1.164384, 1.164384},
{0.00000, -0.187326, 2.141772},
{1.678674, -0.650424, 0.000000}},
{-0.915688, 0.347459, -1.148145}
{0.00000, -0.187326, 2.141772},
{1.678674, -0.650424, 0.000000}},
{-0.915688, 0.347459, -1.148145}
};
// ITU-R BT.2020 constant luminance system
struct mp_cmat yuv_bt2020cl = { {{0.0000, 1.164384, 0.000000},
{0.00000, 0.000000, 1.138393},
{1.138393, 0.000000, 0.000000}},
{-0.571429, -0.073059, -0.571429}
{0.00000, 0.000000, 1.138393},
{1.138393, 0.000000, 0.000000}},
{-0.571429, -0.073059, -0.571429}
};
float cms_matrix[3][3] = { {1.660497, -0.124547, -0.018154},
{-0.587657, 1.132895, -0.100597},
{-0.072840, -0.008348, 1.118751}
{-0.587657, 1.132895, -0.100597},
{-0.072840, -0.008348, 1.118751}
};
// Common constants for SMPTE ST.2084 (PQ)
static const float PQ_M1 = 2610./4096 * 1./4,
PQ_M2 = 2523./4096 * 128,
PQ_C1 = 3424./4096,
PQ_C2 = 2413./4096 * 32,
PQ_C3 = 2392./4096 * 32;
static const float PQ_M1 = 2610. / 4096 * 1. / 4,
PQ_M2 = 2523. / 4096 * 128,
PQ_C1 = 3424. / 4096,
PQ_C2 = 2413. / 4096 * 32,
PQ_C3 = 2392. / 4096 * 32;
// Common constants for ARIB STD-B67 (HLG)
static const float HLG_A = 0.17883277,
HLG_B = 0.28466892,
HLG_C = 0.55991073;
HLG_B = 0.28466892,
HLG_C = 0.55991073;
struct gl_vao_entry
{
@@ -124,48 +123,55 @@ static const struct gl_vao_entry vertex_vao[] = {
char sh[SHADER_LENGTH];
char shv[SHADER_LENGTH];
GL_init() {
sh[0] = 0;
GL_init()
{
sh[0] = 0;
}
GLV_init() {
shv[0] = 0;
GLV_init()
{
shv[0] = 0;
}
pl_shader_append(const char *fmt, ...) {
char temp[1000];
va_list ap;
pl_shader_append(const char *fmt, ...)
{
char temp[1000];
va_list ap;
va_start(ap, fmt);
vsprintf(temp,fmt,ap);
va_end(ap);
if (strlen(sh) + strlen(temp) > SHADER_LENGTH)
Fatal(_("Shaderlenght fault\n"));
strcat(sh,temp);
vsprintf(temp, fmt, ap);
va_end(ap);
if (strlen(sh) + strlen(temp) > SHADER_LENGTH)
Fatal(_("Shaderlenght fault\n"));
strcat(sh, temp);
}
pl_shader_append_v(const char *fmt, ...) {
char temp[1000];
va_list ap;
pl_shader_append_v(const char *fmt, ...)
{
char temp[1000];
va_list ap;
va_start(ap, fmt);
vsprintf(temp,fmt,ap);
va_end(ap);
if (strlen(shv) + strlen(temp) > SHADER_LENGTH)
Fatal(_("Shaderlenght fault\n"));
strcat(shv,temp);
vsprintf(temp, fmt, ap);
va_end(ap);
if (strlen(shv) + strlen(temp) > SHADER_LENGTH)
Fatal(_("Shaderlenght fault\n"));
strcat(shv, temp);
}
static void compile_attach_shader(GLuint program, GLenum type, const char *source)
{
GLuint shader;
GLint status=1234, log_length;
GLint status = 1234, log_length;
char log[4000];
GLsizei len;
shader = glCreateShader(type);
glShaderSource(shader, 1, (const GLchar **)&source, NULL); // &buffer, NULL);
glShaderSource(shader, 1, (const GLchar **)&source, NULL); // &buffer, NULL);
glCompileShader(shader);
status = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
@@ -198,42 +204,42 @@ static GLuint sc_generate_osd(GLuint gl_prog)
Debug(3, "vor create osd\n");
gl_prog = glCreateProgram();
GL_init();
GLSL("%s\n",gl_version);
GLSL("in vec2 vertex_position;\n");
GLSL("in vec2 vertex_texcoord0;\n");
GLSL("out vec2 texcoord0;\n");
GLSL("void main() {\n");
GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
GLSL("texcoord0 = vertex_texcoord0;\n");
GLSL("}\n");
GL_init();
GLSL("%s\n", gl_version);
GLSL("in vec2 vertex_position;\n");
GLSL("in vec2 vertex_texcoord0;\n");
GLSL("out vec2 texcoord0;\n");
GLSL("void main() {\n");
GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
GLSL("texcoord0 = vertex_texcoord0;\n");
GLSL("}\n");
Debug(3, "vor compile vertex osd\n");
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh); // vertex_osd);
GL_init();
GLSL("%s\n",gl_version);
GLSL("#define texture1D texture\n");
GLSL("precision mediump float; \n");
GLSL("layout(location = 0) out vec4 out_color;\n");
GLSL("in vec2 texcoord0;\n");
GLSL("uniform sampler2D texture0;\n");
GLSL("void main() {\n");
GLSL("vec4 color; \n");
GLSL("color = vec4(texture(texture0, texcoord0));\n");
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh); // vertex_osd);
GL_init();
GLSL("%s\n", gl_version);
GLSL("#define texture1D texture\n");
GLSL("precision mediump float; \n");
GLSL("layout(location = 0) out vec4 out_color;\n");
GLSL("in vec2 texcoord0;\n");
GLSL("uniform sampler2D texture0;\n");
GLSL("void main() {\n");
GLSL("vec4 color; \n");
GLSL("color = vec4(texture(texture0, texcoord0));\n");
#ifdef GAMMA
GLSL("// delinearize gamma \n");
GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // delinearize gamma
GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
GLSL("// delinearize gamma \n");
GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // delinearize gamma
GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
#endif
GLSL("out_color = color;\n");
GLSL("}\n");
GLSL("out_color = color;\n");
GLSL("}\n");
Debug(3, "vor compile fragment osd \n");
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh); //fragment_osd);
glBindAttribLocation(gl_prog, 0, "vertex_position");
glBindAttribLocation(gl_prog, 1, "vertex_texcoord0");
link_shader(gl_prog);
return gl_prog;
}
@@ -245,34 +251,33 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace)
GLint cmsLoc;
float *m, *c, *cms;
char *frag;
GL_init();
GLSL("%s\n",gl_version);
GLSL("in vec2 vertex_position; \n");
GLSL("in vec2 vertex_texcoord0; \n");
GLSL("out vec2 texcoord0; \n");
GLSL("in vec2 vertex_texcoord1; \n");
GLSL("out vec2 texcoord1; \n");
if (Planes == 3) {
GLSL("in vec2 vertex_texcoord2; \n");
GLSL("out vec2 texcoord2; \n");
}
GLSL("void main() { \n");
GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
GLSL("texcoord0 = vertex_texcoord0; \n");
GLSL("texcoord1 = vertex_texcoord1; \n");
if (Planes == 3) {
GLSL("texcoord2 = vertex_texcoord1; \n"); // texcoord1 ist hier richtig
}
GLSL("} \n");
GL_init();
GLSL("%s\n", gl_version);
GLSL("in vec2 vertex_position; \n");
GLSL("in vec2 vertex_texcoord0; \n");
GLSL("out vec2 texcoord0; \n");
GLSL("in vec2 vertex_texcoord1; \n");
GLSL("out vec2 texcoord1; \n");
if (Planes == 3) {
GLSL("in vec2 vertex_texcoord2; \n");
GLSL("out vec2 texcoord2; \n");
}
GLSL("void main() { \n");
GLSL("gl_Position = vec4(vertex_position, 1.0, 1.0);\n");
GLSL("texcoord0 = vertex_texcoord0; \n");
GLSL("texcoord1 = vertex_texcoord1; \n");
if (Planes == 3) {
GLSL("texcoord2 = vertex_texcoord1; \n"); // texcoord1 ist hier richtig
}
GLSL("} \n");
Debug(3, "vor create\n");
gl_prog = glCreateProgram();
Debug(3, "vor compile vertex\n");
// printf("%s",sh);
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh );
// printf("%s",sh);
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, sh);
switch (colorspace) {
case AVCOL_SPC_RGB:
m = &yuv_bt601.m[0][0];
@@ -297,86 +302,85 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace)
Debug(3, "default BT709 Colorspace used %d\n", colorspace);
break;
}
GL_init();
GLSL("%s\n",gl_version);
GLSL("precision mediump float; \n");
GLSL("layout(location = 0) out vec4 out_color;\n");
GLSL("in vec2 texcoord0; \n");
GLSL("in vec2 texcoord1; \n");
if (Planes == 3)
GLSL("in vec2 texcoord2; \n");
GLSL("uniform mat3 colormatrix; \n");
GLSL("uniform vec3 colormatrix_c; \n");
if (colorspace == AVCOL_SPC_BT2020_NCL)
GLSL("uniform mat3 cms_matrix;\n");
GLSL("uniform sampler2D texture0; \n");
GLSL("uniform sampler2D texture1; \n");
if (Planes == 3)
GLSL("uniform sampler2D texture2; \n");
GLSL("void main() { \n");
GLSL("vec4 color; \n");
if (colorspace == AVCOL_SPC_BT2020_NCL) {
GLSL("color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r; \n");
if (Planes == 3) {
GLSL("color.g = 1.003906 * vec4(texture(texture1, texcoord1)).r; \n");
GLSL("color.b = 1.003906 * vec4(texture(texture2, texcoord2)).r; \n");
} else {
GLSL("color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n");
}
GLSL("// color conversion\n");
GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n");
GLSL("color.a = 1.0; \n");
GLSL("// pl_shader_linearize \n");
GL_init();
GLSL("%s\n", gl_version);
GLSL("precision mediump float; \n");
GLSL("layout(location = 0) out vec4 out_color;\n");
GLSL("in vec2 texcoord0; \n");
GLSL("in vec2 texcoord1; \n");
if (Planes == 3)
GLSL("in vec2 texcoord2; \n");
GLSL("uniform mat3 colormatrix; \n");
GLSL("uniform vec3 colormatrix_c; \n");
if (colorspace == AVCOL_SPC_BT2020_NCL)
GLSL("uniform mat3 cms_matrix;\n");
GLSL("uniform sampler2D texture0; \n");
GLSL("uniform sampler2D texture1; \n");
if (Planes == 3)
GLSL("uniform sampler2D texture2; \n");
GLSL("void main() { \n");
GLSL("vec4 color; \n");
if (colorspace == AVCOL_SPC_BT2020_NCL) {
GLSL("color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r; \n");
if (Planes == 3) {
GLSL("color.g = 1.003906 * vec4(texture(texture1, texcoord1)).r; \n");
GLSL("color.b = 1.003906 * vec4(texture(texture2, texcoord2)).r; \n");
} else {
GLSL("color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n");
}
GLSL("// color conversion\n");
GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n");
GLSL("color.a = 1.0; \n");
GLSL("// pl_shader_linearize \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n");
// GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
// GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(%f)) * vec3(1.0/%f)) + vec3(%f),bvec3(lessThan(vec3(0.5), color.rgb)));\n",HLG_C, HLG_A, HLG_B);
GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(0.55991073)) * vec3(1.0/0.17883277)) + vec3(0.28466892), bvec3(lessThan(vec3(0.5), color.rgb)));\n");
GLSL("// color mapping \n");
GLSL("color.rgb = cms_matrix * color.rgb; \n");
#ifndef GAMMA
GLSL("// pl_shader_delinearize \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n");
// GLSL("color.rgb = pow(color.rgb, vec3(1.0/2.4)); \n");
GLSL("color.rgb = mix(vec3(0.5) * sqrt(color.rgb), vec3(0.17883277) * log(color.rgb - vec3(0.28466892)) + vec3(0.55991073), bvec3(lessThan(vec3(1.0), color.rgb))); \n");
// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n");
// GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
// GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(%f)) * vec3(1.0/%f)) + vec3(%f),bvec3(lessThan(vec3(0.5), color.rgb)));\n",HLG_C, HLG_A, HLG_B);
GLSL("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,exp((color.rgb - vec3(0.55991073)) * vec3(1.0/0.17883277)) + vec3(0.28466892), bvec3(lessThan(vec3(0.5), color.rgb)));\n");
GLSL("// color mapping \n");
GLSL("color.rgb = cms_matrix * color.rgb; \n");
#ifndef GAMMA
GLSL("// pl_shader_delinearize \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
// GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n");
// GLSL("color.rgb = pow(color.rgb, vec3(1.0/2.4)); \n");
GLSL("color.rgb = mix(vec3(0.5) * sqrt(color.rgb), vec3(0.17883277) * log(color.rgb - vec3(0.28466892)) + vec3(0.55991073), bvec3(lessThan(vec3(1.0), color.rgb))); \n");
#endif
GLSL("out_color = color; \n");
GLSL("} \n");
}
else {
GLSL("out_color = color; \n");
GLSL("} \n");
} else {
GLSL("color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r; \n");
if (Planes == 3) {
GLSL("color.g = 1.000000 * vec4(texture(texture1, texcoord1)).r;\n");
GLSL("color.b = 1.000000 * vec4(texture(texture2, texcoord2)).r;\n");
} else {
GLSL("color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg; \n");
}
GLSL("// color conversion \n");
GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n");
GLSL("color.a = 1.0; \n");
GLSL("color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r; \n");
if (Planes == 3) {
GLSL("color.g = 1.000000 * vec4(texture(texture1, texcoord1)).r;\n");
GLSL("color.b = 1.000000 * vec4(texture(texture2, texcoord2)).r;\n");
} else {
GLSL("color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg; \n");
}
GLSL("// color conversion \n");
GLSL("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c; \n");
GLSL("color.a = 1.0; \n");
GLSL("// linearize gamma \n");
GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // linearize gamma
GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
#ifndef GAMMA
GLSL("// delinearize gamma to sRGB \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
GLSL("color.rgb = mix(color.rgb * vec3(12.92), vec3(1.055) * pow(color.rgb, vec3(1.0/2.4)) - vec3(0.055), bvec3(lessThanEqual(vec3(0.0031308), color.rgb))); \n");
GLSL("// linearize gamma \n");
GLSL("color.rgb = clamp(color.rgb, 0.0, 1.0); \n"); // linearize gamma
GLSL("color.rgb = pow(color.rgb, vec3(2.4)); \n");
#ifndef GAMMA
GLSL("// delinearize gamma to sRGB \n");
GLSL("color.rgb = max(color.rgb, 0.0); \n");
GLSL("color.rgb = mix(color.rgb * vec3(12.92), vec3(1.055) * pow(color.rgb, vec3(1.0/2.4)) - vec3(0.055), bvec3(lessThanEqual(vec3(0.0031308), color.rgb))); \n");
#endif
GLSL("// color mapping \n");
GLSL("out_color = color; \n");
GLSL("} \n");
}
GLSL("// color mapping \n");
GLSL("out_color = color; \n");
GLSL("} \n");
}
//printf(">%s<",sh);
Debug(3, "vor compile fragment\n");
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh);
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, sh);
glBindAttribLocation(gl_prog, 0, "vertex_position");
for (n = 0; n < 6; n++) {
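The conversion these matrices feed into the fragment shader ("color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c") is easier to read written out per channel. A small C sketch of the BT.709 case, assuming the stored rows are the Y/Cb/Cr coefficient columns of the conversion, which is what the standard limited-range BT.709 coefficients suggest; bt709_to_rgb() is only an illustration, not part of the plugin:

    /* Limited-range BT.709 YCbCr (components already scaled to 0..1) to RGB,
     * using the coefficients of yuv_bt709 above; the constant terms fold in
     * the 16/255 luma and 128/255 chroma biases. */
    static void bt709_to_rgb(float Y, float Cb, float Cr, float *R, float *G, float *B)
    {
        *R = 1.164384f * Y + 0.000000f * Cb + 1.792741f * Cr - 0.972945f;
        *G = 1.164384f * Y - 0.213249f * Cb - 0.532909f * Cr + 0.301483f;
        *B = 1.164384f * Y + 2.112402f * Cb + 0.000000f * Cr - 1.133402f;
    }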

shaders/KrigBilateral.glsl (new file)

@@ -0,0 +1,222 @@
// KrigBilateral by Shiandow
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library.
//!HOOK CHROMA
//!BIND HOOKED
//!BIND LUMA
//!SAVE LOWRES_Y
//!WIDTH LUMA.w
//!WHEN CHROMA.w LUMA.w <
//!DESC KrigBilateral Downscaling Y pass 1
#define offset vec2(0,0)
#define axis 1
#define Kernel(x) dot(vec3(0.42659, -0.49656, 0.076849), cos(vec3(0, 1, 2) * acos(-1.) * (x + 1.)))
vec4 hook() {
// Calculate bounds
float low = ceil((LUMA_pos - CHROMA_pt) * LUMA_size - offset - 0.5)[axis];
float high = floor((LUMA_pos + CHROMA_pt) * LUMA_size - offset - 0.5)[axis];
float W = 0.0;
vec4 avg = vec4(0);
vec2 pos = LUMA_pos;
for (float k = low; k <= high; k++) {
pos[axis] = LUMA_pt[axis] * (k - offset[axis] + 0.5);
float rel = (pos[axis] - LUMA_pos[axis])*CHROMA_size[axis];
float w = Kernel(rel);
vec4 y = textureGrad(LUMA_raw, pos, vec2(0.0), vec2(0.0)).xxxx * LUMA_mul;
y.y *= y.y;
avg += w * y;
W += w;
}
avg /= W;
avg.y = abs(avg.y - pow(avg.x, 2.0));
return avg;
}
//!HOOK CHROMA
//!BIND HOOKED
//!BIND LOWRES_Y
//!SAVE LOWRES_Y
//!WHEN CHROMA.w LUMA.w <
//!DESC KrigBilateral Downscaling Y pass 2
#define offset vec2(0,0)
#define axis 0
#define Kernel(x) dot(vec3(0.42659, -0.49656, 0.076849), cos(vec3(0, 1, 2) * acos(-1.) * (x + 1.)))
vec4 hook() {
// Calculate bounds
float low = ceil((LOWRES_Y_pos - CHROMA_pt) * LOWRES_Y_size - offset - 0.5)[axis];
float high = floor((LOWRES_Y_pos + CHROMA_pt) * LOWRES_Y_size - offset - 0.5)[axis];
float W = 0.0;
vec4 avg = vec4(0);
vec2 pos = LOWRES_Y_pos;
for (float k = low; k <= high; k++) {
pos[axis] = LOWRES_Y_pt[axis] * (k - offset[axis] + 0.5);
float rel = (pos[axis] - LOWRES_Y_pos[axis])*CHROMA_size[axis];
float w = Kernel(rel);
vec4 y = textureGrad(LOWRES_Y_raw, pos, vec2(0.0), vec2(0.0)).xxxx * LOWRES_Y_mul;
y.y *= y.y;
avg += w * y;
W += w;
}
avg /= W;
avg.y = abs(avg.y - pow(avg.x, 2.0)) + LOWRES_Y_texOff(0).y;
return avg;
}
//!HOOK CHROMA
//!BIND HOOKED
//!BIND LUMA
//!BIND LOWRES_Y
//!WIDTH LUMA.w
//!HEIGHT LUMA.h
//!WHEN CHROMA.w LUMA.w <
//!OFFSET ALIGN
//!DESC KrigBilateral Upscaling UV
// -- Convenience --
#define sqr(x) dot(x,x)
#define bitnoise 1.0/(2.0*255.0)
#define noise 0.05//5.0*bitnoise
#define chromaOffset vec2(0.0, 0.0)
// -- Window Size --
#define taps 3
#define even (float(taps) - 2.0 * floor(float(taps) / 2.0) == 0.0)
#define minX int(1.0-ceil(float(taps)/2.0))
#define maxX int(floor(float(taps)/2.0))
#define Kernel(x) (cos(acos(-1.0)*(x)/float(taps))) // Hann kernel
// -- Input processing --
#define GetY(coord) LOWRES_Y_tex(LOWRES_Y_pt*(pos+coord+vec2(0.5))).xy
#define GetUV(coord) CHROMA_tex(CHROMA_pt*(pos+coord+vec2(0.5))).xy
#define N (taps*taps - 1)
#define M(i,j) Mx[min(i,j)*N + max(i,j) - min(i,j)*(min(i,j)+1)/2]
#define C(i,j) (inversesqrt(1.0 + (X[i].y + X[j].y)/localVar) * exp(-0.5*(sqr(X[i].x - X[j].x)/(localVar + X[i].y + X[j].y) + sqr((coords[i] - coords[j])/radius))) + (X[i].x - y) * (X[j].x - y) / localVar)
#define c(i) (inversesqrt(1.0 + X[i].y/localVar) * exp(-0.5*(sqr(X[i].x - y)/(localVar + X[i].y) + sqr((coords[i] - offset)/radius))))
vec4 hook() {
vec2 pos = CHROMA_pos * HOOKED_size - chromaOffset - vec2(0.5);
vec2 offset = pos - (even ? floor(pos) : round(pos));
pos -= offset;
vec2 coords[N+1];
vec4 X[N+1];
float y = LUMA_texOff(0).x;
vec4 total = vec4(0);
coords[0] = vec2(-1,-1); coords[1] = vec2(-1, 0); coords[2] = vec2(-1, 1);
coords[3] = vec2( 0,-1); coords[4] = vec2( 0, 1); coords[5] = vec2( 1,-1);
coords[6] = vec2( 1, 0); coords[7] = vec2( 1, 1); coords[8] = vec2( 0, 0);
for (int i=0; i<N+1; i++) {
X[i] = vec4(GetY(coords[i]), GetUV(coords[i]));
vec2 w = clamp(1.5 - abs(coords[i] - offset), 0.0, 1.0);
total += w.x*w.y*vec4(X[i].x, pow(X[i].x, 2.0), X[i].y, 1.0);
}
total.xyz /= total.w;
float localVar = sqr(noise) + abs(total.y - pow(total.x, 2.0)) + total.z;
float radius = 1.0;
float Mx[N*(N+1)/2];
float b[N];
vec4 interp = X[N];
b[0] = c(0) - c(N) - C(0,N) + C(N,N); M(0, 0) = C(0,0) - C(0,N) - C(0,N) + C(N,N); M(0, 1) = C(0,1) - C(1,N) - C(0,N) + C(N,N); M(0, 2) = C(0,2) - C(2,N) - C(0,N) + C(N,N); M(0, 3) = C(0,3) - C(3,N) - C(0,N) + C(N,N); M(0, 4) = C(0,4) - C(4,N) - C(0,N) + C(N,N); M(0, 5) = C(0,5) - C(5,N) - C(0,N) + C(N,N); M(0, 6) = C(0,6) - C(6,N) - C(0,N) + C(N,N); M(0, 7) = C(0,7) - C(7,N) - C(0,N) + C(N,N);
b[1] = c(1) - c(N) - C(1,N) + C(N,N); M(1, 1) = C(1,1) - C(1,N) - C(1,N) + C(N,N); M(1, 2) = C(1,2) - C(2,N) - C(1,N) + C(N,N); M(1, 3) = C(1,3) - C(3,N) - C(1,N) + C(N,N); M(1, 4) = C(1,4) - C(4,N) - C(1,N) + C(N,N); M(1, 5) = C(1,5) - C(5,N) - C(1,N) + C(N,N); M(1, 6) = C(1,6) - C(6,N) - C(1,N) + C(N,N); M(1, 7) = C(1,7) - C(7,N) - C(1,N) + C(N,N);
b[2] = c(2) - c(N) - C(2,N) + C(N,N); M(2, 2) = C(2,2) - C(2,N) - C(2,N) + C(N,N); M(2, 3) = C(2,3) - C(3,N) - C(2,N) + C(N,N); M(2, 4) = C(2,4) - C(4,N) - C(2,N) + C(N,N); M(2, 5) = C(2,5) - C(5,N) - C(2,N) + C(N,N); M(2, 6) = C(2,6) - C(6,N) - C(2,N) + C(N,N); M(2, 7) = C(2,7) - C(7,N) - C(2,N) + C(N,N);
b[3] = c(3) - c(N) - C(3,N) + C(N,N); M(3, 3) = C(3,3) - C(3,N) - C(3,N) + C(N,N); M(3, 4) = C(3,4) - C(4,N) - C(3,N) + C(N,N); M(3, 5) = C(3,5) - C(5,N) - C(3,N) + C(N,N); M(3, 6) = C(3,6) - C(6,N) - C(3,N) + C(N,N); M(3, 7) = C(3,7) - C(7,N) - C(3,N) + C(N,N);
b[4] = c(4) - c(N) - C(4,N) + C(N,N); M(4, 4) = C(4,4) - C(4,N) - C(4,N) + C(N,N); M(4, 5) = C(4,5) - C(5,N) - C(4,N) + C(N,N); M(4, 6) = C(4,6) - C(6,N) - C(4,N) + C(N,N); M(4, 7) = C(4,7) - C(7,N) - C(4,N) + C(N,N);
b[5] = c(5) - c(N) - C(5,N) + C(N,N); M(5, 5) = C(5,5) - C(5,N) - C(5,N) + C(N,N); M(5, 6) = C(5,6) - C(6,N) - C(5,N) + C(N,N); M(5, 7) = C(5,7) - C(7,N) - C(5,N) + C(N,N);
b[6] = c(6) - c(N) - C(6,N) + C(N,N); M(6, 6) = C(6,6) - C(6,N) - C(6,N) + C(N,N); M(6, 7) = C(6,7) - C(7,N) - C(6,N) + C(N,N);
b[7] = c(7) - c(N) - C(7,N) + C(N,N); M(7, 7) = C(7,7) - C(7,N) - C(7,N) + C(N,N);
b[1] -= b[0] * M(1, 0) / M(0, 0); M(1, 1) -= M(0, 1) * M(1, 0) / M(0, 0); M(1, 2) -= M(0, 2) * M(1, 0) / M(0, 0); M(1, 3) -= M(0, 3) * M(1, 0) / M(0, 0); M(1, 4) -= M(0, 4) * M(1, 0) / M(0, 0); M(1, 5) -= M(0, 5) * M(1, 0) / M(0, 0); M(1, 6) -= M(0, 6) * M(1, 0) / M(0, 0); M(1, 7) -= M(0, 7) * M(1, 0) / M(0, 0);
b[2] -= b[0] * M(2, 0) / M(0, 0); M(2, 2) -= M(0, 2) * M(2, 0) / M(0, 0); M(2, 3) -= M(0, 3) * M(2, 0) / M(0, 0); M(2, 4) -= M(0, 4) * M(2, 0) / M(0, 0); M(2, 5) -= M(0, 5) * M(2, 0) / M(0, 0); M(2, 6) -= M(0, 6) * M(2, 0) / M(0, 0); M(2, 7) -= M(0, 7) * M(2, 0) / M(0, 0);
b[3] -= b[0] * M(3, 0) / M(0, 0); M(3, 3) -= M(0, 3) * M(3, 0) / M(0, 0); M(3, 4) -= M(0, 4) * M(3, 0) / M(0, 0); M(3, 5) -= M(0, 5) * M(3, 0) / M(0, 0); M(3, 6) -= M(0, 6) * M(3, 0) / M(0, 0); M(3, 7) -= M(0, 7) * M(3, 0) / M(0, 0);
b[4] -= b[0] * M(4, 0) / M(0, 0); M(4, 4) -= M(0, 4) * M(4, 0) / M(0, 0); M(4, 5) -= M(0, 5) * M(4, 0) / M(0, 0); M(4, 6) -= M(0, 6) * M(4, 0) / M(0, 0); M(4, 7) -= M(0, 7) * M(4, 0) / M(0, 0);
b[5] -= b[0] * M(5, 0) / M(0, 0); M(5, 5) -= M(0, 5) * M(5, 0) / M(0, 0); M(5, 6) -= M(0, 6) * M(5, 0) / M(0, 0); M(5, 7) -= M(0, 7) * M(5, 0) / M(0, 0);
b[6] -= b[0] * M(6, 0) / M(0, 0); M(6, 6) -= M(0, 6) * M(6, 0) / M(0, 0); M(6, 7) -= M(0, 7) * M(6, 0) / M(0, 0);
b[7] -= b[0] * M(7, 0) / M(0, 0); M(7, 7) -= M(0, 7) * M(7, 0) / M(0, 0);
b[2] -= b[1] * M(2, 1) / M(1, 1); M(2, 2) -= M(1, 2) * M(2, 1) / M(1, 1); M(2, 3) -= M(1, 3) * M(2, 1) / M(1, 1); M(2, 4) -= M(1, 4) * M(2, 1) / M(1, 1); M(2, 5) -= M(1, 5) * M(2, 1) / M(1, 1); M(2, 6) -= M(1, 6) * M(2, 1) / M(1, 1); M(2, 7) -= M(1, 7) * M(2, 1) / M(1, 1);
b[3] -= b[1] * M(3, 1) / M(1, 1); M(3, 3) -= M(1, 3) * M(3, 1) / M(1, 1); M(3, 4) -= M(1, 4) * M(3, 1) / M(1, 1); M(3, 5) -= M(1, 5) * M(3, 1) / M(1, 1); M(3, 6) -= M(1, 6) * M(3, 1) / M(1, 1); M(3, 7) -= M(1, 7) * M(3, 1) / M(1, 1);
b[4] -= b[1] * M(4, 1) / M(1, 1); M(4, 4) -= M(1, 4) * M(4, 1) / M(1, 1); M(4, 5) -= M(1, 5) * M(4, 1) / M(1, 1); M(4, 6) -= M(1, 6) * M(4, 1) / M(1, 1); M(4, 7) -= M(1, 7) * M(4, 1) / M(1, 1);
b[5] -= b[1] * M(5, 1) / M(1, 1); M(5, 5) -= M(1, 5) * M(5, 1) / M(1, 1); M(5, 6) -= M(1, 6) * M(5, 1) / M(1, 1); M(5, 7) -= M(1, 7) * M(5, 1) / M(1, 1);
b[6] -= b[1] * M(6, 1) / M(1, 1); M(6, 6) -= M(1, 6) * M(6, 1) / M(1, 1); M(6, 7) -= M(1, 7) * M(6, 1) / M(1, 1);
b[7] -= b[1] * M(7, 1) / M(1, 1); M(7, 7) -= M(1, 7) * M(7, 1) / M(1, 1);
b[3] -= b[2] * M(3, 2) / M(2, 2); M(3, 3) -= M(2, 3) * M(3, 2) / M(2, 2); M(3, 4) -= M(2, 4) * M(3, 2) / M(2, 2); M(3, 5) -= M(2, 5) * M(3, 2) / M(2, 2); M(3, 6) -= M(2, 6) * M(3, 2) / M(2, 2); M(3, 7) -= M(2, 7) * M(3, 2) / M(2, 2);
b[4] -= b[2] * M(4, 2) / M(2, 2); M(4, 4) -= M(2, 4) * M(4, 2) / M(2, 2); M(4, 5) -= M(2, 5) * M(4, 2) / M(2, 2); M(4, 6) -= M(2, 6) * M(4, 2) / M(2, 2); M(4, 7) -= M(2, 7) * M(4, 2) / M(2, 2);
b[5] -= b[2] * M(5, 2) / M(2, 2); M(5, 5) -= M(2, 5) * M(5, 2) / M(2, 2); M(5, 6) -= M(2, 6) * M(5, 2) / M(2, 2); M(5, 7) -= M(2, 7) * M(5, 2) / M(2, 2);
b[6] -= b[2] * M(6, 2) / M(2, 2); M(6, 6) -= M(2, 6) * M(6, 2) / M(2, 2); M(6, 7) -= M(2, 7) * M(6, 2) / M(2, 2);
b[7] -= b[2] * M(7, 2) / M(2, 2); M(7, 7) -= M(2, 7) * M(7, 2) / M(2, 2);
b[4] -= b[3] * M(4, 3) / M(3, 3); M(4, 4) -= M(3, 4) * M(4, 3) / M(3, 3); M(4, 5) -= M(3, 5) * M(4, 3) / M(3, 3); M(4, 6) -= M(3, 6) * M(4, 3) / M(3, 3); M(4, 7) -= M(3, 7) * M(4, 3) / M(3, 3);
b[5] -= b[3] * M(5, 3) / M(3, 3); M(5, 5) -= M(3, 5) * M(5, 3) / M(3, 3); M(5, 6) -= M(3, 6) * M(5, 3) / M(3, 3); M(5, 7) -= M(3, 7) * M(5, 3) / M(3, 3);
b[6] -= b[3] * M(6, 3) / M(3, 3); M(6, 6) -= M(3, 6) * M(6, 3) / M(3, 3); M(6, 7) -= M(3, 7) * M(6, 3) / M(3, 3);
b[7] -= b[3] * M(7, 3) / M(3, 3); M(7, 7) -= M(3, 7) * M(7, 3) / M(3, 3);
b[5] -= b[4] * M(5, 4) / M(4, 4); M(5, 5) -= M(4, 5) * M(5, 4) / M(4, 4); M(5, 6) -= M(4, 6) * M(5, 4) / M(4, 4); M(5, 7) -= M(4, 7) * M(5, 4) / M(4, 4);
b[6] -= b[4] * M(6, 4) / M(4, 4); M(6, 6) -= M(4, 6) * M(6, 4) / M(4, 4); M(6, 7) -= M(4, 7) * M(6, 4) / M(4, 4);
b[7] -= b[4] * M(7, 4) / M(4, 4); M(7, 7) -= M(4, 7) * M(7, 4) / M(4, 4);
b[6] -= b[5] * M(6, 5) / M(5, 5); M(6, 6) -= M(5, 6) * M(6, 5) / M(5, 5); M(6, 7) -= M(5, 7) * M(6, 5) / M(5, 5);
b[7] -= b[5] * M(7, 5) / M(5, 5); M(7, 7) -= M(5, 7) * M(7, 5) / M(5, 5);
b[7] -= b[6] * M(7, 6) / M(6, 6); M(7, 7) -= M(6, 7) * M(7, 6) / M(6, 6);
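// Back substitution: recover the weights in reverse order and accumulate
// the chroma prediction as an offset from the centre sample X[N].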
b[N-1-0] /= M(N-1-0, N-1-0);
interp += b[N-1-0] * (X[N-1-0] - X[N]);
b[N-1-1] -= M(N-1-1, 7) * b[7]; b[N-1-1] /= M(N-1-1, N-1-1);
interp += b[N-1-1] * (X[N-1-1] - X[N]);
b[N-1-2] -= M(N-1-2, 6) * b[6]; b[N-1-2] -= M(N-1-2, 7) * b[7]; b[N-1-2] /= M(N-1-2, N-1-2);
interp += b[N-1-2] * (X[N-1-2] - X[N]);
b[N-1-3] -= M(N-1-3, 5) * b[5]; b[N-1-3] -= M(N-1-3, 6) * b[6]; b[N-1-3] -= M(N-1-3, 7) * b[7]; b[N-1-3] /= M(N-1-3, N-1-3);
interp += b[N-1-3] * (X[N-1-3] - X[N]);
b[N-1-4] -= M(N-1-4, 4) * b[4]; b[N-1-4] -= M(N-1-4, 5) * b[5]; b[N-1-4] -= M(N-1-4, 6) * b[6]; b[N-1-4] -= M(N-1-4, 7) * b[7]; b[N-1-4] /= M(N-1-4, N-1-4);
interp += b[N-1-4] * (X[N-1-4] - X[N]);
b[N-1-5] -= M(N-1-5, 3) * b[3]; b[N-1-5] -= M(N-1-5, 4) * b[4]; b[N-1-5] -= M(N-1-5, 5) * b[5]; b[N-1-5] -= M(N-1-5, 6) * b[6]; b[N-1-5] -= M(N-1-5, 7) * b[7]; b[N-1-5] /= M(N-1-5, N-1-5);
interp += b[N-1-5] * (X[N-1-5] - X[N]);
b[N-1-6] -= M(N-1-6, 2) * b[2]; b[N-1-6] -= M(N-1-6, 3) * b[3]; b[N-1-6] -= M(N-1-6, 4) * b[4]; b[N-1-6] -= M(N-1-6, 5) * b[5]; b[N-1-6] -= M(N-1-6, 6) * b[6]; b[N-1-6] -= M(N-1-6, 7) * b[7]; b[N-1-6] /= M(N-1-6, N-1-6);
interp += b[N-1-6] * (X[N-1-6] - X[N]);
b[N-1-7] -= M(N-1-7, 1) * b[1]; b[N-1-7] -= M(N-1-7, 2) * b[2]; b[N-1-7] -= M(N-1-7, 3) * b[3]; b[N-1-7] -= M(N-1-7, 4) * b[4]; b[N-1-7] -= M(N-1-7, 5) * b[5]; b[N-1-7] -= M(N-1-7, 6) * b[6]; b[N-1-7] -= M(N-1-7, 7) * b[7]; b[N-1-7] /= M(N-1-7, N-1-7);
interp += b[N-1-7] * (X[N-1-7] - X[N]);
return interp.zwxx;
}

View File

@@ -0,0 +1,206 @@
// vim: set ft=glsl:
/*
LumaSharpenHook 0.3
original hlsl by Christian Cann Schuldt Jensen ~ CeeJay.dk
port to glsl by Anon
It blurs the original pixel with the surrounding pixels and then subtracts this blur to sharpen the image.
It does this in luma to avoid color artifacts and allows limiting the maximum sharpening to avoid or lessen halo artifacts.
This is similar to using Unsharp Mask in Photoshop.
*/
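// In short, an unsharp mask on the luma plane:
//   sharp  = ori - blur(ori)
//   output = ori + clamp(sharp * sharp_strength, -sharp_clamp, +sharp_clamp)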
// -- Hooks --
//!HOOK LUMA
//!BIND HOOKED
// -- Sharpening --
#define sharp_strength 0.30 //[0.10 to 3.00] Strength of the sharpening
#define sharp_clamp 0.035 //[0.000 to 1.000] Limits the maximum amount of sharpening a pixel receives - Default is 0.035
// -- Advanced sharpening settings --
#define pattern 2 //[1|2|3|4] Choose a sample pattern. 1 = Fast, 2 = Normal, 3 = Wider, 4 = Pyramid shaped.
//[8|9] Experimental slower patterns. 8 = 9 tap 9 fetch gaussian, 9 = 9 tap 9 fetch high pass.
#define offset_bias 1.0 //[0.0 to 6.0] Offset bias adjusts the radius of the sampling pattern.
vec4 hook(){
vec4 colorInput = LUMA_tex(LUMA_pos);
//We are on luma plane: xyzw = [luma_val, 0.0, 0.0, 1.0]
float ori = colorInput.x;
// -- Combining the strength and luma multipliers --
float sharp_strength_luma = sharp_strength; //I'll be combining even more multipliers with it later on
float px = 1.0;
float py = 1.0;
// Sampling patterns
// [ NW, , NE ] Each texture lookup (except ori)
// [ ,ori, ] samples 4 pixels
// [ SW, , SE ]
// -- Pattern 1 -- A (fast) 7 tap gaussian using only 2+1 texture fetches.
#if pattern == 1
// -- Gaussian filter --
// [ 1/9, 2/9, ] [ 1 , 2 , ]
// [ 2/9, 8/9, 2/9] = [ 2 , 8 , 2 ]
// [ , 2/9, 1/9] [ , 2 , 1 ]
px = (px / 3.0) * offset_bias;
py = (py / 3.0) * offset_bias;
float blur_ori = LUMA_texOff(vec2(px,py)).x; // North West
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South East
//blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
//blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori *= 0.5; //Divide by the number of texture fetches
sharp_strength_luma *= 1.5; // Adjust strength to approximate the strength of pattern 2
#endif
// -- Pattern 2 -- A 9 tap gaussian using 4+1 texture fetches.
#if pattern == 2
// -- Gaussian filter --
// [ .25, .50, .25] [ 1 , 2 , 1 ]
// [ .50, 1, .50] = [ 2 , 4 , 2 ]
// [ .25, .50, .25] [ 1 , 2 , 1 ]
px = px * 0.5 * offset_bias;
py = py * 0.5 * offset_bias;
float blur_ori = LUMA_texOff(vec2(px,-py)).x; // South East
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
blur_ori += LUMA_texOff(vec2(-px,py)).x; // North West
blur_ori *= 0.25; // ( /= 4) Divide by the number of texture fetches
#endif
// -- Pattern 3 -- An experimental 17 tap gaussian using 4+1 texture fetches.
#if pattern == 3
// -- Gaussian filter --
// [ , 4 , 6 , , ]
// [ ,16 ,24 ,16 , 4 ]
// [ 6 ,24 , ,24 , 6 ]
// [ 4 ,16 ,24 ,16 , ]
// [ , , 6 , 4 , ]
px = px * offset_bias;
py = py * offset_bias;
float blur_ori = LUMA_texOff(vec2(0.4*px,-1.2*py)).x; // South South East
blur_ori += LUMA_texOff(vec2(-1.2*px,-0.4*py)).x; // West South West
blur_ori += LUMA_texOff(vec2(1.2*px,0.4*py)).x; // East North East
blur_ori += LUMA_texOff(vec2(-0.4*px,1.2*py)).x; // North North West
blur_ori *= 0.25; // ( /= 4) Divide by the number of texture fetches
sharp_strength_luma *= 0.51;
#endif
// -- Pattern 4 -- A 9 tap high pass (pyramid filter) using 4+1 texture fetches.
#if pattern == 4
// -- Gaussian filter --
// [ .50, .50, .50] [ 1 , 1 , 1 ]
// [ .50, , .50] = [ 1 , , 1 ]
// [ .50, .50, .50] [ 1 , 1 , 1 ]
float blur_ori = LUMA_texOff(vec2(0.5 * px,-py * offset_bias)).x; // South South East
blur_ori += LUMA_texOff(vec2(offset_bias * -px,0.5 * -py)).x; // West South West
blur_ori += LUMA_texOff(vec2(offset_bias * px,0.5 * py)).x; // East North East
blur_ori += LUMA_texOff(vec2(0.5 * -px,py * offset_bias)).x; // North North West
//blur_ori += (2.0 * ori); // Probably not needed. Only serves to lessen the effect.
blur_ori *= 0.25; //Divide by the number of texture fetches
sharp_strength_luma *= 0.666; // Adjust strength to approximate the strength of pattern 2
#endif
// -- Pattern 8 -- A (slower) 9 tap gaussian using 9 texture fetches.
#if pattern == 8
// -- Gaussian filter --
// [ 1 , 2 , 1 ]
// [ 2 , 4 , 2 ]
// [ 1 , 2 , 1 ]
px = px * offset_bias;
py = py * offset_bias;
float blur_ori = LUMA_texOff(vec2(-px,py)).x; // North West
blur_ori += LUMA_texOff(vec2(px,-py)).x; // South East
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
float blur_ori2 = LUMA_texOff(vec2(0.0,py)).x; // North
blur_ori2 += LUMA_texOff(vec2(0.0,-py)).x; // South
blur_ori2 += LUMA_texOff(vec2(-px,0.0)).x; // West
blur_ori2 += LUMA_texOff(vec2(px,0.0)).x; // East
blur_ori2 *= 2.0;
blur_ori += blur_ori2;
blur_ori += (ori * 4.0); // Probably not needed. Only serves to lessen the effect.
// dot()s with gaussian strengths here?
blur_ori /= 16.0; //Divide by the number of texture fetches
sharp_strength_luma *= 0.75; // Adjust strength to approximate the strength of pattern 2
#endif
// -- Pattern 9 -- A (slower) 9 tap high pass using 9 texture fetches.
#if pattern == 9
// -- Gaussian filter --
// [ 1 , 1 , 1 ]
// [ 1 , 1 , 1 ]
// [ 1 , 1 , 1 ]
px = px * offset_bias;
py = py * offset_bias;
float blur_ori = LUMA_texOff(vec2(-px,py)).x; // North West
blur_ori += LUMA_texOff(vec2(px,-py)).x; // South East
blur_ori += LUMA_texOff(vec2(-px,-py)).x; // South West
blur_ori += LUMA_texOff(vec2(px,py)).x; // North East
blur_ori += ori; // Probably not needed. Only serves to lessen the effect.
blur_ori += LUMA_texOff(vec2(0.0,py)).x; // North
blur_ori += LUMA_texOff(vec2(0.0,-py)).x; // South
blur_ori += LUMA_texOff(vec2(-px,0.0)).x; // West
blur_ori += LUMA_texOff(vec2(px,0.0)).x; // East
blur_ori /= 9.0; //Divide by the number of texture fetches
sharp_strength_luma *= (8.0/9.0); // Adjust strength to approximate the strength of pattern 2
#endif
// -- Calculate the sharpening --
float sharp = ori - blur_ori; //Subtracting the blurred image from the original image
// -- Adjust strength of the sharpening and clamp it--
float sharp_strength_luma_clamp = sharp_strength_luma / (2.0 * sharp_clamp); //Roll part of the clamp into the dot
float sharp_luma = clamp((sharp * sharp_strength_luma_clamp + 0.5), 0.0,1.0 ); //Calculate the luma, adjust the strength, scale up and clamp
sharp_luma = (sharp_clamp * 2.0) * sharp_luma - sharp_clamp; //scale down
// -- Combining the values to get the final sharpened pixel --
colorInput.x = colorInput.x + sharp_luma; // Add the sharpening to the input color.
return clamp(colorInput, 0.0,1.0);
}

View File

@@ -0,0 +1,246 @@
// Copyright (c) 2015-2018, bacondither
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer
// in this position and unchanged.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Adaptive sharpen - version 2018-04-14 - (requires ps >= 3.0)
// Tuned for use post resize
//!HOOK SCALED
//!BIND HOOKED
//!SAVE ASSD
//!COMPONENTS 2
//!DESC adaptive-sharpen
//--------------------------------------- Settings ------------------------------------------------
#define curve_height 1.6 // Main control of sharpening strength [>0]
// 0.3 <-> 2.0 is a reasonable range of values
// Defined values under this row are "optimal" DO NOT CHANGE IF YOU DO NOT KNOW WHAT YOU ARE DOING!
#define curveslope 0.5 // Sharpening curve slope, high edge values
#define L_overshoot 0.003 // Max light overshoot before compression [>0.001]
#define L_compr_low 0.167 // Light compression, default (0.169=~9x)
#define L_compr_high 0.334 // Light compression, surrounded by edges (0.337=~4x)
#define D_overshoot 0.009 // Max dark overshoot before compression [>0.001]
#define D_compr_low 0.250 // Dark compression, default (0.253=~6x)
#define D_compr_high 0.500 // Dark compression, surrounded by edges (0.504=~2.5x)
#define scale_lim 0.1 // Abs max change before compression (0.1=+-10%)
#define scale_cs 0.056 // Compression slope above scale_lim
#define pm_p sat(1.0/curve_height) // Power mean p-value [>0-1.0]
//-------------------------------------------------------------------------------------------------
// Soft limit
#define soft_lim(v,s) ( (exp(2.0*min(abs(v), s*24.0)/s) - 1.0)/(exp(2.0*min(abs(v), s*24.0)/s) + 1.0)*s )
// Weighted power mean
#define wpmean(a,b,c) ( pow((c*pow(abs(a), pm_p) + (1.0-c)*pow(b, pm_p)), (1.0/pm_p)) )
// Get destination pixel values
#define get(x,y) ( HOOKED_texOff(vec2(x, y)).rgb )
#define sat(x) ( clamp(x, 0.0, 1.0) )
// Colour to luma, fast approx gamma, avg of rec. 709 & 601 luma coeffs
#define CtL(RGB) ( sqrt(dot(vec3(0.2558, 0.6511, 0.0931), pow(sat(RGB), vec3(2.0)))) )
// Center pixel diff
#define mdiff(a,b,c,d,e,f,g) ( abs(luma[g]-luma[a]) + abs(luma[g]-luma[b]) \
+ abs(luma[g]-luma[c]) + abs(luma[g]-luma[d]) \
+ 0.5*(abs(luma[g]-luma[e]) + abs(luma[g]-luma[f])) )
#define b_diff(pix) ( abs(blur-c[pix]) )
vec4 hook() {
vec4 o = HOOKED_tex(HOOKED_pos);
// Get points, saturate colour data in c[0]
// [ c22 ]
// [ c24, c9, c23 ]
// [ c21, c1, c2, c3, c18 ]
// [ c19, c10, c4, c0, c5, c11, c16 ]
// [ c20, c6, c7, c8, c17 ]
// [ c15, c12, c14 ]
// [ c13 ]
vec3 c[25] = vec3[](sat(o.rgb), get(-1,-1), get( 0,-1), get( 1,-1), get(-1, 0),
get( 1, 0), get(-1, 1), get( 0, 1), get( 1, 1), get( 0,-2),
get(-2, 0), get( 2, 0), get( 0, 2), get( 0, 3), get( 1, 2),
get(-1, 2), get( 3, 0), get( 2, 1), get( 2,-1), get(-3, 0),
get(-2, 1), get(-2,-1), get( 0,-3), get( 1,-2), get(-1,-2));
// Blur, gauss 3x3
vec3 blur = (2.0 * (c[2]+c[4]+c[5]+c[7]) + (c[1]+c[3]+c[6]+c[8]) + 4.0 * c[0]) / 16.0;
// Contrast compression, center = 0.5, scaled to 1/3
float c_comp = sat(0.266666681f + 0.9*exp2(dot(blur, vec3(-7.4/3.0))));
// Edge detection
// Relative matrix weights
// [ 1 ]
// [ 4, 5, 4 ]
// [ 1, 5, 6, 5, 1 ]
// [ 4, 5, 4 ]
// [ 1 ]
float edge = length( 1.38*b_diff(0)
+ 1.15*(b_diff(2) + b_diff(4) + b_diff(5) + b_diff(7))
+ 0.92*(b_diff(1) + b_diff(3) + b_diff(6) + b_diff(8))
+ 0.23*(b_diff(9) + b_diff(10) + b_diff(11) + b_diff(12)) ) * c_comp;
// RGB to luma
float c0_Y = CtL(c[0]);
float luma[25] = float[](c0_Y, CtL(c[1]), CtL(c[2]), CtL(c[3]), CtL(c[4]), CtL(c[5]), CtL(c[6]),
CtL(c[7]), CtL(c[8]), CtL(c[9]), CtL(c[10]), CtL(c[11]), CtL(c[12]),
CtL(c[13]), CtL(c[14]), CtL(c[15]), CtL(c[16]), CtL(c[17]), CtL(c[18]),
CtL(c[19]), CtL(c[20]), CtL(c[21]), CtL(c[22]), CtL(c[23]), CtL(c[24]));
// Precalculated default squared kernel weights
const vec3 w1 = vec3(0.5, 1.0, 1.41421356237); // 0.25, 1.0, 2.0
const vec3 w2 = vec3(0.86602540378, 1.0, 0.54772255751); // 0.75, 1.0, 0.3
// Transition to a concave kernel if the center edge val is above thr
vec3 dW = pow(mix( w1, w2, smoothstep( 0.3, 0.8, edge)), vec3(2.0));
float mdiff_c0 = 0.02 + 3.0*( abs(luma[0]-luma[2]) + abs(luma[0]-luma[4])
+ abs(luma[0]-luma[5]) + abs(luma[0]-luma[7])
+ 0.25*(abs(luma[0]-luma[1]) + abs(luma[0]-luma[3])
+abs(luma[0]-luma[6]) + abs(luma[0]-luma[8])) );
// Use lower weights for pixels in a more active area relative to center pixel area
// This results in narrower and less visible overshoots around sharp edges
float weights[12] = float[](( min((mdiff_c0/mdiff(24, 21, 2, 4, 9, 10, 1)), dW.y) ),
( dW.x ),
( min((mdiff_c0/mdiff(23, 18, 5, 2, 9, 11, 3)), dW.y) ),
( dW.x ),
( dW.x ),
( min((mdiff_c0/mdiff(4, 20, 15, 7, 10, 12, 6)), dW.y) ),
( dW.x ),
( min((mdiff_c0/mdiff(5, 7, 17, 14, 12, 11, 8)), dW.y) ),
( min((mdiff_c0/mdiff(2, 24, 23, 22, 1, 3, 9)), dW.z) ),
( min((mdiff_c0/mdiff(20, 19, 21, 4, 1, 6, 10)), dW.z) ),
( min((mdiff_c0/mdiff(17, 5, 18, 16, 3, 8, 11)), dW.z) ),
( min((mdiff_c0/mdiff(13, 15, 7, 14, 6, 8, 12)), dW.z) ));
weights[0] = (max(max((weights[8] + weights[9])/4.0, weights[0]), 0.25) + weights[0])/2.0;
weights[2] = (max(max((weights[8] + weights[10])/4.0, weights[2]), 0.25) + weights[2])/2.0;
weights[5] = (max(max((weights[9] + weights[11])/4.0, weights[5]), 0.25) + weights[5])/2.0;
weights[7] = (max(max((weights[10] + weights[11])/4.0, weights[7]), 0.25) + weights[7])/2.0;
// Calculate the negative part of the laplace kernel
float weightsum = 0.0;
float neg_laplace = 0.0;
for (int pix = 0; pix < 12; ++pix)
{
neg_laplace += luma[pix+1]*weights[pix];
weightsum += weights[pix];
}
neg_laplace = neg_laplace / weightsum;
// Compute sharpening magnitude function
float sharpen_val = (curve_height/(curve_height*curveslope*pow((edge), 3.5) + 0.625));
// Calculate sharpening diff and scale
float sharpdiff = (c0_Y - neg_laplace)*(sharpen_val + 0.01);
// Calculate local near min & max, partial sort
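// The unrolled exchange passes below are a partial sort: they move the smallest
// sample into luma[0], the largest into luma[24], and approximate runners-up
// into luma[1]/luma[23], which feed the near-min/near-max estimates nmin/nmax.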
float temp;
for (int i1 = 0; i1 < 24; i1 += 2)
{
temp = luma[i1];
luma[i1] = min(luma[i1], luma[i1+1]);
luma[i1+1] = max(temp, luma[i1+1]);
}
for (int i2 = 24; i2 > 0; i2 -= 2)
{
temp = luma[0];
luma[0] = min(luma[0], luma[i2]);
luma[i2] = max(temp, luma[i2]);
temp = luma[24];
luma[24] = max(luma[24], luma[i2-1]);
luma[i2-1] = min(temp, luma[i2-1]);
}
for (int i1 = 1; i1 < 24-1; i1 += 2)
{
temp = luma[i1];
luma[i1] = min(luma[i1], luma[i1+1]);
luma[i1+1] = max(temp, luma[i1+1]);
}
for (int i2 = 24-1; i2 > 1; i2 -= 2)
{
temp = luma[1];
luma[1] = min(luma[1], luma[i2]);
luma[i2] = max(temp, luma[i2]);
temp = luma[24-1];
luma[24-1] = max(luma[24-1], luma[i2-1]);
luma[i2-1] = min(temp, luma[i2-1]);
}
float nmax = (max(luma[23], c0_Y)*3.0 + luma[24])/4.0;
float nmin = (min(luma[1], c0_Y)*3.0 + luma[0])/4.0;
// Calculate tanh scale factors
float min_dist = min(abs(nmax - c0_Y), abs(c0_Y - nmin));
float pos_scale = min_dist + min(L_overshoot, 1.0001 - min_dist - c0_Y);
float neg_scale = min_dist + min(D_overshoot, 0.0001 + c0_Y - min_dist);
pos_scale = min(pos_scale, scale_lim*(1.0 - scale_cs) + pos_scale*scale_cs);
neg_scale = min(neg_scale, scale_lim*(1.0 - scale_cs) + neg_scale*scale_cs);
// Soft limited anti-ringing with tanh, wpmean to control compression slope
sharpdiff = wpmean(max(sharpdiff, 0.0), soft_lim( max(sharpdiff, 0.0), pos_scale ), L_compr_low )
- wpmean(min(sharpdiff, 0.0), soft_lim( min(sharpdiff, 0.0), neg_scale ), D_compr_low );
return vec4(sharpdiff, c0_Y, 0, 1);
}
//!HOOK SCALED
//!BIND HOOKED
//!BIND ASSD
//!DESC adaptive-sharpen equalization
#define video_level_out false // True to preserve BTB & WTW (minor summation error)
// Normally it should be set to false
#define SD(x,y) ASSD_texOff(vec2(x,y)).r
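// SD(x,y) reads back the sharpening difference stored in the red channel of
// the ASSD pass above; the green channel carries that pass's luma value c0_Y.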
vec4 hook() {
vec4 o = HOOKED_texOff(0);
float sharpdiff = SD( 0, 0) - 0.6 * 0.25 * (SD(-0.5,-0.5) + SD( 0.5,-0.5) + SD(-0.5, 0.5) + SD( 0.5, 0.5));
float c0_Y = ASSD_texOff(vec2(0)).g;
float sharpdiff_lim = clamp(c0_Y + sharpdiff, 0.0, 1.0) - c0_Y;
float satmul = (c0_Y + max(sharpdiff_lim*0.9, sharpdiff_lim)*1.03 + 0.03)/(c0_Y + 0.03);
vec3 res = c0_Y + (sharpdiff_lim*3 + sharpdiff)/4 + (clamp(o.rgb, 0.0, 1.0) - c0_Y)*satmul;
o.rgb = video_level_out == true ? res + o.rgb - clamp(o.rgb, 0.0, 1.0) : res;
return o;
}

41 shaders/filmgrain.glsl Normal file
View File

@@ -0,0 +1,41 @@
//!HOOK LUMA
//!BIND HOOKED
//!DESC gaussian film grain
// normal value is 0.05, changed for demo purposes
#define INTENSITY 0.55
float permute(float x)
{
x = (34.0 * x + 1.0) * x;
return fract(x * 1.0/289.0) * 289.0;
}
float rand(inout float state)
{
state = permute(state);
return fract(state * 1.0/41.0);
}
vec4 hook()
{
vec3 m = vec3(HOOKED_pos, random) + vec3(1.0);
float state = permute(permute(m.x) + m.y) + m.z;
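// a0..b1 are coefficients of a rational approximation used to shape the
// uniform sample p into approximately Gaussian-distributed grain (see DESC above).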
const float a0 = 0.151015505647689;
const float a1 = -0.5303572634357367;
const float a2 = 1.365020122861334;
const float b0 = 0.132089632343748;
const float b1 = -0.7607324991323768;
float p = 0.95 * rand(state) + 0.025;
float q = p - 0.5;
float r = q * q;
float grain = q * (a2 + (a1 * r + a0) / (r*r + b1*r + b0));
grain *= 0.255121822830526; // normalize to [-1,1)
vec4 color = HOOKED_tex(HOOKED_pos);
color.rgb += vec3(INTENSITY * grain);
return color;
}

View File

@@ -61,7 +61,7 @@ extern "C"
/// vdr-plugin version number.
/// Makefile extracts the version number for generating the file name
/// for the distribution archive.
static const char *const VERSION = "3.1.0"
static const char *const VERSION = "3.3.1"
#ifdef GIT_REV
"-GIT" GIT_REV
#endif
@@ -106,7 +106,7 @@ static int ConfigVideoBrightness; ///< config video brightness
static int ConfigVideoContrast = 100; ///< config video contrast
static int ConfigVideoSaturation = 100; ///< config video saturation
static int ConfigVideoHue; ///< config video hue
static int ConfigGamma; ///< config Gamma
static int ConfigGamma=100; ///< config Gamma
static int ConfigTargetColorSpace; ///< config Target Colorspace
static int ConfigScalerTest; /// Test for Scalers
static int ConfigColorBlindness;
@@ -136,11 +136,6 @@ static int ConfigVideoCutTopBottom[RESOLUTIONS];
/// config cut left and right pixels
static int ConfigVideoCutLeftRight[RESOLUTIONS];
static int ConfigAutoCropEnabled; ///< auto crop detection enabled
static int ConfigAutoCropInterval; ///< auto crop detection interval
static int ConfigAutoCropDelay; ///< auto crop detection delay
static int ConfigAutoCropTolerance; ///< auto crop detection tolerance
static int ConfigVideoAudioDelay; ///< config audio delay
static char ConfigAudioDrift; ///< config audio drift
static char ConfigAudioPassthrough; ///< config audio pass-through mask
@@ -997,10 +992,6 @@ class cMenuSetupSoft:public cMenuSetupPage
int CutTopBottom[RESOLUTIONS];
int CutLeftRight[RESOLUTIONS];
int AutoCropInterval;
int AutoCropDelay;
int AutoCropTolerance;
int Audio;
int AudioDelay;
int AudioDrift;
@@ -1096,10 +1087,10 @@ void cMenuSetupSoft::Create(void)
"auto", "1920x1080", "1280x720", "custom",
};
static const char *const video_display_formats_4_3[] = {
"pan&scan", "letterbox", "center cut-out",
"pan&scan", "letterbox", "center cut-out", "original"
};
static const char *const video_display_formats_16_9[] = {
"pan&scan", "pillarbox", "center cut-out",
"pan&scan", "pillarbox", "center cut-out", "original"
};
#ifdef YADIF
static const char *const deinterlace[] = {
@@ -1133,7 +1124,7 @@ void cMenuSetupSoft::Create(void)
static char *scalingtest[100];
if (scalers == 0) {
scalingtest[0] = (char *) "Off";
scalingtest[0] = (char *)"Off";
for (scalers = 0; pl_named_filters[scalers].filter != NULL; scalers++) {
scaling[scalers] = (char *)pl_named_filters[scalers].name;
scalingtest[scalers + 1] = (char *)pl_named_filters[scalers].name;
@@ -1182,9 +1173,9 @@ void cMenuSetupSoft::Create(void)
Add(new cMenuEditBoolItem(tr("Enable Screensaver(DPMS) at black screen"), &EnableDPMSatBlackScreen,
trVDR("no"), trVDR("yes")));
#endif
Add(new cMenuEditStraItem(trVDR("4:3 video display format"), &Video4to3DisplayFormat, 3,
Add(new cMenuEditStraItem(trVDR("4:3 video display format"), &Video4to3DisplayFormat, 4,
video_display_formats_4_3));
Add(new cMenuEditStraItem(trVDR("16:9+other video display format"), &VideoOtherDisplayFormat, 3,
Add(new cMenuEditStraItem(trVDR("16:9+other video display format"), &VideoOtherDisplayFormat, 4,
video_display_formats_16_9));
#if 0
@@ -1243,15 +1234,6 @@ void cMenuSetupSoft::Create(void)
Add(new cMenuEditIntItem(tr("Cut left and right (pixel)"), &CutLeftRight[i], 0, 250));
}
}
#ifdef USE_AUTOCROP
//
// auto-crop
//
Add(SeparatorItem(tr("Auto-crop")));
Add(new cMenuEditIntItem(tr("Autocrop interval (frames)"), &AutoCropInterval, 0, 200, tr("off")));
Add(new cMenuEditIntItem(tr("Autocrop delay (n * interval)"), &AutoCropDelay, 0, 200));
Add(new cMenuEditIntItem(tr("Autocrop tolerance (pixel)"), &AutoCropTolerance, 0, 32));
#endif
}
//
// audio
@@ -1261,12 +1243,15 @@ void cMenuSetupSoft::Create(void)
if (Audio) {
Add(new cMenuEditIntItem(tr("Audio/Video delay (ms)"), &AudioDelay, -1000, 1000));
Add(new cMenuEditStraItem(tr("Audio drift correction"), &AudioDrift, 4, audiodrift));
Add(new cMenuEditBoolItem(tr("Pass-through default"), &AudioPassthroughDefault, trVDR("off"), trVDR("on")));
Add(new cMenuEditBoolItem(tr("\040\040PCM pass-through"), &AudioPassthroughPCM, trVDR("no"), trVDR("yes")));
Add(new cMenuEditBoolItem(tr("\040\040AC-3 pass-through"), &AudioPassthroughAC3, trVDR("no"), trVDR("yes")));
Add(new cMenuEditBoolItem(tr("\040\040E-AC-3 pass-through"), &AudioPassthroughEAC3, trVDR("no"),
trVDR("yes")));
Add(new cMenuEditBoolItem(tr("Enable (E-)AC-3 (decoder) downmix"), &AudioDownmix, trVDR("no"), trVDR("yes")));
if (AudioPassthroughDefault) {
Add(new cMenuEditBoolItem(tr("\040\040PCM pass-through"), &AudioPassthroughPCM, trVDR("no"), trVDR("yes")));
Add(new cMenuEditBoolItem(tr("\040\040AC-3 pass-through"), &AudioPassthroughAC3, trVDR("no"), trVDR("yes")));
Add(new cMenuEditBoolItem(tr("\040\040E-AC-3 pass-through"), &AudioPassthroughEAC3, trVDR("no"),trVDR("yes")));
} else {
Add(new cMenuEditBoolItem(tr("Enable (E-)AC-3 (decoder) downmix"), &AudioDownmix, trVDR("no"), trVDR("yes")));
}
Add(new cMenuEditBoolItem(tr("Volume control"), &AudioSoftvol, tr("Hardware"), tr("Software")));
Add(new cMenuEditBoolItem(tr("Enable normalize volume"), &AudioNormalize, trVDR("no"), trVDR("yes")));
Add(new cMenuEditIntItem(tr(" Max normalize factor (/1000)"), &AudioMaxNormalize, 0, 10000));
@@ -1322,10 +1307,12 @@ eOSState cMenuSetupSoft::ProcessKey(eKeys key)
int old_osd_size;
int old_resolution_shown[RESOLUTIONS];
int i;
int old_pass;
old_general = General;
old_video = Video;
old_audio = Audio;
old_pass = AudioPassthroughDefault;
#ifdef USE_PIP
old_pip = Pip;
#endif
@@ -1340,6 +1327,7 @@ eOSState cMenuSetupSoft::ProcessKey(eKeys key)
#ifdef USE_PIP
|| old_pip != Pip
#endif
|| old_pass != AudioPassthroughDefault
|| old_osd_size != OsdSize) {
Create(); // update menu
} else {
@@ -1428,13 +1416,6 @@ cMenuSetupSoft::cMenuSetupSoft(void)
CutTopBottom[i] = ConfigVideoCutTopBottom[i];
CutLeftRight[i] = ConfigVideoCutLeftRight[i];
}
//
// auto-crop
//
AutoCropInterval = ConfigAutoCropInterval;
AutoCropDelay = ConfigAutoCropDelay;
AutoCropTolerance = ConfigAutoCropTolerance;
//
// audio
//
@@ -1591,12 +1572,6 @@ void cMenuSetupSoft::Store(void)
VideoSetCutTopBottom(ConfigVideoCutTopBottom);
VideoSetCutLeftRight(ConfigVideoCutLeftRight);
SetupStore("AutoCrop.Interval", ConfigAutoCropInterval = AutoCropInterval);
SetupStore("AutoCrop.Delay", ConfigAutoCropDelay = AutoCropDelay);
SetupStore("AutoCrop.Tolerance", ConfigAutoCropTolerance = AutoCropTolerance);
VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, ConfigAutoCropTolerance);
ConfigAutoCropEnabled = ConfigAutoCropInterval != 0;
SetupStore("AudioDelay", ConfigVideoAudioDelay = AudioDelay);
VideoSetAudioDelay(ConfigVideoAudioDelay);
SetupStore("AudioDrift", ConfigAudioDrift = AudioDrift);
@@ -2153,10 +2128,10 @@ void cSoftHdMenu::Create(void)
int dropped;
int counter;
float frametime;
int width,height;
int color;
int eotf;
char *colorstr, *eotfstr;
int width, height;
int color;
int eotf;
char *colorstr, *eotfstr;
current = Current(); // get current menu item index
Clear(); // clear the menu
@@ -2191,8 +2166,8 @@ void cSoftHdMenu::Create(void)
#endif
Add(new cOsdItem(NULL, osUnknown, false));
Add(new cOsdItem(NULL, osUnknown, false));
GetStats(&missed, &duped, &dropped, &counter, &frametime, &width, &height, &color,&eotf);
switch (color) {
GetStats(&missed, &duped, &dropped, &counter, &frametime, &width, &height, &color, &eotf);
switch (color) {
case AVCOL_SPC_RGB:
colorstr = strdup("BT 601");
eotfstr = strdup("BT 1886");
@@ -2213,8 +2188,9 @@ void cSoftHdMenu::Create(void)
}
Add(new cOsdItem(cString::sprintf(tr(" Frames missed(%d) duped(%d) dropped(%d) total(%d)"), missed, duped, dropped,
counter), osUnknown, false));
Add(new cOsdItem(cString::sprintf(tr(" Video %dx%d Color: %s Gamma: %s"), width, height, colorstr, eotfstr), osUnknown, false));
// Add(new cOsdItem(cString::sprintf(tr(" Frame Process time %2.2fms"), frametime), osUnknown, false));
Add(new cOsdItem(cString::sprintf(tr(" Video %dx%d Color: %s Gamma: %s"), width, height, colorstr, eotfstr),
osUnknown, false));
// Add(new cOsdItem(cString::sprintf(tr(" Frame Process time %2.2fms"), frametime), osUnknown, false));
SetCurrent(Get(current)); // restore selected menu entry
Display(); // display build menu
}
@@ -2297,33 +2273,6 @@ static void HandleHotkey(int code)
case 22: // toggle full screen
VideoSetFullscreen(-1);
break;
case 23: // disable auto-crop
ConfigAutoCropEnabled = 0;
VideoSetAutoCrop(0, ConfigAutoCropDelay, ConfigAutoCropTolerance);
Skins.QueueMessage(mtInfo, tr("auto-crop disabled and freezed"));
break;
case 24: // enable auto-crop
ConfigAutoCropEnabled = 1;
if (!ConfigAutoCropInterval) {
ConfigAutoCropInterval = 50;
}
VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, ConfigAutoCropTolerance);
Skins.QueueMessage(mtInfo, tr("auto-crop enabled"));
break;
case 25: // toggle auto-crop
ConfigAutoCropEnabled ^= 1;
// no interval configured, use some default
if (!ConfigAutoCropInterval) {
ConfigAutoCropInterval = 50;
}
VideoSetAutoCrop(ConfigAutoCropEnabled * ConfigAutoCropInterval, ConfigAutoCropDelay,
ConfigAutoCropTolerance);
if (ConfigAutoCropEnabled) {
Skins.QueueMessage(mtInfo, tr("auto-crop enabled"));
} else {
Skins.QueueMessage(mtInfo, tr("auto-crop disabled and freezed"));
}
break;
case 30: // change 4:3 -> window mode
case 31:
case 32:
@@ -3122,7 +3071,11 @@ bool cPluginSoftHdDevice::ProcessArgs(int argc, char *argv[])
bool cPluginSoftHdDevice::Initialize(void)
{
// dsyslog("[softhddev]%s:\n", __FUNCTION__);
#if defined PLACEBO
const char *d;
d = cPlugin::ConfigDirectory("shaders");
strcpy(MyConfigDir,d);
#endif
MyDevice = new cSoftHdDevice();
return true;
@@ -3293,7 +3246,6 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value)
ConfigSuspendX11 = atoi(value);
return true;
}
if (!strcasecmp(name, "Video4to3DisplayFormat")) {
Config4to3DisplayFormat = atoi(value);
VideoSet4to3DisplayFormat(Config4to3DisplayFormat);
@@ -3436,20 +3388,6 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value)
}
}
if (!strcasecmp(name, "AutoCrop.Interval")) {
VideoSetAutoCrop(ConfigAutoCropInterval = atoi(value), ConfigAutoCropDelay, ConfigAutoCropTolerance);
ConfigAutoCropEnabled = ConfigAutoCropInterval != 0;
return true;
}
if (!strcasecmp(name, "AutoCrop.Delay")) {
VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay = atoi(value), ConfigAutoCropTolerance);
return true;
}
if (!strcasecmp(name, "AutoCrop.Tolerance")) {
VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay, ConfigAutoCropTolerance = atoi(value));
return true;
}
if (!strcasecmp(name, "AudioDelay")) {
VideoSetAudioDelay(ConfigVideoAudioDelay = atoi(value));
return true;
@@ -3705,7 +3643,6 @@ static const char *SVDRPHelpText[] = {
" 12: toggle audio pass-through\n" " 13: decrease audio delay by 10ms\n"
" 14: increase audio delay by 10ms\n" " 15: toggle ac3 mixdown\n"
" 20: disable fullscreen\n\040 21: enable fullscreen\n" " 22: toggle fullscreen\n"
" 23: disable auto-crop\n\040 24: enable auto-crop\n" " 25: toggle auto-crop\n"
" 30: stretch 4:3 to display\n\040 31: pillar box 4:3 in display\n"
" 32: center cut-out 4:3 to display\n" " 39: rotate 4:3 to display zoom mode\n"
" 40: stretch other aspect ratios to display\n" " 41: letter box other aspect ratios in display\n"

View File

@@ -645,7 +645,7 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st
q = pesdx->Buffer + pesdx->Skip;
n = pesdx->Index - pesdx->Skip;
while (n >= 5) {
int r=0;
int r = 0;
unsigned codec_id = AV_CODEC_ID_NONE;
// 4 bytes 0xFFExxxxx Mpeg audio
@@ -1326,6 +1326,7 @@ static VideoStream MyVideoStream[1]; ///< normal video stream
#ifdef USE_PIP
static VideoStream PipVideoStream[1]; ///< pip video stream
static int PiPActive = 0, mwx, mwy, mww, mwh; ///< main window frame for PiP
#endif
#ifdef DEBUG
@@ -1489,7 +1490,7 @@ static void VideoNextPacket(VideoStream * stream, int codec_id)
VideoResetPacket(stream);
}
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
/**
** Place mpeg video data in packet ringbuffer.
@@ -1831,7 +1832,7 @@ int VideoPollInput(VideoStream * stream)
** @retval 1 stream paused
** @retval -1 empty stream
*/
int VideoDecodeInput(VideoStream * stream)
int VideoDecodeInput(VideoStream * stream, int trick)
{
int filled;
AVPacket *avpkt;
@@ -1849,6 +1850,9 @@ int VideoDecodeInput(VideoStream * stream)
stream->Close = 0;
return 1;
}
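// when called with trick set (trick speed / jump handling), drop a pending
// clear-buffer request so the already queued packets are kept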
if (stream->ClearBuffers && trick)
stream->ClearBuffers = 0;
if (stream->ClearBuffers) { // clear buffer request
atomic_set(&stream->PacketsFilled, 0);
stream->PacketRead = stream->PacketWrite;
@@ -1939,7 +1943,7 @@ int VideoDecodeInput(VideoStream * stream)
avpkt->size = avpkt->stream_index;
avpkt->stream_index = 0;
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
// fprintf(stderr, "[");
// DumpMpeg(avpkt->data, avpkt->size);
#ifdef STILL_DEBUG
@@ -2301,7 +2305,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
}
// SKIP PES header, begin of start code
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
VideoMpegEnqueue(stream, pts, dts, check - 2, l + 2);
#else
VideoEnqueue(stream, pts, dts, check - 2, l + 2);
@@ -2314,7 +2318,7 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
return size;
}
#ifdef USE_PIP
#if defined(USE_PIP) || defined(VAAPI)
if (stream->CodecID == AV_CODEC_ID_MPEG2VIDEO) {
// SKIP PES header
VideoMpegEnqueue(stream, pts, dts, data + 9 + n, size - 9 - n);
@@ -2675,12 +2679,14 @@ void StillPicture(const uint8_t * data, int size)
#ifdef STILL_DEBUG
fprintf(stderr, "still-picture\n");
#endif
for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 12 : 12); ++i) {
const uint8_t *split;
int n;
// FIXME: vdr pes recordings sends mixed audio/video
if ((data[3] & 0xF0) == 0xE0) { // PES packet
split = data;
n = size;
// split the I-frame into single pes packets
@@ -2714,7 +2720,7 @@ void StillPicture(const uint8_t * data, int size)
VideoNextPacket(MyVideoStream, MyVideoStream->CodecID); // terminate last packet
} else { // ES packet
if (MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) {
if (0 && MyVideoStream->CodecID != AV_CODEC_ID_MPEG2VIDEO) {
VideoNextPacket(MyVideoStream, AV_CODEC_ID_NONE); // close last stream
MyVideoStream->CodecID = AV_CODEC_ID_MPEG2VIDEO;
}
@@ -2871,11 +2877,11 @@ const char *CommandLineHelp(void)
" -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n"
" -c channel\taudio mixer channel name (fe. PCM)\n" " -d display\tdisplay of x11 server (fe. :0.0)\n"
" -f\t\tstart with fullscreen window (only with window manager)\n"
" -g geometry\tx11 window geometry wxh+x+y\n"
" -r Refresh\tRefreshrate for DRM (default is 50 Hz)\n"
" -C Connector\tConnector for DRM (default is current Connector)\n"
" -v device\tvideo driver device (cuvid)\n"
" -s\t\tstart in suspended mode\n" " -x\t\tstart x11 server, with -xx try to connect, if this fails\n"
" -g geometry\tx11 window geometry wxh+x+y\n" " -r Refresh\tRefreshrate for DRM (default is 50 Hz)\n"
" -C Connector\tConnector for DRM (default is current Connector)\n"
" -S shader\tShader to use.\n\t\tOnly with placebo. Can be repeated for more shaders\n"
" -v device\tvideo driver device (cuvid)\n" " -s\t\tstart in suspended mode\n"
" -x\t\tstart x11 server, with -xx try to connect, if this fails\n"
" -X args\tX11 server arguments (f.e. -nocursor)\n" " -w workaround\tenable/disable workarounds\n"
"\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n"
"\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n"
@@ -2907,7 +2913,7 @@ int ProcessArgs(int argc, char *const argv[])
#endif
for (;;) {
switch (getopt(argc, argv, "-a:c:C:r:d:fg:p:sv:w:xDX:")) {
switch (getopt(argc, argv, "-a:c:C:r:d:fg:p:S:sv:w:xDX:")) {
case 'a': // audio device for pcm
AudioSetDevice(optarg);
continue;
@@ -2916,10 +2922,16 @@ int ProcessArgs(int argc, char *const argv[])
continue;
case 'C': // Connector for DRM
VideoSetConnector(optarg);
continue;
case 'r': // Connector for DRM
continue;
case 'r': // Refresh rate for DRM
VideoSetRefresh(optarg);
continue;
continue;
case 'S': // Shader
if (VideoSetShader(optarg) < 0) {
fprintf(stderr, _("Too many shaders defined\n"));
return 0;
}
continue;
case 'p': // pass-through audio device
AudioSetPassthroughDevice(optarg);
continue;
@@ -3369,19 +3381,21 @@ void Resume(void)
** @param[out] dropped dropped frames
** @param[out] count number of decoded frames
*/
void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime, int *width, int *height, int *color, int *eotf)
void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime, int *width, int *height,
int *color, int *eotf)
{
*missed = 0;
*duped = 0;
*dropped = 0;
*counter = 0;
*frametime = 0.0f;
*width = 0;
*height = 0;
*color = NULL;
*eotf = NULL;
*width = 0;
*height = 0;
*color = NULL;
*eotf = NULL;
if (MyVideoStream->HwDecoder) {
VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime, width, height, color, eotf);
VideoGetStats(MyVideoStream->HwDecoder, missed, duped, dropped, counter, frametime, width, height, color,
eotf);
}
}
@@ -3395,6 +3409,12 @@ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *framet
*/
void ScaleVideo(int x, int y, int width, int height)
{
#ifdef USE_PIP
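// While PiP is active, a fullscreen request (ScaleVideo(0, 0, 0, 0)) is
// replaced by the stored main-window rectangle mwx/mwy/mww/mwh.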
if (PiPActive && !(x & y & width & height)) {
Info("[softhddev]%s: fullscreen with PiP active.\n", __FUNCTION__);
x = mwx; y = mwy; width = mww; height = mwh;
}
#endif
if (MyVideoStream->HwDecoder) {
VideoSetOutputPosition(MyVideoStream->HwDecoder, x, y, width, height);
}
@@ -3453,6 +3473,8 @@ void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip
VideoStreamOpen(PipVideoStream);
}
PipSetPosition(x, y, width, height, pip_x, pip_y, pip_width, pip_height);
mwx = x; mwy = y; mww = width; mwh = height;
PiPActive = 1;
}
/**
@@ -3466,6 +3488,8 @@ void PipStop(void)
return;
}
PiPActive = 0;
mwx = 0; mwy = 0; mww = 0; mwh = 0;
ScaleVideo(0, 0, 0, 0);
PipVideoStream->Close = 1;

1349 video.c

File diff suppressed because it is too large

10 video.h
View File

@@ -44,7 +44,7 @@ extern signed char VideoHardwareDecoder; ///< flag use hardware decoder
extern char VideoIgnoreRepeatPict; ///< disable repeat pict warning
extern int VideoAudioDelay; ///< audio/video delay
extern char ConfigStartX11Server; ///< flag start the x11 server
extern char MyConfigDir[];
//----------------------------------------------------------------------------
// Prototypes
//----------------------------------------------------------------------------
@@ -179,9 +179,6 @@ extern void VideoSetBackground(uint32_t);
/// Set audio delay.
extern void VideoSetAudioDelay(int);
/// Set auto-crop parameters.
extern void VideoSetAutoCrop(int, int, int);
/// Clear OSD.
extern void VideoOsdClear(void);
@@ -234,7 +231,7 @@ extern void VideoExit(void); ///< Cleanup and exit video module.
extern int VideoPollInput(VideoStream *);
/// Decode video input buffers.
extern int VideoDecodeInput(VideoStream *);
extern int VideoDecodeInput(VideoStream *, int);
/// Get number of input buffers.
extern int VideoGetBuffers(const VideoStream *);
@@ -245,6 +242,9 @@ extern void SetDPMSatBlackScreen(int);
/// Raise the frontend window
extern int VideoRaiseWindow(void);
/// Set Shaders
extern int VideoSetShader(char *);
#ifdef USE_OPENGLOSD
extern void ActivateOsd(GLuint, int, int, int, int);
#endif