34 Commits
v2.1.0 ... v3.0

Author SHA1 Message Date
jojo61
f7449c8d3a Prepare Makefile for batch processing 2020-02-01 11:47:11 +01:00
jojo61
5ef6597340 Fix cuvid compile 2020-01-31 14:15:56 +01:00
jojo61
c06b891c2b Final fix for memory leak 2020-01-31 12:28:18 +01:00
jojo61
26945ef9d2 One more try to fix memleak 2020-01-27 18:00:45 +01:00
jojo61
e0239a549e One more fix for memleak 2020-01-21 16:59:03 +01:00
jojo61
431e37e93f try to fix memleak 2020-01-19 16:58:28 +01:00
jojo61
4b5b27382a Fixed skindesigner shady in drm Version 2020-01-18 16:20:08 +01:00
jojo61
2ec7a250a3 Fix drm Filepointer 2020-01-18 12:58:02 +01:00
jojo61
1674600882 Fixes for drm Aspectratio and UHD HDR 2020-01-15 18:01:27 +01:00
jojo61
269c396a2c more drm DETA/ATTA fixes 2020-01-13 18:17:07 +01:00
jojo61
ce3813a9e9 Minor cleanups 2020-01-08 17:04:18 +01:00
jojo61
53314a17f5 Remove deprecated call 2020-01-08 17:00:53 +01:00
jojo61
e4e6a81f54 Add patch for skinnopacity 2020-01-08 16:44:37 +01:00
jojo61
70b67f4466 Fix DETA/ATTA with DRM (hopefully) 2020-01-07 22:22:07 +01:00
jojo61
2f0b1d0df9 Reverse DETA fix 2020-01-03 13:58:24 +01:00
jojo61
dfe70f4f96 More OpenGL and DRM fixes 2020-01-03 12:22:47 +01:00
jojo61
fe3681f6eb Fix UHD crash with DRM 2019-12-28 11:16:01 +01:00
jojo61
50299f178f Provide environment Setting for OpenGL
- no .drirc needed anymore
2019-12-28 10:57:47 +01:00
jojo61
6ea4f5076b Reverse commit need more testing 2019-12-28 07:55:51 +01:00
jojo61
3cb66dd3de make .drirc obsolete 2019-12-27 14:26:12 +01:00
jojo61
68fa4fe4dc Some refactoring of shaders 2019-12-23 14:53:30 +01:00
jojo61
463109fcb6 Prepare for v4l2m2m for Raspi 4 2019-12-23 11:14:38 +01:00
jojo61
d1bc51edb8 Fixed missing lib 2019-12-20 12:12:53 +01:00
jojo61
67832ac333 Enabled -C Option for DRM 2019-12-17 12:15:35 +01:00
jojo61
1274e673ec More cuvid fixes 2019-12-17 11:12:00 +01:00
jojo61
4334894515 Fix cuvid without placebo 2019-12-17 10:22:22 +01:00
jojo61
d2dedb40dd New Parameter -r for Refreshrate with DRM
Fixed aspectratio with DRM
2019-12-12 11:31:40 +01:00
jojo61
3bed988b14 Initial Support for DRM with HDR10 and HDR-HLG 2019-12-10 10:45:22 +01:00
jojo61
f17e58c7c5 Provide Patches for HDR with Intel NUC and LSPCON
The patches are for the drm-intel Branch of Linux
see https://github.com/freedesktop/drm-intel
2019-12-08 14:43:43 +01:00
jojo61
5cd68b6eed Merge pull request #21 from dnehring7/master
Fix indentation.
2019-11-19 13:15:27 +00:00
Dirk Nehring
1a56d620ac Fix indentation. 2019-11-18 13:01:19 +01:00
jojo61
695a6495dd Small Fix for Fonts 2019-11-17 11:07:15 +01:00
jojo61
780c594ba2 More fixes for OSD 2019-11-13 17:09:49 +01:00
jojo61
8838d4c754 Fixed OSD Size for skindesigner with UHD
Fixed corruption with fast Menu Switch
2019-11-11 17:45:59 +01:00
14 changed files with 2840 additions and 1183 deletions

Makefile

@@ -1,6 +1,6 @@
#
# Makefile for a Video Disk Recorder plugin
#
#
# $Id: 2a41981a57e5e83036463c6a08c84b86ed9d2be3 $
# The official name of this plugin.
@@ -9,22 +9,30 @@
### Configuration (edit this for your needs)
# comment out if not needed
# config as needed
# what kind of driver do we make -
# if VAAPI is enabled the drivername is softhdvaapi
# if CUVID is enabled the drivername is softhdcuvid
#VAAPI=1
CUVID=1
# what kind of decoder do we make -
# if VAAPI is enabled the pluginname is softhdvaapi
# if CUVID is enabled the pluginname is softhdcuvid
# if DRM is enabled the pluginname is softhddrm
VAAPI ?= 0
CUVID ?= 1
# use libplacebo - available for both drivers
#LIBPLACEBO=1
# if you enable DRM then the plugin will only run without X server
# only valid for VAAPI
# does not work with libplacebo
DRM ?= 0
# use libplacebo - available for both decoders but not for DRM
LIBPLACEBO ?= 0
# use YADIF deint - only available with cuvid
#YADIF=1
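# Example (not part of the original Makefile): because the switches above use ?=,
# they can also be set on the make command line instead of editing this file, e.g.
#   make CUVID=1                 # CUVID decoder
#   make VAAPI=1 CUVID=0         # VAAPI decoder
#   make VAAPI=1 CUVID=0 DRM=1   # VAAPI decoder on DRM/KMS (no X server)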
CONFIG := #-DDEBUG # remove # to enable debug output
@@ -60,7 +68,6 @@ SWRESAMPLE = 1
#AVRESAMPLE = 1
#endif
CONFIG := #-DDEBUG #-DOSD_DEBUG # enable debug output+functions
CONFIG += -DHAVE_GL # needed for mpv libs
#CONFIG += -DSTILL_DEBUG=2 # still picture debug verbose level
CONFIG += -DAV_INFO -DAV_INFO_TIME=3000 # info/debug a/v sync
@@ -92,7 +99,7 @@ TMPDIR ?= /tmp
### The compiler options:
export CFLAGS = $(call PKGCFG,cflags)
export CFLAGS = $(call PKGCFG,cflags)
export CXXFLAGS = $(call PKGCFG,cxxflags)
ifeq ($(CFLAGS),)
@@ -136,26 +143,35 @@ endif
ifeq ($(OPENGL),1)
CONFIG += -DUSE_GLX
_CFLAGS += $(shell pkg-config --cflags gl glu glew)
#LIBS += $(shell pkg-config --libs glu glew)
#LIBS += $(shell pkg-config --libs glu glew)
_CFLAGS += $(shell pkg-config --cflags freetype2)
LIBS += $(shell pkg-config --libs freetype2)
endif
ifeq ($(VAAPI),1)
CONFIG += -DVAAPI
CONFIG += -DVAAPI
#LIBPLACEBO=1
PLUGIN = softhdvaapi
LIBS += -lEGL
LIBS += -lEGL
endif
ifeq ($(LIBPLACEBO),1)
CONFIG += -DPLACEBO
endif
ifeq ($(DRM),1)
PLUGIN = softhddrm
CONFIG += -DUSE_DRM -DVAAPI
_CFLAGS += $(shell pkg-config --cflags libdrm)
LIBS += -lgbm -ldrm
LIBS += -lEGL
endif
ifeq ($(CUVID),1)
CONFIG += -DUSE_PIP # PIP support
CONFIG += -DCUVID # enable CUVID decoder
LIBS += -lEGL -lGL
LIBS += -lEGL -lGL
ifeq ($(YADIF),1)
CONFIG += -DYADIF # Yadif only with CUVID
endif
@@ -173,7 +189,7 @@ SOFILE = libvdr-$(PLUGIN).so
#
# Test that libswresample is available
# Test that libswresample is available
#
#ifneq (exists, $(shell pkg-config libswresample && echo exists))
# $(warning ******************************************************************)
@@ -182,7 +198,7 @@ SOFILE = libvdr-$(PLUGIN).so
#endif
#
# Test and set config for libavutil
# Test and set config for libavutil
#
ifneq (exists, $(shell pkg-config libavutil && echo exists))
$(warning ******************************************************************)
@@ -193,7 +209,7 @@ _CFLAGS += $(shell pkg-config --cflags libavutil)
LIBS += $(shell pkg-config --libs libavutil)
#
# Test and set config for libswscale
# Test and set config for libswscale
#
ifneq (exists, $(shell pkg-config libswscale && echo exists))
$(warning ******************************************************************)
@@ -233,10 +249,10 @@ endif
#_CFLAGS += $(shell pkg-config --cflags libavcodec x11 x11-xcb xcb xcb-icccm)
#LIBS += -lrt $(shell pkg-config --libs libavcodec x11 x11-xcb xcb xcb-icccm)
_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm)
LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm)
_CFLAGS += $(shell pkg-config --cflags x11 x11-xcb xcb xcb-icccm)
LIBS += -lrt $(shell pkg-config --libs x11 x11-xcb xcb xcb-icccm)
_CFLAGS += -I/usr/local/cuda/include
_CFLAGS += -I/usr/local/cuda/include
_CFLAGS += -I./opengl -I./
LIBS += -L/usr/lib64
@@ -247,10 +263,10 @@ LIBS += -lplacebo
endif
ifeq ($(CUVID),1)
LIBS += -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid
LIBS += -lcuda -L/usr/local/cuda/targets/x86_64-linux/lib -lcudart -lnvcuvid
endif
LIBS += -lGLEW -lGLU -ldl -lglut
LIBS += -lGLEW -lGLU -ldl -lglut
### Includes and Defines (add further entries here):
INCLUDES +=
@@ -261,19 +277,19 @@ DEFINES += -DPLUGIN_NAME_I18N='"$(PLUGIN)"' -D_GNU_SOURCE $(CONFIG) \
### Make it standard
override CXXFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \
-g -Wextra -Winit-self -Werror=overloaded-virtual -std=c++0x
override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \
-g -W -Wextra -Winit-self -Werror=overloaded-virtual -Wno-unused-parameter
override CFLAGS += $(_CFLAGS) $(DEFINES) $(INCLUDES) \
-g -W -Wextra -Winit-self -Wdeclaration-after-statement
### The object files (add further files here):
OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o
OBJS = softhdcuvid.o softhddev.o video.o audio.o codec.o ringbuffer.o
ifeq ($(OPENGLOSD),1)
OBJS += openglosd.o
OBJS += openglosd.o
endif
SRCS = $(wildcard $(OBJS:.o=.c)) softhdcuvid.cpp
SRCS = $(wildcard $(OBJS:.o=.c)) *.cpp
### The main target:
@@ -290,11 +306,11 @@ $(DEPFILE): Makefile
### Internationalization (I18N):
PODIR = po
I18Npo = $(wildcard $(PODIR)/*.po)
I18Nmo = $(addsuffix .mo, $(foreach file, $(I18Npo), $(basename $(file))))
PODIR = po
I18Npo = $(wildcard $(PODIR)/*.po)
I18Nmo = $(addsuffix .mo, $(foreach file, $(I18Npo), $(basename $(file))))
I18Nmsgs = $(addprefix $(DESTDIR)$(LOCDIR)/, $(addsuffix /LC_MESSAGES/vdr-$(PLUGIN).mo, $(notdir $(foreach file, $(I18Npo), $(basename $(file))))))
I18Npot = $(PODIR)/$(PLUGIN).pot
I18Npot = $(PODIR)/$(PLUGIN).pot
%.mo: %.po
msgfmt -c -o $@ $<
@@ -322,7 +338,7 @@ $(OBJS): Makefile
$(SOFILE): $(OBJS) shaders.h
$(CXX) $(CXXFLAGS) $(LDFLAGS) -shared $(OBJS) $(LIBS) -o $@
$(CXX) $(CXXFLAGS) $(LDFLAGS) -shared $(OBJS) $(LIBS) -o $@
install-lib: $(SOFILE)
install -D $^ $(DESTDIR)$(LIBDIR)/$^.$(APIVERSION)
@@ -343,11 +359,13 @@ clean:
## Private Targets:
HDRS= $(wildcard *.h)
HDRS= $(wildcard *.h)
indent:
for i in $(SRCS) $(HDRS); do \
indent $$i; \
unexpand -a $$i | sed -e s/constconst/const/ > $$i.up; \
mv $$i.up $$i; \
done
video_test: video.c Makefile

codec.c

@@ -129,13 +129,13 @@ struct _video_decoder_
//----------------------------------------------------------------------------
/**
** Callback to negotiate the PixelFormat.
** Callback to negotiate the PixelFormat.
**
** @param video_ctx codec context
** @param fmt is the list of formats which are supported by
** the codec, it is terminated by -1 as 0 is a
** valid format, the formats are ordered by
** quality.
** @param video_ctx codec context
** @param fmt is the list of formats which are supported by
** the codec, it is terminated by -1 as 0 is a
** valid format, the formats are ordered by
** quality.
*/
static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enum AVPixelFormat *fmt)
{
@@ -143,7 +143,6 @@ static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enu
enum AVPixelFormat fmt1;
decoder = video_ctx->opaque;
// bug in ffmpeg 1.1.1, called with zero width or height
if (!video_ctx->width || !video_ctx->height) {
Error("codec/video: ffmpeg/libav buggy: width or height zero\n");
@@ -157,12 +156,12 @@ static enum AVPixelFormat Codec_get_format(AVCodecContext * video_ctx, const enu
// static void Codec_free_buffer(void *opaque, uint8_t *data);
/**
** Video buffer management, get buffer for frame.
** Video buffer management, get buffer for frame.
**
** Called at the beginning of each frame to get a buffer for it.
** Called at the beginning of each frame to get a buffer for it.
**
** @param video_ctx Codec context
** @param frame Get buffer for this frame
** @param video_ctx Codec context
** @param frame Get buffer for this frame
*/
static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int flags)
{
@@ -195,11 +194,11 @@ static int Codec_get_buffer2(AVCodecContext * video_ctx, AVFrame * frame, int fl
//----------------------------------------------------------------------------
/**
** Allocate a new video decoder context.
** Allocate a new video decoder context.
**
** @param hw_decoder video hardware decoder
** @param hw_decoder video hardware decoder
**
** @returns private decoder pointer for video decoder.
** @returns private decoder pointer for video decoder.
*/
VideoDecoder *CodecVideoNewDecoder(VideoHwDecoder * hw_decoder)
{
@@ -214,9 +213,9 @@ VideoDecoder *CodecVideoNewDecoder(VideoHwDecoder * hw_decoder)
}
/**
** Deallocate a video decoder context.
** Deallocate a video decoder context.
**
** @param decoder private video decoder
** @param decoder private video decoder
*/
void CodecVideoDelDecoder(VideoDecoder * decoder)
{
@@ -224,10 +223,10 @@ void CodecVideoDelDecoder(VideoDecoder * decoder)
}
/**
** Open video decoder.
** Open video decoder.
**
** @param decoder private video decoder
** @param codec_id video codec id
** @param decoder private video decoder
** @param codec_id video codec id
*/
void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
{
@@ -258,6 +257,20 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
}
}
#endif
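// Raspberry Pi build: select the V4L2 memory-to-memory hardware decoders by name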
#ifdef RASPI
switch (codec_id) {
case AV_CODEC_ID_MPEG2VIDEO:
name = "mpeg2_v4l2m2m";
break;
case AV_CODEC_ID_H264:
name = "h264_v4l2m2m";
// name = "h264_mmal";
break;
case AV_CODEC_ID_HEVC:
name = "hevc_v4l2m2m";
break;
}
#endif
if (name && (video_codec = avcodec_find_decoder_by_name(name))) {
Debug(3, "codec: decoder found\n");
} else if ((video_codec = avcodec_find_decoder(codec_id)) == NULL) {
@@ -272,10 +285,16 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
if (!(decoder->VideoCtx = avcodec_alloc_context3(video_codec))) {
Fatal(_("codec: can't allocate video codec context\n"));
}
#ifndef RASPI
if (!HwDeviceContext) {
Fatal("codec: no hw device context to be used");
}
decoder->VideoCtx->hw_device_ctx = av_buffer_ref(HwDeviceContext);
#else
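// Raspberry Pi: no hardware device context; request DRM PRIME frames directly from the v4l2m2m decoder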
decoder->VideoCtx->pix_fmt = AV_PIX_FMT_DRM_PRIME; /* request a DRM frame */
// decoder->VideoCtx->pix_fmt = AV_PIX_FMT_MMAL; /* request a DRM frame */
#endif
// FIXME: for software decoder use all cpus, otherwise 1
decoder->VideoCtx->thread_count = 1;
@@ -290,12 +309,12 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
#ifdef YADIF
deint = 2;
#endif
#ifdef VAAPI
decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1
#if defined VAAPI && !defined RASPI
// decoder->VideoCtx->extra_hw_frames = 8; // VIDEO_SURFACES_MAX +1
if (video_codec->capabilities & (AV_CODEC_CAP_AUTO_THREADS)) {
Debug(3, "codec: auto threads enabled");
decoder->VideoCtx->thread_count = 0;
}
// decoder->VideoCtx->thread_count = 0;
}
if (video_codec->capabilities & AV_CODEC_CAP_TRUNCATED) {
Debug(3, "codec: supports truncated packets");
@@ -307,17 +326,32 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
}
if (video_codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) {
Debug(3, "codec: supports frame threads");
decoder->VideoCtx->thread_count = 0;
// decoder->VideoCtx->thread_count = 0;
// decoder->VideoCtx->thread_type |= FF_THREAD_FRAME;
}
if (video_codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
Debug(3, "codec: supports slice threads");
decoder->VideoCtx->thread_count = 0;
// decoder->VideoCtx->thread_count = 0;
// decoder->VideoCtx->thread_type |= FF_THREAD_SLICE;
}
if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0)
Fatal(_("VAAPI Refcounts invalid\n"));
// if (av_opt_set_int(decoder->VideoCtx, "refcounted_frames", 1, 0) < 0)
// Fatal(_("VAAPI Refcounts invalid\n"));
decoder->VideoCtx->thread_safe_callbacks = 0;
#endif
#ifdef RASPI
decoder->VideoCtx->codec_id = codec_id;
decoder->VideoCtx->flags |= AV_CODEC_FLAG_BITEXACT;
if (video_codec->capabilities & AV_CODEC_CAP_FRAME_THREADS || AV_CODEC_CAP_SLICE_THREADS) {
Debug(3, "codec: supports frame threads");
decoder->VideoCtx->thread_count = 4;
// decoder->VideoCtx->thread_type |= FF_THREAD_FRAME;
}
if (video_codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
Debug(3, "codec: supports slice threads");
decoder->VideoCtx->thread_type |= FF_THREAD_SLICE;
}
#endif
#ifdef CUVID
@@ -367,7 +401,7 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
//decoder->VideoCtx->debug = FF_DEBUG_STARTCODE;
//decoder->VideoCtx->err_recognition |= AV_EF_EXPLODE;
// av_log_set_level(AV_LOG_DEBUG);
//av_log_set_level(AV_LOG_DEBUG);
av_log_set_level(0);
decoder->VideoCtx->get_format = Codec_get_format;
@@ -387,15 +421,15 @@ void CodecVideoOpen(VideoDecoder * decoder, int codec_id)
// reset buggy ffmpeg/libav flag
decoder->GetFormatDone = 0;
#ifdef YADIF
#if defined (YADIF) || defined (RASPI)
decoder->filter = 0;
#endif
}
/**
** Close video decoder.
** Close video decoder.
**
** @param video_decoder private video decoder
** @param video_decoder private video decoder
*/
void CodecVideoClose(VideoDecoder * video_decoder)
{
@@ -423,15 +457,15 @@ void CodecVideoClose(VideoDecoder * video_decoder)
#if 0
/**
** Display pts...
** Display pts...
**
** ffmpeg-0.9 pts always AV_NOPTS_VALUE
** ffmpeg-0.9 pkt_pts nice monotonic (only with HD)
** ffmpeg-0.9 pkt_dts wild jumping -160 - 340 ms
** ffmpeg-0.9 pts always AV_NOPTS_VALUE
** ffmpeg-0.9 pkt_pts nice monotonic (only with HD)
** ffmpeg-0.9 pkt_dts wild jumping -160 - 340 ms
**
** libav 0.8_pre20111116 pts always AV_NOPTS_VALUE
** libav 0.8_pre20111116 pkt_pts always 0 (could be fixed?)
** libav 0.8_pre20111116 pkt_dts wild jumping -160 - 340 ms
** libav 0.8_pre20111116 pts always AV_NOPTS_VALUE
** libav 0.8_pre20111116 pkt_pts always 0 (could be fixed?)
** libav 0.8_pre20111116 pkt_dts wild jumping -160 - 340 ms
*/
void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame)
{
@@ -445,7 +479,7 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame)
}
ms_delay = (1000 * video_ctx->time_base.num) / video_ctx->time_base.den;
ms_delay += frame->repeat_pict * ms_delay / 2;
printf("codec: PTS %s%s %" PRId64 " %d %d/%d %d/%d %dms\n", frame->repeat_pict ? "r" : " ",
printf("codec: PTS %s%s %" PRId64 " %d %d/%d %d/%d %dms\n", frame->repeat_pict ? "r" : " ",
frame->interlaced_frame ? "I" : " ", pts, (int)(pts - last_pts) / 90, video_ctx->time_base.num,
video_ctx->time_base.den, video_ctx->framerate.num, video_ctx->framerate.den, ms_delay);
@@ -457,10 +491,10 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame)
#endif
/**
** Decode a video packet.
** Decode a video packet.
**
** @param decoder video decoder data
** @param avpkt video packet
** @param decoder video decoder data
** @param avpkt video packet
*/
extern int CuvidTestSurfaces();
@@ -468,52 +502,62 @@ extern int CuvidTestSurfaces();
extern int init_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame);
extern int push_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame);
#endif
#ifdef VAAPI
void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
{
AVCodecContext *video_ctx = decoder->VideoCtx;
if (video_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
if (video_ctx->codec_type == AVMEDIA_TYPE_VIDEO && CuvidTestSurfaces()) {
int ret;
AVPacket pkt[1];
AVFrame *frame;
*pkt = *avpkt; // use copy
ret = avcodec_send_packet(video_ctx, pkt);
if (ret < 0) {
Debug(4, "codec: sending video packet failed");
return;
}
frame = av_frame_alloc();
ret = avcodec_receive_frame(video_ctx, frame);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
Debug(4, "codec: receiving video frame failed");
av_frame_free(&frame);
return;
}
if (ret >= 0) {
if (decoder->filter) {
if (decoder->filter == 1) {
if (init_filters(video_ctx, decoder->HwDecoder, frame) < 0) {
Debug(3, "video: Init of VAAPI deint Filter failed\n");
decoder->filter = 0;
} else {
Debug(3, "Init VAAPI deint ok\n");
decoder->filter = 2;
}
if (!CuvidTestSurfaces())
usleep(1000);
ret = 0;
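// drain every frame the decoder has ready, as long as free output surfaces are available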
while (ret >= 0 && CuvidTestSurfaces()) {
frame = av_frame_alloc();
ret = avcodec_receive_frame(video_ctx, frame);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
Debug(4, "codec: receiving video frame failed");
av_frame_free(&frame);
return;
}
if (ret >= 0) {
if (decoder->filter) {
if (decoder->filter == 1) {
if (init_filters(video_ctx, decoder->HwDecoder, frame) < 0) {
Debug(3, "video: Init of VAAPI deint Filter failed\n");
decoder->filter = 0;
} else {
Debug(3, "Init VAAPI deint ok\n");
decoder->filter = 2;
}
}
if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag
push_filters(video_ctx, decoder->HwDecoder, frame);
continue;
}
}
if (frame->interlaced_frame && decoder->filter == 2 && (frame->height != 720)) { // broken ZDF sends Interlaced flag
ret = push_filters(video_ctx, decoder->HwDecoder, frame);
return;
}
VideoRenderFrame(decoder->HwDecoder, video_ctx, frame);
} else {
av_frame_free(&frame);
return;
}
VideoRenderFrame(decoder->HwDecoder, video_ctx, frame);
} else {
av_frame_free(&frame);
}
}
}
}
#endif
#ifdef CUVID
void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
@@ -544,7 +588,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
if (!CuvidTestSurfaces())
usleep(1000);
// printf("send packet to decode %s\n",consumed?"ok":"Full");
// printf("send packet to decode %s %04x\n",consumed?"ok":"Full",ret1);
if ((ret1 == AVERROR(EAGAIN) || ret1 == AVERROR_EOF || ret1 >= 0) && CuvidTestSurfaces()) {
ret = 0;
@@ -556,7 +600,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
} else {
got_frame = 0;
}
// printf("got %s packet from decoder\n",got_frame?"1":"no");
// printf("got %s packet from decoder\n",got_frame?"1":"no");
if (got_frame) { // frame completed
#ifdef YADIF
if (decoder->filter) {
@@ -599,9 +643,9 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
#endif
/**
** Flush the video decoder.
** Flush the video decoder.
**
** @param decoder video decoder data
** @param decoder video decoder data
*/
void CodecVideoFlushBuffers(VideoDecoder * decoder)
{
@@ -686,8 +730,8 @@ enum IEC61937
};
#ifdef USE_AUDIO_DRIFT_CORRECTION
#define CORRECT_PCM 1 ///< do PCM audio-drift correction
#define CORRECT_AC3 2 ///< do AC-3 audio-drift correction
#define CORRECT_PCM 1 ///< do PCM audio-drift correction
#define CORRECT_AC3 2 ///< do AC-3 audio-drift correction
static char CodecAudioDrift; ///< flag: enable audio-drift correction
#else
static const int CodecAudioDrift = 0;
@@ -703,9 +747,9 @@ static const int CodecPassthrough = 0;
static char CodecDownmix; ///< enable AC-3 decoder downmix
/**
** Allocate a new audio decoder context.
** Allocate a new audio decoder context.
**
** @returns private decoder pointer for audio decoder.
** @returns private decoder pointer for audio decoder.
*/
AudioDecoder *CodecAudioNewDecoder(void)
{
@@ -722,9 +766,9 @@ AudioDecoder *CodecAudioNewDecoder(void)
}
/**
** Deallocate an audio decoder context.
** Deallocate an audio decoder context.
**
** @param decoder private audio decoder
** @param decoder private audio decoder
*/
void CodecAudioDelDecoder(AudioDecoder * decoder)
{
@@ -733,10 +777,10 @@ void CodecAudioDelDecoder(AudioDecoder * decoder)
}
/**
** Open audio decoder.
** Open audio decoder.
**
** @param audio_decoder private audio decoder
** @param codec_id audio codec id
** @param audio_decoder private audio decoder
** @param codec_id audio codec id
*/
void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id)
{
@@ -784,9 +828,9 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id)
}
/**
** Close audio decoder.
** Close audio decoder.
**
** @param audio_decoder private audio decoder
** @param audio_decoder private audio decoder
*/
void CodecAudioClose(AudioDecoder * audio_decoder)
{
@@ -831,9 +875,9 @@ void CodecAudioClose(AudioDecoder * audio_decoder)
}
/**
** Set audio drift correction.
** Set audio drift correction.
**
** @param mask enable mask (PCM, AC-3)
** @param mask enable mask (PCM, AC-3)
*/
void CodecSetAudioDrift(int mask)
{
@@ -844,9 +888,9 @@ void CodecSetAudioDrift(int mask)
}
/**
** Set audio pass-through.
** Set audio pass-through.
**
** @param mask enable mask (PCM, AC-3, E-AC-3)
** @param mask enable mask (PCM, AC-3, E-AC-3)
*/
void CodecSetAudioPassthrough(int mask)
{
@@ -857,9 +901,9 @@ void CodecSetAudioPassthrough(int mask)
}
/**
** Set audio downmix.
** Set audio downmix.
**
** @param onoff enable/disable downmix.
** @param onoff enable/disable downmix.
*/
void CodecSetAudioDownmix(int onoff)
{
@@ -871,15 +915,15 @@ void CodecSetAudioDownmix(int onoff)
}
/**
** Reorder audio frame.
** Reorder audio frame.
**
** ffmpeg L R C Ls Rs -> alsa L R Ls Rs C
** ffmpeg L R C LFE Ls Rs -> alsa L R Ls Rs C LFE
** ffmpeg L R C LFE Ls Rs Rl Rr -> alsa L R Ls Rs C LFE Rl Rr
** ffmpeg L R C Ls Rs -> alsa L R Ls Rs C
** ffmpeg L R C LFE Ls Rs -> alsa L R Ls Rs C LFE
** ffmpeg L R C LFE Ls Rs Rl Rr -> alsa L R Ls Rs C LFE Rl Rr
**
** @param buf[IN,OUT] sample buffer
** @param size size of sample buffer in bytes
** @param channels number of channels interleaved in sample buffer
** @param buf[IN,OUT] sample buffer
** @param size size of sample buffer in bytes
** @param channels number of channels interleaved in sample buffer
*/
static void CodecReorderAudioFrame(int16_t * buf, int size, int channels)
{
@@ -931,10 +975,10 @@ static void CodecReorderAudioFrame(int16_t * buf, int size, int channels)
}
/**
** Handle audio format changes helper.
** Handle audio format changes helper.
**
** @param audio_decoder audio decoder data
** @param[out] passthrough pass-through output
** @param audio_decoder audio decoder data
** @param[out] passthrough pass-through output
*/
static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough)
{
@@ -991,10 +1035,10 @@ static int CodecAudioUpdateHelper(AudioDecoder * audio_decoder, int *passthrough
}
/**
** Audio pass-through decoder helper.
** Audio pass-through decoder helper.
**
** @param audio_decoder audio decoder data
** @param avpkt undecoded audio packet
** @param audio_decoder audio decoder data
** @param avpkt undecoded audio packet
*/
static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPacket * avpkt)
{
@@ -1106,10 +1150,10 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac
#if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE)
/**
** Set/update audio pts clock.
** Set/update audio pts clock.
**
** @param audio_decoder audio decoder data
** @param pts presentation timestamp
** @param audio_decoder audio decoder data
** @param pts presentation timestamp
*/
static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts)
{
@@ -1206,11 +1250,11 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts)
}
/**
** Handle audio format changes.
** Handle audio format changes.
**
** @param audio_decoder audio decoder data
** @param audio_decoder audio decoder data
**
** @note this is the old not good supported version
** @note this is the old not good supported version
*/
static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder)
{
@@ -1277,11 +1321,11 @@ static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder)
}
/**
** Codec enqueue audio samples.
** Codec enqueue audio samples.
**
** @param audio_decoder audio decoder data
** @param data samples data
** @param count number of bytes in sample data
** @param audio_decoder audio decoder data
** @param data samples data
** @param count number of bytes in sample data
*/
void CodecAudioEnqueue(AudioDecoder * audio_decoder, int16_t * data, int count)
{
@@ -1410,12 +1454,12 @@ int myavcodec_decode_audio3(AVCodecContext * avctx, int16_t * samples, int *fram
}
/**
** Decode an audio packet.
** Decode an audio packet.
**
** PTS must be handled self.
** PTS must be handled self.
**
** @param audio_decoder audio decoder data
** @param avpkt audio packet
** @param audio_decoder audio decoder data
** @param avpkt audio packet
*/
void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
{
@@ -1486,10 +1530,10 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
#if defined(USE_SWRESAMPLE) || defined(USE_AVRESAMPLE)
/**
** Set/update audio pts clock.
** Set/update audio pts clock.
**
** @param audio_decoder audio decoder data
** @param pts presentation timestamp
** @param audio_decoder audio decoder data
** @param pts presentation timestamp
*/
static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts)
{
@@ -1604,9 +1648,9 @@ static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts)
}
/**
** Handle audio format changes.
** Handle audio format changes.
**
** @param audio_decoder audio decoder data
** @param audio_decoder audio decoder data
*/
static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder)
{
@@ -1665,14 +1709,14 @@ static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder)
}
/**
** Decode an audio packet.
** Decode an audio packet.
**
** PTS must be handled self.
** PTS must be handled self.
**
** @note the caller has not aligned avpkt and not cleared the end.
** @note the caller has not aligned avpkt and not cleared the end.
**
** @param audio_decoder audio decoder data
** @param avpkt audio packet
** @param audio_decoder audio decoder data
** @param avpkt audio packet
*/
void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
@@ -1737,9 +1781,9 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
#endif
/**
** Flush the audio decoder.
** Flush the audio decoder.
**
** @param decoder audio decoder data
** @param decoder audio decoder data
*/
void CodecAudioFlushBuffers(AudioDecoder * decoder)
{
@@ -1751,7 +1795,7 @@ void CodecAudioFlushBuffers(AudioDecoder * decoder)
//----------------------------------------------------------------------------
/**
** Empty log callback
** Empty log callback
*/
static void CodecNoopCallback( __attribute__((unused))
void *ptr, __attribute__((unused))
@@ -1761,7 +1805,7 @@ static void CodecNoopCallback( __attribute__((unused))
}
/**
** Codec init
** Codec init
*/
void CodecInit(void)
{
@@ -1772,11 +1816,10 @@ void CodecInit(void)
#else
(void)CodecNoopCallback;
#endif
avcodec_register_all(); // register all formats and codecs
}
/**
** Codec exit.
** Codec exit.
*/
void CodecExit(void)
{

drm.c Normal file

@@ -0,0 +1,609 @@
#include <unistd.h>
#include <gbm.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#define DRM_DEBUG
//----------------------------------------------------------------------------
// DRM
//----------------------------------------------------------------------------
struct _Drm_Render_
{
int fd_drm;
drmModeModeInfo mode;
drmModeCrtc *saved_crtc;
// drmEventContext ev;
int bpp;
uint32_t connector_id, crtc_id, video_plane;
uint32_t hdr_metadata;
uint32_t mmWidth,mmHeight; // Size in mm
uint32_t hdr_blob_id;
};
typedef struct _Drm_Render_ VideoRender;
struct {
struct gbm_device *dev;
struct gbm_surface *surface;
} gbm;
VideoRender *render;
//----------------------------------------------------------------------------
// Helper functions
//----------------------------------------------------------------------------
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif
struct type_name {
unsigned int type;
const char *name;
};
static const char *util_lookup_type_name(unsigned int type,
const struct type_name *table,
unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
if (table[i].type == type)
return table[i].name;
return NULL;
}
static const struct type_name connector_type_names[] = {
{ DRM_MODE_CONNECTOR_Unknown, "unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
{ DRM_MODE_CONNECTOR_Composite, "composite" },
{ DRM_MODE_CONNECTOR_SVIDEO, "s-video" },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
{ DRM_MODE_CONNECTOR_Component, "component" },
{ DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN" },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
};
const char *util_lookup_connector_type_name(unsigned int type)
{
return util_lookup_type_name(type, connector_type_names,
ARRAY_SIZE(connector_type_names));
}
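// Return the current value of the named property on a DRM object (connector, CRTC or plane)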
static uint64_t GetPropertyValue(int fd_drm, uint32_t objectID,
uint32_t objectType, const char *propName)
{
uint32_t i;
int found = 0;
uint64_t value = 0;
drmModePropertyPtr Prop;
drmModeObjectPropertiesPtr objectProps =
drmModeObjectGetProperties(fd_drm, objectID, objectType);
for (i = 0; i < objectProps->count_props; i++) {
if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL)
fprintf(stderr, "GetPropertyValue: Unable to query property.\n");
if (strcmp(propName, Prop->name) == 0) {
value = objectProps->prop_values[i];
found = 1;
}
drmModeFreeProperty(Prop);
if (found)
break;
}
drmModeFreeObjectProperties(objectProps);
#ifdef DRM_DEBUG
if (!found)
fprintf(stderr, "GetPropertyValue: Unable to find value for property \'%s\'.\n",
propName);
#endif
return value;
}
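// Return the numeric ID of the named property on a DRM object, or (uint32_t)-1 if it does not exist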
static uint32_t GetPropertyID(int fd_drm, uint32_t objectID,
uint32_t objectType, const char *propName)
{
uint32_t i;
int found = 0;
uint32_t value = -1;
drmModePropertyPtr Prop;
drmModeObjectPropertiesPtr objectProps =
drmModeObjectGetProperties(fd_drm, objectID, objectType);
for (i = 0; i < objectProps->count_props; i++) {
if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL)
fprintf(stderr, "GetPropertyValue: Unable to query property.\n");
if (strcmp(propName, Prop->name) == 0) {
value = objectProps->props[i];
found = 1;
}
drmModeFreeProperty(Prop);
if (found)
break;
}
drmModeFreeObjectProperties(objectProps);
#ifdef DRM_DEBUG
if (!found)
Debug(3,"GetPropertyValue: Unable to find ID for property \'%s\'.\n",propName);
#endif
return value;
}
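// Look up the named property and queue "objectID.property = value" on the atomic mode-set request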
static int SetPropertyRequest(drmModeAtomicReqPtr ModeReq, int fd_drm,
uint32_t objectID, uint32_t objectType,
const char *propName, uint64_t value)
{
uint32_t i;
uint64_t id = 0;
drmModePropertyPtr Prop;
drmModeObjectPropertiesPtr objectProps =
drmModeObjectGetProperties(fd_drm, objectID, objectType);
for (i = 0; i < objectProps->count_props; i++) {
if ((Prop = drmModeGetProperty(fd_drm, objectProps->props[i])) == NULL)
printf( "SetPropertyRequest: Unable to query property.\n");
if (strcmp(propName, Prop->name) == 0) {
id = Prop->prop_id;
drmModeFreeProperty(Prop);
break;
}
drmModeFreeProperty(Prop);
}
drmModeFreeObjectProperties(objectProps);
if (id == 0)
printf( "SetPropertyRequest: Unable to find value for property \'%s\'.\n",
propName);
return drmModeAtomicAddProperty(ModeReq, objectID, id, value);
}
static void CuvidSetVideoMode(void);
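// Switch the connected output to a mode matching width/height and the requested DRMRefresh,
// recreating the GBM/EGL surface for the new resolution (only 1080 and 2160 lines are accepted)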
void set_video_mode(int width, int height)
{
drmModeConnector *connector;
drmModeModeInfo *mode;
int ii;
if (height != 1080 && height != 2160)
return;
connector = drmModeGetConnector(render->fd_drm, render->connector_id);
for (ii = 0; ii < connector->count_modes; ii++) {
mode = &connector->modes[ii];
printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh);
if (width == mode->hdisplay &&
height == mode->vdisplay &&
mode->vrefresh == DRMRefresh &&
render->mode.hdisplay != width &&
render->mode.vdisplay != height &&
!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
VideoWindowWidth = mode->hdisplay;
VideoWindowHeight = mode->vdisplay;
eglDestroySurface (eglDisplay, eglSurface);
EglCheck();
gbm_surface_destroy (gbm.surface);
InitBo(render->bpp);
CuvidSetVideoMode();
Debug(3,"Set new mode %d:%d\n",mode->hdisplay,mode->vdisplay);
break;
}
}
}
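// Open the DRM device, pick a connected connector (optionally restricted to the one named in DRMConnector),
// choose a video mode and a suitable scan-out plane, and remember the current CRTC so it can be restored on exit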
static int FindDevice(VideoRender * render)
{
drmVersion *version;
drmModeRes *resources;
drmModeConnector *connector;
drmModeEncoder *encoder = 0;
drmModeModeInfo *mode;
drmModePlane *plane;
drmModePlaneRes *plane_res;
drmModeObjectPropertiesPtr props;
uint32_t j, k;
uint64_t has_dumb;
uint64_t has_prime;
int i,ii=0;
char connectorstr[10];
int found = 0;
#ifdef RASPI
render->fd_drm = open("/dev/dri/card1", O_RDWR);
#else
render->fd_drm = open("/dev/dri/card0", O_RDWR);
#endif
if (render->fd_drm < 0) {
fprintf(stderr, "FindDevice: cannot open /dev/dri/card0: %m\n");
return -errno;
}
drmSetMaster(render->fd_drm);
version = drmGetVersion(render->fd_drm);
fprintf(stderr, "FindDevice: open /dev/dri/card0: %s\n", version->name);
// check capability
if (drmGetCap(render->fd_drm, DRM_CAP_DUMB_BUFFER, &has_dumb) < 0 || has_dumb == 0)
fprintf(stderr, "FindDevice: drmGetCap DRM_CAP_DUMB_BUFFER failed or doesn't have dumb buffer\n");
if (drmSetClientCap(render->fd_drm, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1) != 0)
fprintf(stderr, "FindDevice: DRM_CLIENT_CAP_UNIVERSAL_PLANES not available.\n");
if (drmSetClientCap(render->fd_drm, DRM_CLIENT_CAP_ATOMIC, 1) != 0)
fprintf(stderr, "FindDevice: DRM_CLIENT_CAP_ATOMIC not available.\n");
if (drmGetCap(render->fd_drm, DRM_CAP_PRIME, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_CAP_PRIME not available.\n");
if (drmGetCap(render->fd_drm, DRM_PRIME_CAP_EXPORT, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_PRIME_CAP_EXPORT not available.\n");
if (drmGetCap(render->fd_drm, DRM_PRIME_CAP_IMPORT, &has_prime) < 0)
fprintf(stderr, "FindDevice: DRM_PRIME_CAP_IMPORT not available.\n");
if ((resources = drmModeGetResources(render->fd_drm)) == NULL){
fprintf(stderr, "FindDevice: cannot retrieve DRM resources (%d): %m\n", errno);
return -errno;
}
#ifdef DEBUG
Debug(3,"[FindDevice] DRM have %i connectors, %i crtcs, %i encoders\n",
resources->count_connectors, resources->count_crtcs,
resources->count_encoders);
#endif
// find all available connectors
for (i = 0; i < resources->count_connectors; i++) {
connector = drmModeGetConnector(render->fd_drm, resources->connectors[i]);
if (!connector) {
fprintf(stderr, "FindDevice: cannot retrieve DRM connector (%d): %m\n", errno);
return -errno;
}
sprintf(connectorstr,"%s-%u",util_lookup_connector_type_name(connector->connector_type),connector->connector_type_id);
printf("Connector >%s< is %sconnected\n",connectorstr,connector->connection == DRM_MODE_CONNECTED?"":"not ");
if (DRMConnector && strcmp(DRMConnector,connectorstr))
continue;
if (connector->connection == DRM_MODE_CONNECTED && connector->count_modes > 0) {
float aspect = (float)connector->mmWidth / (float)connector->mmHeight;
if ((aspect > 1.70) && (aspect < 1.85)) {
render->mmHeight = 90;
render->mmWidth = 160;
} else {
render->mmHeight = connector->mmHeight;
render->mmWidth = connector->mmWidth;
}
render->connector_id = connector->connector_id;
// FIXME: use default encoder/crtc pair
if ((encoder = drmModeGetEncoder(render->fd_drm, connector->encoder_id)) == NULL){
fprintf(stderr, "FindDevice: cannot retrieve encoder (%d): %m\n", errno);
return -errno;
}
render->crtc_id = encoder->crtc_id;
render->hdr_metadata = GetPropertyID(render->fd_drm, connector->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "HDR_OUTPUT_METADATA");
printf("ID %d of METADATA in Connector %d connected %d\n",render->hdr_metadata,connector->connector_id,connector->connection);
memcpy(&render->mode, &connector->modes[0], sizeof(drmModeModeInfo)); // set fallback
// search Modes for Connector
for (ii = 0; ii < connector->count_modes; ii++) {
mode = &connector->modes[ii];
printf("Mode %d %dx%d Rate %d\n",ii,mode->hdisplay,mode->vdisplay,mode->vrefresh);
if (VideoWindowWidth && VideoWindowHeight) { // preset by command line
if (VideoWindowWidth == mode->hdisplay &&
VideoWindowHeight == mode->vdisplay &&
mode->vrefresh == DRMRefresh &&
!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
break;
}
}
else {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) {
memcpy(&render->mode, mode, sizeof(drmModeModeInfo));
VideoWindowWidth = mode->hdisplay;
VideoWindowHeight = mode->vdisplay;
break;
}
}
}
found = 1;
i = resources->count_connectors; // uuuuhh
}
VideoWindowWidth = render->mode.hdisplay;
VideoWindowHeight = render->mode.vdisplay;
if (found)
printf("Use Mode %d %dx%d Rate %d\n",ii,render->mode.hdisplay,render->mode.vdisplay,render->mode.vrefresh);
drmModeFreeConnector(connector);
}
if (!found) {
Debug(3,"Requested Connector not found or not connected\n");
printf("Requested Connector not found or not connected\n");
return -1;
}
// find first plane
if ((plane_res = drmModeGetPlaneResources(render->fd_drm)) == NULL)
fprintf(stderr, "FindDevice: cannot retrieve PlaneResources (%d): %m\n", errno);
for (j = 0; j < plane_res->count_planes; j++) {
plane = drmModeGetPlane(render->fd_drm, plane_res->planes[j]);
if (plane == NULL)
fprintf(stderr, "FindDevice: cannot query DRM-KMS plane %d\n", j);
for (i = 0; i < resources->count_crtcs; i++) {
if (plane->possible_crtcs & (1 << i))
break;
}
uint64_t type = GetPropertyValue(render->fd_drm, plane_res->planes[j],
DRM_MODE_OBJECT_PLANE, "type");
uint64_t zpos = 0;
#ifdef DRM_DEBUG // if there are more than 2 CRTCs this must be rewritten!
printf("[FindDevice] Plane id %i crtc_id %i possible_crtcs %i possible CRTC %i type %s\n",
plane->plane_id, plane->crtc_id, plane->possible_crtcs, resources->crtcs[i],
(type == DRM_PLANE_TYPE_PRIMARY) ? "primary plane" :
(type == DRM_PLANE_TYPE_OVERLAY) ? "overlay plane" :
(type == DRM_PLANE_TYPE_CURSOR) ? "cursor plane" : "No plane type");
#endif
// test pixel format and plane caps
for (k = 0; k < plane->count_formats; k++) {
if (encoder->possible_crtcs & plane->possible_crtcs) {
switch (plane->formats[k]) {
#ifdef RASPI
case DRM_FORMAT_ARGB8888:
#else
case DRM_FORMAT_XRGB2101010:
#endif
if (!render->video_plane) {
render->video_plane = plane->plane_id;
}
break;
default:
break;
}
}
}
drmModeFreePlane(plane);
}
drmModeFreePlaneResources(plane_res);
drmModeFreeEncoder(encoder);
drmModeFreeResources(resources);
#ifdef DRM_DEBUG
printf("[FindDevice] DRM setup CRTC: %i video_plane: %i \n",
render->crtc_id, render->video_plane);
#endif
// save actual modesetting
render->saved_crtc = drmModeGetCrtc(render->fd_drm, render->crtc_id);
return 0;
}
///
/// Initialize video output module.
///
void VideoInitDrm()
{
int i;
if (!(render = calloc(1, sizeof(*render)))) {
Fatal(_("video/DRM: out of memory\n"));
return;
}
if (FindDevice(render)){
Fatal(_( "VideoInit: FindDevice() failed\n"));
}
gbm.dev = gbm_create_device (render->fd_drm);
assert (gbm.dev != NULL);
PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display = NULL;
get_platform_display =
(void *) eglGetProcAddress("eglGetPlatformDisplay");
assert(get_platform_display != NULL);
eglDisplay = get_platform_display(EGL_PLATFORM_GBM_KHR, gbm.dev, NULL);
assert (eglDisplay != NULL);
// return;
drmModeAtomicReqPtr ModeReq;
const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET;
uint32_t modeID = 0;
if (drmModeCreatePropertyBlob(render->fd_drm, &render->mode, sizeof(render->mode), &modeID) != 0) {
fprintf(stderr, "Failed to create mode property.\n");
return;
}
if (!(ModeReq = drmModeAtomicAlloc())) {
fprintf(stderr, "cannot allocate atomic request (%d): %m\n", errno);
return;
}
printf("set CRTC %d of Connector %d aktiv\n",render->crtc_id,render->connector_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 1);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
if (drmModeDestroyPropertyBlob(render->fd_drm, modeID) != 0)
fprintf(stderr, "cannot destroy property blob (%d): %m\n", errno);
drmModeAtomicFree(ModeReq);
}
void get_drm_aspect(int *num,int *den)
{
Debug(3,"mmHeight %d mmWidth %d VideoHeight %d VideoWidth %d\n",render->mmHeight,render->mmWidth,VideoWindowHeight,VideoWindowWidth);
*num = VideoWindowWidth * render->mmHeight;
*den = VideoWindowHeight * render->mmWidth;
}
struct gbm_bo *bo = NULL, *next_bo=NULL;
struct drm_fb *fb;
static int m_need_modeset = 0;
static int old_color=-1,old_trc=-1;
void InitBo(int bpp) {
// create the GBM and EGL surface
render->bpp = bpp;
gbm.surface = gbm_surface_create (gbm.dev, VideoWindowWidth,VideoWindowHeight,
bpp==10?GBM_FORMAT_XRGB2101010:GBM_FORMAT_ARGB8888,
GBM_BO_USE_SCANOUT|GBM_BO_USE_RENDERING);
assert(gbm.surface != NULL);
eglSurface = eglCreateWindowSurface (eglDisplay, eglConfig, gbm.surface, NULL);
assert(eglSurface != NULL);
}
static struct gbm_bo *previous_bo = NULL;
static uint32_t previous_fb;
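// Present one frame: swap the EGL buffers, wrap the new GBM front buffer in a DRM framebuffer,
// apply a pending HDR/colorspace modeset if needed, flip the CRTC to it and release the previous buffer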
static void drm_swap_buffers () {
uint32_t fb;
eglSwapBuffers (eglDisplay, eglSurface);
struct gbm_bo *bo = gbm_surface_lock_front_buffer (gbm.surface);
#if 1
if (bo == NULL)
bo = gbm_surface_lock_front_buffer (gbm.surface);
#endif
assert (bo != NULL);
uint32_t handle = gbm_bo_get_handle (bo).u32;
uint32_t pitch = gbm_bo_get_stride (bo);
drmModeAddFB (render->fd_drm, VideoWindowWidth,VideoWindowHeight,render->bpp==10? 30:24, 32, pitch, handle, &fb);
// drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode);
if (m_need_modeset) {
drmModeAtomicReqPtr ModeReq;
const uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET;
uint32_t modeID = 0;
if (drmModeCreatePropertyBlob(render->fd_drm, &render->mode, sizeof(render->mode), &modeID) != 0) {
fprintf(stderr, "Failed to create mode property.\n");
return;
}
if (!(ModeReq = drmModeAtomicAlloc())) {
fprintf(stderr, "cannot allocate atomic request (%d): %m\n", errno);
return;
}
// Need to disable the CRTC in order to submit the HDR data....
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 0);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
sleep(2);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "Colorspace",old_color==AVCOL_PRI_BT2020?9:2 );
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "MODE_ID", modeID);
SetPropertyRequest(ModeReq, render->fd_drm, render->connector_id,
DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID", render->crtc_id);
SetPropertyRequest(ModeReq, render->fd_drm, render->crtc_id,
DRM_MODE_OBJECT_CRTC, "ACTIVE", 1);
if (drmModeAtomicCommit(render->fd_drm, ModeReq, flags, NULL) != 0)
fprintf(stderr, "cannot set atomic mode (%d): %m\n", errno);
if (drmModeDestroyPropertyBlob(render->fd_drm, modeID) != 0)
fprintf(stderr, "cannot destroy prperty blob (%d): %m\n", errno);
drmModeAtomicFree(ModeReq);
m_need_modeset = 0;
}
drmModeSetCrtc (render->fd_drm, render->crtc_id, fb, 0, 0, &render->connector_id, 1, &render->mode);
if (previous_bo) {
drmModeRmFB (render->fd_drm, previous_fb);
gbm_surface_release_buffer (gbm.surface, previous_bo);
}
previous_bo = bo;
previous_fb = fb;
}
static void drm_clean_up () {
// set the previous crtc
if (!render)
return;
Debug(3,"drm clean up\n");
if (previous_bo) {
drmModeRmFB (render->fd_drm, previous_fb);
gbm_surface_release_buffer (gbm.surface, previous_bo);
}
drmModeSetCrtc (render->fd_drm, render->saved_crtc->crtc_id, render->saved_crtc->buffer_id,
render->saved_crtc->x, render->saved_crtc->y, &render->connector_id, 1, &render->saved_crtc->mode);
drmModeFreeCrtc (render->saved_crtc);
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
render->hdr_blob_id = 0;
eglDestroySurface (eglDisplay, eglSurface);
EglCheck();
gbm_surface_destroy (gbm.surface);
eglDestroyContext (eglDisplay, eglContext);
EglCheck();
eglDestroyContext (eglDisplay, eglSharedContext);
EglCheck();
eglSharedContext = NULL;
eglTerminate (eglDisplay);
EglCheck();
gbm_device_destroy (gbm.dev);
drmDropMaster(render->fd_drm);
close (render->fd_drm);
eglDisplay = NULL;
free(render);
}

hdr.c Normal file

@@ -0,0 +1,492 @@
#include <libavutil/mastering_display_metadata.h>
/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
* HDR Metadata Infoframe as per CTA 861.G spec. This is expected
* to match exactly with the spec.
*
* Userspace is expected to pass the metadata information as per
* the format described in this structure.
*/
struct hdr_metadata_infoframe {
/**
* @eotf: Electro-Optical Transfer Function (EOTF)
* used in the stream.
*/
__u8 eotf;
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u8 metadata_type;
/**
* @display_primaries: Color Primaries of the Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @display_primaries.x: X cordinate of color primary.
* @display_primaries.y: Y cordinate of color primary.
*/
struct {
__u16 x, y;
} display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @white_point.x: X cordinate of whitepoint of color primary.
* @white_point.y: Y cordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
} white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_display_mastering_luminance;
/**
* @min_display_mastering_luminance: Min Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of
* 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
* represents 6.5535 cd/m2.
*/
__u16 min_display_mastering_luminance;
/**
* @max_cll: Max Content Light Level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_cll;
/**
* @max_fall: Max Frame Average Light Level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_fall;
};
/**
* struct hdr_output_metadata - HDR output metadata
*
* Metadata Information to be passed from userspace
*/
struct hdr_output_metadata {
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u32 metadata_type;
/**
* @hdmi_metadata_type1: HDR Metadata Infoframe.
*/
union {
struct hdr_metadata_infoframe hdmi_metadata_type1;
};
};
enum hdr_metadata_eotf {
EOTF_TRADITIONAL_GAMMA_SDR,
EOTF_TRADITIONAL_GAMMA_HDR,
EOTF_ST2084,
EOTF_HLG,
};
enum metadata_id {
METADATA_TYPE1,
};
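// Pack a CTA-861.G static HDR metadata infoframe (Type 1) into the caller-supplied buffer,
// in the field order expected by the hdr_metadata_infoframe layout above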
void
weston_hdr_metadata(void *data,
uint16_t display_primary_r_x,
uint16_t display_primary_r_y,
uint16_t display_primary_g_x,
uint16_t display_primary_g_y,
uint16_t display_primary_b_x,
uint16_t display_primary_b_y,
uint16_t white_point_x,
uint16_t white_point_y,
uint16_t min_luminance,
uint16_t max_luminance,
uint16_t max_cll,
uint16_t max_fall,
enum hdr_metadata_eotf eotf)
{
uint8_t *data8;
uint16_t *data16;
data8 = data;
*data8++ = eotf;
*data8++ = METADATA_TYPE1;
data16 = (void*)data8;
*data16++ = display_primary_r_x;
*data16++ = display_primary_r_y;
*data16++ = display_primary_g_x;
*data16++ = display_primary_g_y;
*data16++ = display_primary_b_x;
*data16++ = display_primary_b_y;
*data16++ = white_point_x;
*data16++ = white_point_y;
*data16++ = max_luminance;
*data16++ = min_luminance;
*data16++ = max_cll;
*data16++ = max_fall;
}
struct weston_vector {
float f[4];
};
struct weston_colorspace {
struct weston_vector r, g, b;
struct weston_vector whitepoint;
const char *name;
const char *whitepoint_name;
};
struct weston_colorspace hdr10;
static const struct weston_colorspace bt470m = {
.r = {{ 0.670f, 0.330f, }},
.g = {{ 0.210f, 0.710f, }},
.b = {{ 0.140f, 0.080f, }},
.whitepoint = {{ 0.3101f, 0.3162f, }},
.name = "BT.470 M",
.whitepoint_name = "C",
};
static const struct weston_colorspace bt470bg = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.290f, 0.600f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "BT.470 B/G",
.whitepoint_name = "D65",
};
static const struct weston_colorspace smpte170m = {
.r = {{ 0.630f, 0.340f, }},
.g = {{ 0.310f, 0.595f, }},
.b = {{ 0.155f, 0.070f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "SMPTE 170M",
.whitepoint_name = "D65",
};
static const struct weston_colorspace smpte240m = {
.r = {{ 0.630f, 0.340f, }},
.g = {{ 0.310f, 0.595f, }},
.b = {{ 0.155f, 0.070f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "SMPTE 240M",
.whitepoint_name = "D65",
};
static const struct weston_colorspace bt709 = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.300f, 0.600f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "BT.709",
.whitepoint_name = "D65",
};
static const struct weston_colorspace bt2020 = {
.r = {{ 0.708f, 0.292f, }},
.g = {{ 0.170f, 0.797f, }},
.b = {{ 0.131f, 0.046f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "BT.2020",
.whitepoint_name = "D65",
};
static const struct weston_colorspace srgb = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.300f, 0.600f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "sRGB",
.whitepoint_name = "D65",
};
static const struct weston_colorspace adobergb = {
.r = {{ 0.640f, 0.330f, }},
.g = {{ 0.210f, 0.710f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "AdobeRGB",
.whitepoint_name = "D65",
};
static const struct weston_colorspace dci_p3 = {
.r = {{ 0.680f, 0.320f, }},
.g = {{ 0.265f, 0.690f, }},
.b = {{ 0.150f, 0.060f, }},
.whitepoint = {{ 0.3127f, 0.3290f, }},
.name = "DCI-P3 D65",
.whitepoint_name = "D65",
};
static const struct weston_colorspace prophotorgb = {
.r = {{ 0.7347f, 0.2653f, }},
.g = {{ 0.1596f, 0.8404f, }},
.b = {{ 0.0366f, 0.0001f, }},
.whitepoint = {{ .3457, .3585 }},
.name = "ProPhoto RGB",
.whitepoint_name = "D50",
};
static const struct weston_colorspace ciergb = {
.r = {{ 0.7347f, 0.2653f, }},
.g = {{ 0.2738f, 0.7174f, }},
.b = {{ 0.1666f, 0.0089f, }},
.whitepoint = {{ 1.0f / 3.0f, 1.0f / 3.0f, }},
.name = "CIE RGB",
.whitepoint_name = "E",
};
static const struct weston_colorspace ciexyz = {
.r = {{ 1.0f, 0.0f, }},
.g = {{ 0.0f, 1.0f, }},
.b = {{ 0.0f, 0.0f, }},
.whitepoint = {{ 1.0f / 3.0f, 1.0f / 3.0f, }},
.name = "CIE XYZ",
.whitepoint_name = "E",
};
const struct weston_colorspace ap0 = {
.r = {{ 0.7347f, 0.2653f, }},
.g = {{ 0.0000f, 1.0000f, }},
.b = {{ 0.0001f, -0.0770f, }},
.whitepoint = {{ .32168f, .33767f, }},
.name = "ACES primaries #0",
.whitepoint_name = "D60",
};
const struct weston_colorspace ap1 = {
.r = {{ 0.713f, 0.393f, }},
.g = {{ 0.165f, 0.830f, }},
.b = {{ 0.128f, 0.044f, }},
.whitepoint = {{ 0.32168f, 0.33767f, }},
.name = "ACES primaries #1",
.whitepoint_name = "D60",
};
static const struct weston_colorspace * const colorspaces[] = {
&bt470m,
&bt470bg,
&smpte170m,
&smpte240m,
&bt709,
&bt2020,
&srgb,
&adobergb,
&dci_p3,
&prophotorgb,
&ciergb,
&ciexyz,
&ap0,
&ap1,
};
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof(a)[0])
const struct weston_colorspace *
weston_colorspace_lookup(const char *name)
{
unsigned i;
if (!name)
return NULL;
for (i = 0; i < ARRAY_LENGTH(colorspaces); i++) {
const struct weston_colorspace *c = colorspaces[i];
if (!strcmp(c->name, name))
return c;
}
return NULL;
}
static int cleanup=0;
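// Encode a CIE xy chromaticity coordinate in the CTA-861.G fixed-point format (units of 0.00002, i.e. value * 50000)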
static uint16_t encode_xyy(float xyy)
{
return xyy * 50000;
}
static AVMasteringDisplayMetadata md_save = {0};
static AVContentLightMetadata ld_save = {0};
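// Build an HDR_OUTPUT_METADATA property blob from the stream's color primaries, transfer characteristic
// and (if present) mastering-display / content-light side data, attach it to the connector,
// and schedule a modeset so the new infoframe is actually sent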
static void set_hdr_metadata(int color,int trc, AVFrameSideData *sd1, AVFrameSideData *sd2)
{
drmModeAtomicReqPtr ModeReq;
struct weston_colorspace *cs;
enum hdr_metadata_eotf eotf;
struct hdr_output_metadata data;
int ret,MaxCLL=1500,MaxFALL=400;
int max_lum=4000,min_lum=0050;
struct AVMasteringDisplayMetadata *md = NULL;
struct AVContentLightMetadata *ld = NULL;
if (render->hdr_metadata == -1) { // Metadata not supported
return;
}
// clean up FFmpeg quirks
if (trc == AVCOL_TRC_BT2020_10)
trc = AVCOL_TRC_ARIB_STD_B67;
if (trc == AVCOL_TRC_UNSPECIFIED)
trc = AVCOL_TRC_BT709;
if (color == AVCOL_PRI_UNSPECIFIED)
color = AVCOL_PRI_BT709;
if ((old_color == color && old_trc == trc && !sd1 && !sd2) || !render->hdr_metadata)
return; // nothing to do
if (sd1)
md = sd1->data;
if (sd2)
ld = sd2->data;
if (md && !memcmp(md,&md_save,sizeof(md_save)))
if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) {
return;
}
else if (ld && !memcmp(ld,&ld_save,sizeof(ld_save))) {
return;
}
if (ld)
memcpy(&ld_save,ld,sizeof(ld_save));
if (md)
memcpy(&md_save,md,sizeof(md_save));
Debug(3,"Update HDR to TRC %d color %d\n",trc,color);
if (trc == AVCOL_TRC_BT2020_10)
trc = AVCOL_TRC_ARIB_STD_B67;
old_color = color;
old_trc = trc;
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
switch(trc) {
case AVCOL_TRC_BT709: // 1
case AVCOL_TRC_UNSPECIFIED: // 2
eotf = EOTF_TRADITIONAL_GAMMA_SDR;
break;
case AVCOL_TRC_BT2020_10: // 14
case AVCOL_TRC_BT2020_12:
case AVCOL_TRC_ARIB_STD_B67: // 18 HLG
eotf = EOTF_HLG;
break;
case AVCOL_TRC_SMPTE2084: // 16
eotf = EOTF_ST2084;
break;
default:
eotf = EOTF_TRADITIONAL_GAMMA_SDR;
break;
}
switch (color) {
case AVCOL_PRI_BT709: // 1
case AVCOL_PRI_UNSPECIFIED: // 2
cs = weston_colorspace_lookup("BT.709");
break;
case AVCOL_PRI_BT2020: // 9
cs = weston_colorspace_lookup("BT.2020");
break;
case AVCOL_PRI_BT470BG: // 5
cs = weston_colorspace_lookup("BT.470 B/G"); // BT.601
break;
default:
cs = weston_colorspace_lookup("BT.709");
break;
}
if (md) { // we got Metadata
if (md->has_primaries) {
Debug(3,"Mastering Display Metadata,\n has_primaries:%d has_luminance:%d \n"
"r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f) \n"
"min_luminance=%f, max_luminance=%f\n",
md->has_primaries, md->has_luminance,
av_q2d(md->display_primaries[0][0]),
av_q2d(md->display_primaries[0][1]),
av_q2d(md->display_primaries[1][0]),
av_q2d(md->display_primaries[1][1]),
av_q2d(md->display_primaries[2][0]),
av_q2d(md->display_primaries[2][1]),
av_q2d(md->white_point[0]), av_q2d(md->white_point[1]),
av_q2d(md->min_luminance), av_q2d(md->max_luminance));
cs = &hdr10;
cs->r.f[0] = (float)md->display_primaries[0][0].num / (float)md->display_primaries[0][0].den;
cs->r.f[1] = (float)md->display_primaries[0][1].num / (float)md->display_primaries[0][1].den;
cs->g.f[0] = (float)md->display_primaries[1][0].num / (float)md->display_primaries[1][0].den;
cs->g.f[1] = (float)md->display_primaries[1][1].num / (float)md->display_primaries[1][1].den;
cs->b.f[0] = (float)md->display_primaries[2][0].num / (float)md->display_primaries[2][0].den;
cs->b.f[1] = (float)md->display_primaries[2][1].num / (float)md->display_primaries[2][1].den;
cs->whitepoint.f[0] = (float)md->white_point[0].num / (float)md->white_point[0].den;
cs->whitepoint.f[1] = (float)md->white_point[1].num / (float)md->white_point[1].den;
}
if (md->has_luminance) {
max_lum = av_q2d(md->max_luminance);
min_lum = av_q2d(md->min_luminance) * 10000 ;
printf("max_lum %d min_lum %d\n",max_lum,min_lum);
}
}
if (ld) {
Debug(3,"Has MaxCLL %d MaxFALL %d\n",ld->MaxCLL,ld->MaxFALL);
MaxCLL = ld->MaxCLL;
MaxFALL = ld->MaxFALL;
}
data.metadata_type = 7; // FIXME: metadata type value still to be verified against CTA-861-G
weston_hdr_metadata(&data.hdmi_metadata_type1,
encode_xyy(cs->r.f[0]),
encode_xyy(cs->r.f[1]),
encode_xyy(cs->g.f[0]),
encode_xyy(cs->g.f[1]),
encode_xyy(cs->b.f[0]),
encode_xyy(cs->b.f[1]),
encode_xyy(cs->whitepoint.f[0]),
encode_xyy(cs->whitepoint.f[1]),
max_lum, // max_display_mastering_luminance
min_lum, // min_display_mastering_luminance
MaxCLL, // Maximum Content Light Level (MaxCLL)
MaxFALL, // Maximum Frame-Average Light Level (MaxFALL)
eotf);
ret = drmModeCreatePropertyBlob(render->fd_drm, &data, sizeof(data), &render->hdr_blob_id);
if (ret) {
printf("DRM: HDR metadata: failed blob create \n");
render->hdr_blob_id = 0;
return;
}
ret = drmModeConnectorSetProperty(render->fd_drm, render->connector_id,
render->hdr_metadata, render->hdr_blob_id);
if (ret) {
printf("DRM: HDR metadata: failed property set %d\n",ret);
if (render->hdr_blob_id)
drmModeDestroyPropertyBlob(render->fd_drm, render->hdr_blob_id);
render->hdr_blob_id = 0;
return;
}
m_need_modeset = 1;
Debug(3,"DRM: HDR metadata: prop set\n");
}
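A minimal sketch of a call site (not part of this diff; the frame variable and the side-data lookups are assumptions) that feeds per-frame HDR side data into set_hdr_metadata():
AVFrameSideData *md = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
AVFrameSideData *cl = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
// The function itself only touches the connector property when the primaries,
// transfer characteristic or the side data actually changed.
set_hdr_metadata(frame->color_primaries, frame->color_trc, md, cl);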

View File

@@ -21,107 +21,17 @@ void ConvertColor(const GLint &colARGB, glm::vec4 &col) {
****************************************************************************************/
#ifdef CUVID
const char *rectVertexShader =
"#version 330 core \n\
\
layout (location = 0) in vec2 position; \
out vec4 rectCol; \
uniform vec4 inColor; \
uniform mat4 projection; \
\
void main() \
{ \
gl_Position = projection * vec4(position.x, position.y, 0.0, 1.0); \
rectCol = inColor; \
} \
";
const char *rectFragmentShader =
"#version 330 core \n\
\
in vec4 rectCol; \
out vec4 color; \
\
void main() \
{ \
color = rectCol; \
} \
";
const char *textureVertexShader =
"#version 330 core \n\
\
layout (location = 0) in vec2 position; \
layout (location = 1) in vec2 texCoords; \
\
out vec2 TexCoords; \
out vec4 alphaValue;\
\
uniform mat4 projection; \
uniform vec4 alpha; \
\
void main() \
{ \
gl_Position = projection * vec4(position.x, position.y, 0.0, 1.0); \
TexCoords = texCoords; \
alphaValue = alpha; \
} \
";
const char *textureFragmentShader =
"#version 330 core \n\
in vec2 TexCoords; \
in vec4 alphaValue; \
out vec4 color; \
\
uniform sampler2D screenTexture; \
\
void main() \
{ \
color = texture(screenTexture, TexCoords) * alphaValue; \
} \
";
const char *textVertexShader =
"#version 330 core \n\
\
layout (location = 0) in vec2 position; \
layout (location = 1) in vec2 texCoords; \
\
out vec2 TexCoords; \
out vec4 textColor; \
\
uniform mat4 projection; \
uniform vec4 inColor; \
\
void main() \
{ \
gl_Position = projection * vec4(position.x, position.y, 0.0, 1.0); \
TexCoords = texCoords; \
textColor = inColor; \
} \
";
const char *textFragmentShader =
"#version 330 core \n\
in vec2 TexCoords; \
in vec4 textColor; \
\
out vec4 color; \
\
uniform sampler2D glyphTexture; \
\
void main() \
{ \
vec4 sampled = vec4(1.0, 1.0, 1.0, texture(glyphTexture, TexCoords).r); \
color = textColor * sampled; \
} \
";
const char *glversion = "#version 330 core ";
#else
#ifdef RASPI
const char *glversion = "#version 300 es";
#else
const char *glversion = "#version 300 es ";
#endif
#endif
const char *rectVertexShader =
"\n \
"%s\n \
\
layout (location = 0) in vec2 position; \
out vec4 rectCol; \
@@ -136,7 +46,7 @@ void main() \
";
const char *rectFragmentShader =
"\n \
"%s\n \
\
precision mediump float; \
in vec4 rectCol; \
@@ -149,7 +59,7 @@ void main() \
";
const char *textureVertexShader =
"\n \
"%s\n \
\
layout (location = 0) in vec2 position; \
layout (location = 1) in vec2 texCoords; \
@@ -169,7 +79,7 @@ void main() \
";
const char *textureFragmentShader =
"\n \
"%s\n \
precision mediump float; \
in vec2 TexCoords; \
in vec4 alphaValue; \
@@ -184,7 +94,7 @@ void main() \
";
const char *textVertexShader =
"\n \
"%s\n \
\
layout (location = 0) in vec2 position; \
layout (location = 1) in vec2 texCoords; \
@@ -204,7 +114,7 @@ void main() \
";
const char *textFragmentShader =
"\n \
"%s\n \
precision mediump float; \
in vec2 TexCoords; \
in vec4 textColor; \
@@ -219,7 +129,19 @@ void main() \
color = textColor * sampled; \
} \
";
#endif
///
/// GLX check error.
///
#define GlxCheck(void)\
{\
GLenum err;\
\
if ((err = glGetError()) != GL_NO_ERROR) {\
esyslog( "video/glx: error %s:%d %d '%s'\n",__FILE__,__LINE__, err, gluErrorString(err));\
}\
}
static cShader *Shaders[stCount];
void cShader::Use(void) {
@@ -288,20 +210,27 @@ void cShader::SetMatrix4(const GLchar *name, const glm::mat4 &matrix) {
bool cShader::Compile(const char *vertexCode, const char *fragmentCode) {
GLuint sVertex, sFragment;
char *buffer = (char *)malloc(1000);
// Vertex Shader
sVertex = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(sVertex, 1, &vertexCode, NULL);
sprintf(buffer,vertexCode,glversion);
glShaderSource(sVertex, 1, (const GLchar**) &buffer, NULL);
glCompileShader(sVertex);
// esyslog("[softhddev]:SHADER:VERTEX %s\n",vertexCode);
if (!CheckCompileErrors(sVertex))
if (!CheckCompileErrors(sVertex)) {
free(buffer);
return false;
}
// Fragment Shader
sFragment = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(sFragment, 1, &fragmentCode, NULL);
sprintf(buffer,fragmentCode,glversion);
glShaderSource(sFragment, 1, (const GLchar**)&buffer, NULL);
glCompileShader(sFragment);
// esyslog("[softhddev]:SHADER:FRAGMENT %s\n",fragmentCode);
if (!CheckCompileErrors(sFragment))
if (!CheckCompileErrors(sFragment)) {
free(buffer);
return false;
}
// link Program
id = glCreateProgram();
glAttachShader(id, sVertex);
@@ -312,6 +241,7 @@ bool cShader::Compile(const char *vertexCode, const char *fragmentCode) {
// Delete the shaders as they're linked into our program now and no longer necessary
glDeleteShader(sVertex);
glDeleteShader(sFragment);
free(buffer);
return true;
}
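The %s substitution above expands the shader source into a fixed 1000-byte scratch buffer, which appears sufficient for these OSD shaders but is never checked. A length-safe variant (a sketch, not part of this commit) would size the buffer from the inputs:
size_t need = strlen(vertexCode) + strlen(glversion) + 1;
char *buffer = (char *)malloc(need); // %s expands to at most strlen(glversion)
snprintf(buffer, need, vertexCode, glversion);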
@@ -322,14 +252,14 @@ bool cShader::CheckCompileErrors(GLuint object, bool program) {
glGetShaderiv(object, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(object, 1024, NULL, infoLog);
esyslog("[softhddev]:SHADER: Compile-time error: Type: %d - \n%s\n", type, infoLog);
esyslog("\n[softhddev]:SHADER: Compile-time error: Type: %d - \n>%s<\n", type, infoLog);
return false;
}
} else {
glGetProgramiv(object, GL_LINK_STATUS, &success);
if (!success) {
glGetProgramInfoLog(object, 1024, NULL, infoLog);
esyslog("[softhddev]:SHADER: Link-time error: Type: %d - \n%s\n", type, infoLog);
esyslog("[softhddev]:SHADER: Link-time error: Type: %d - \n>%s<\n", type, infoLog);
return false;
}
}
@@ -398,7 +328,9 @@ void cOglGlyph::LoadTexture(FT_BitmapGlyph ftGlyph) {
}
extern "C" void GlxInitopengl();
extern "C" void GlxDrawopengl();
extern "C" void GlxDestroy();
/****************************************************************************************
* cOglFont
****************************************************************************************/
@@ -439,9 +371,12 @@ cOglFont *cOglFont::Get(const char *name, int charHeight) {
}
void cOglFont::Init(void) {
fonts = new cList<cOglFont>;
if (FT_Init_FreeType(&ftLib))
if (FT_Init_FreeType(&ftLib)) {
esyslog("[softhddev]failed to initialize FreeType library!");
return;
}
fonts = new cList<cOglFont>;
initiated = true;
}
@@ -895,22 +830,15 @@ bool cOglCmdCopyBufferToOutputFb::Execute(void) {
pthread_mutex_lock(&OSDMutex);
fb->BindRead();
oFb->BindWrite();
// glClear(GL_COLOR_BUFFER_BIT);
//#ifdef PLACEBO
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
if (posd)
glReadPixels(0, 0 ,fb->Width(), fb->Height(),GL_RGBA,GL_UNSIGNED_BYTE,posd);
//#else
#if 0
fb->Blit(x, y + fb->Height(), x + fb->Width(), y);
glFlush();
#endif
ActivateOsd(oFb->texture,x, y, fb->Width() ,fb->Height());
glFlush();
oFb->Unbind();
pthread_mutex_unlock(&OSDMutex);
ActivateOsd(oFb->texture,x, y, fb->Width() ,fb->Height());
return true;
}
@@ -1277,8 +1205,9 @@ bool cOglCmdDrawText::Execute(void) {
esyslog("[softhddev]ERROR: could not load glyph %x", sym);
}
if ( limitX && xGlyph + g->AdvanceX() > limitX )
if ( limitX && xGlyph + g->AdvanceX() > limitX ) {
break;
}
kerning = f->Kerning(g, prevSym);
prevSym = sym;
@@ -1324,6 +1253,7 @@ cOglCmdDrawImage::cOglCmdDrawImage(cOglFb *fb, tColor *argb, GLint width, GLint
this->overlay = overlay;
this->scaleX = scaleX;
this->scaleY = scaleY;
}
cOglCmdDrawImage::~cOglCmdDrawImage(void) {
@@ -1332,7 +1262,10 @@ cOglCmdDrawImage::~cOglCmdDrawImage(void) {
bool cOglCmdDrawImage::Execute(void) {
GLuint texture;
#ifdef USE_DRM
GlxDrawopengl(); // here we need the Shared Context for upload
GlxCheck();
#endif
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(
@@ -1351,7 +1284,10 @@ bool cOglCmdDrawImage::Execute(void) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
#ifdef USE_DRM
GlxInitopengl(); // Reset Context
GlxCheck();
#endif
GLfloat x1 = x; //left
GLfloat y1 = y; //top
@@ -1441,7 +1377,10 @@ cOglCmdStoreImage::~cOglCmdStoreImage(void) {
}
bool cOglCmdStoreImage::Execute(void) {
#ifdef USE_DRM
GlxDrawopengl(); // here we need the Shared Context for upload
GlxCheck();
#endif
glGenTextures(1, &imageRef->texture);
glBindTexture(GL_TEXTURE_2D, imageRef->texture);
glTexImage2D(
@@ -1460,7 +1399,10 @@ bool cOglCmdStoreImage::Execute(void) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
#ifdef USE_DRM
GlxInitopengl(); // Reset Context
GlxCheck();
#endif
return true;
}
@@ -1483,6 +1425,7 @@ bool cOglCmdDropImage::Execute(void) {
cOglThread::cOglThread(cCondWait *startWait, int maxCacheSize) : cThread("oglThread") {
stalled = false;
memCached = 0;
this->maxCacheSize = maxCacheSize * 1024 * 1024;
this->startWait = startWait;
wait = new cCondWait();
@@ -1495,6 +1438,7 @@ cOglThread::cOglThread(cCondWait *startWait, int maxCacheSize) : cThread("oglThr
}
Start();
}
cOglThread::~cOglThread() {
@@ -1693,13 +1637,12 @@ void cOglThread::Action(void) {
dsyslog("[softhddev]OpenGL Worker Thread Ended");
}
extern "C" int GlxInitopengl();
bool cOglThread::InitOpenGL(void) {
#ifdef USE_DRM
GlxInitopengl();
#else
const char *displayName = X11DisplayName;
if (!displayName) {
displayName = getenv("DISPLAY");
@@ -1732,7 +1675,8 @@ bool cOglThread::InitOpenGL(void) {
esyslog("[softhddev]glewInit failed, aborting\n");
return false;
}
#endif
VertexBuffers[vbText]->EnableBlending();
glDisable(GL_DEPTH_TEST);
return true;
@@ -1794,8 +1738,11 @@ void cOglThread::Cleanup(void) {
DeleteShaders();
// glVDPAUFiniNV();
cOglFont::Cleanup();
#ifndef USE_DRM
glutExit();
#else
GlxDestroy();
#endif
pthread_mutex_unlock(&OSDMutex);
}
@@ -2061,11 +2008,11 @@ cOglOsd::cOglOsd(int Left, int Top, uint Level, std::shared_ptr<cOglThread> oglT
cOglOsd::~cOglOsd() {
OsdClose();
SetActive(false);
#if 0
if (posd)
free(posd);
posd = 0;
#endif
oglThread->DoCmd(new cOglCmdDeleteFb(bFb));
}

View File

@@ -0,0 +1,312 @@
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 8358152e403e..573ab6ea1a6e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1274,6 +1274,7 @@ struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
+ bool hdr_supported;
};
struct intel_digital_port {
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f8f1308643a9..a1d0127b7f57 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -35,6 +35,8 @@
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+
/* AUX addresses to write MCA AVI IF */
#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +106,31 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
return true;
}
+static bool lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ u8 hdr_caps;
+ int ret;
+
+ /* Enable HDR for MCA based LSPCON devices */
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = drm_dp_dpcd_read(&dp->aux, DPCD_MCA_LSPCON_HDR_STATUS,
+ &hdr_caps, 1);
+ else
+ return false;
+
+ if (ret < 0) {
+ DRM_DEBUG_KMS("hdr capability detection failed\n");
+ lspcon->hdr_supported = false;
+ return false;
+ } else if (hdr_caps & 0x1) {
+ DRM_DEBUG_KMS("lspcon capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+
+ return true;
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -581,6 +608,11 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
+ if (!lspcon_detect_hdr_capability(lspcon)) {
+ DRM_ERROR("LSPCON hdr detection failed\n");
+ return false;
+ }
+
connector->ycbcr_420_allowed = true;
lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
--
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b54ccbb5aad5..051e30ad80e7 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -576,6 +576,16 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
return val & mask;
}
+void lspcon_drm_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ DRM_DEBUG_KMS("Update HDR metadata for lspcon\n");
+ /* It uses the legacy hsw implementation for the same */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+}
+
static const u8 infoframe_type_to_idx[] = {
HDMI_PACKET_TYPE_GENERAL_CONTROL,
HDMI_PACKET_TYPE_GAMUT_METADATA,
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index a1d0127b7f57..51ad5f02e700 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -460,27 +460,41 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret;
+ bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
- /* LSPCON only needs AVI IF */
- if (type != HDMI_INFOFRAME_TYPE_AVI)
+ if (!(type == HDMI_INFOFRAME_TYPE_AVI ||
+ type == HDMI_PACKET_TYPE_GAMUT_METADATA))
return;
- if (lspcon->vendor == LSPCON_VENDOR_MCA)
- ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
- frame, len);
- else
- ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
- frame, len);
+ /*
+ * Supporting HDR on MCA LSPCON
+ * Todo: Add support for Parade later
+ */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA &&
+ lspcon->vendor != LSPCON_VENDOR_MCA)
+ return;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA) {
+ if (type == HDMI_INFOFRAME_TYPE_AVI)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ lspcon_drm_write_infoframe(encoder, crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ frame, VIDEO_DIP_DATA_SIZE);
+ } else {
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, frame,
+ len);
+ }
if (!ret) {
- DRM_ERROR("Failed to write AVI infoframes\n");
+ DRM_ERROR("Failed to write infoframes\n");
return;
}
- DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+ DRM_DEBUG_DRIVER("Infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 37cfddf8a9c5..65878904f672 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -35,4 +35,8 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
void lspcon_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *crtc_state);
+void lspcon_drm_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
--
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 51ad5f02e700..c32452360eeb 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -627,6 +627,11 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
+ if (lspcon->vendor == LSPCON_VENDOR_MCA && lspcon->hdr_supported)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+
connector->ycbcr_420_allowed = true;
lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
--
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index d0a937fb0c56..e78b3a1626fd 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -416,6 +416,7 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->hdr_output_metadata)
drm_property_blob_get(state->hdr_output_metadata);
+ state->hdr_metadata_changed = false;
/* Don't copy over a writeback job, they are used only once */
state->writeback_job = NULL;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0d466d3b0809..5beabcd42d30 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -734,6 +734,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
val,
sizeof(struct hdr_output_metadata), -1,
&replaced);
+ state->hdr_metadata_changed |= replaced;
return ret;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9ba794cb9b4f..dee3a593564c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3851,6 +3851,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon =
+ enc_to_intel_lspcon(&encoder->base);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -3860,6 +3862,12 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_psr_enable(intel_dp, crtc_state);
intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
+
+ /* Set the infoframe for NON modeset cases as well */
+ if (lspcon->active && lspcon->hdr_supported &&
+ conn_state->hdr_metadata_changed)
+ intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp, crtc_state,
+ conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 5eeafa45831a..cc616fd31d8b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4651,7 +4651,7 @@ intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
-static void
+void
intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 65878904f672..3404cff8c337 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -14,6 +14,7 @@ struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
+struct intel_dp;
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
@@ -39,4 +40,7 @@ void lspcon_drm_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
+void intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_LSPCON_H__ */
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 5f8c3389d46f..1f0b4fcf0bd3 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -661,6 +661,7 @@ struct drm_connector_state {
* DRM blob property for HDR output metadata
*/
struct drm_property_blob *hdr_output_metadata;
+ u8 hdr_metadata_changed : 1;
};
/**
--
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index c32452360eeb..8565bf73c4cd 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -505,6 +505,11 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
/* FIXME implement this */
}
+/* HDMI HDR Colorspace Spec Definitions */
+#define NORMAL_COLORIMETRY_MASK 0x3
+#define EXTENDED_COLORIMETRY_MASK 0x7
+#define HDMI_COLORIMETRY_BT2020_YCC ((3 << 0) | (6 << 2) | (0 << 5))
+
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -549,6 +554,19 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
+ /*
+ * Set BT2020 colorspace if driving HDR data
+ * ToDo: Make this generic and expose all colorspaces for lspcon
+ */
+ if (lspcon->active && conn_state->hdr_metadata_changed) {
+ frame.avi.colorimetry =
+ HDMI_COLORIMETRY_BT2020_YCC &
+ NORMAL_COLORIMETRY_MASK;
+ frame.avi.extended_colorimetry =
+ (HDMI_COLORIMETRY_BT2020_YCC >> 2) &
+ EXTENDED_COLORIMETRY_MASK;
+ }
+
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
DRM_ERROR("Failed to pack AVI IF\n");
--
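Taken together, the patches above teach drm-intel to detect HDR capability on MCA LSPCON adapters, attach the hdr_output_metadata property to their connectors, route gamut-metadata infoframes through the existing hsw_write_infoframe() path, and track an hdr_metadata_changed flag so BT.2020 colorimetry and the metadata infoframe are refreshed even without a full modeset.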

patches/UHD-10Bit.patch (new file, 38 lines)
View File

@@ -0,0 +1,38 @@
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index cc616fd31d8b..f2d1d7bd87d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -616,8 +616,10 @@ intel_dp_mode_valid(struct drm_connector *connector,
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&intel_encoder->base);
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
@@ -639,6 +641,21 @@ intel_dp_mode_valid(struct drm_connector *connector,
target_clock = fixed_mode->clock;
}
+ /*
+ * Reducing Blanking to incorporate DP and HDMI timing/link bandwidth
+ * limitations for CEA modes (4k@60 at 10 bpp). DP can drive 17.28Gbs
+ * while 4k modes (VIC97 etc) at 10 bpp required 17.8 Gbps. This will
+ * cause mode to blank out. Reduced Htotal by shortening the back porch
+ * and front porch within permissible limits.
+ */
+ if (lspcon->active && lspcon->hdr_supported &&
+ mode->clock > 570000) {
+ mode->clock = 570000;
+ mode->htotal -= 180;
+ mode->hsync_start -= 72;
+ mode->hsync_end -= 72;
+ }
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
--
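For reference, the numbers behind this patch: VIC 97 (3840x2160@60, 594 MHz pixel clock) at 10 bpc needs 594 MHz x 30 bit = 17.82 Gbps, while four HBR2 lanes carry 4 x 5.4 Gbps x 8/10 = 17.28 Gbps after 8b/10b coding. Capping the clock at 570 MHz and shortening Htotal by 180 pixels keeps the refresh at roughly 60 Hz while staying inside the link budget.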

View File

@@ -0,0 +1,14 @@
--- helpers.c.orig 2019-11-19 20:02:37.851039110 +0100
+++ helpers.c 2019-11-19 20:02:03.733164221 +0100
@@ -5,9 +5,9 @@
#include <vdr/skins.h>
cOsd *CreateOsd(int Left, int Top, int Width, int Height) {
- cOsd *osd = cOsdProvider::NewOsd(Left, Top);
+ cOsd *osd = cOsdProvider::NewOsd(cOsd::OsdLeft() + Left,cOsd::OsdTop() + Top);
if (osd) {
- tArea Area = { 0, 0, Width, Height, 32 };
+ tArea Area = { 0, 0, Width - 1, Height - 1, 32 };
if (osd->SetAreas(&Area, 1) == oeOk) {
return osd;
}
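This helper patch for skinnopacity does two things: it offsets the skin's coordinates by VDR's configured OSD origin (cOsd::OsdLeft()/OsdTop()), and it hands tArea inclusive end coordinates (Width - 1, Height - 1), which is what cOsd::SetAreas() expects.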

shaders.h (203 lines)
View File

@@ -1,97 +1,94 @@
// shader
#ifdef CUVID
char vertex_osd[] = { "\
#version 330\n\
in vec2 vertex_position;\n\
in vec2 vertex_texcoord0;\n\
out vec2 texcoord0;\n\
void main() {\n\
gl_Position = vec4(vertex_position, 1.0, 1.0);\n\
texcoord0 = vertex_texcoord0;\n\
}\n" };
char fragment_osd[] = { "\
#version 330\n\
#define texture1D texture\n\
precision mediump float; \
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
uniform sampler2D texture0;\n\
void main() {\n\
vec4 color; \n\
color = vec4(texture(texture0, texcoord0));\n\
out_color = color;\n\
}\n" };
char vertex[] = { "\
#version 310 es\n\
in vec2 vertex_position;\n\
in vec2 vertex_texcoord0;\n\
out vec2 texcoord0;\n\
in vec2 vertex_texcoord1;\n\
out vec2 texcoord1;\n\
void main() {\n\
gl_Position = vec4(vertex_position, 1.0, 1.0);\n\
texcoord0 = vertex_texcoord0;\n\
texcoord1 = vertex_texcoord1;\n\
}\n" };
char fragment[] = { "\
#version 310 es\n\
#define texture1D texture\n\
#define texture3D texture\n\
precision mediump float; \
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
in vec2 texcoord1;\n\
uniform mat3 colormatrix;\n\
uniform vec3 colormatrix_c;\n\
uniform sampler2D texture0;\n\
uniform sampler2D texture1;\n\
void main() {\n\
vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\
color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\
color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;\n\
// color conversion\n\
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\
color.a = 1.0;\n\
// color mapping\n\
out_color = color;\n\
}\n" };
char fragment_bt2100[] = { "\
#version 310 es\n \
#define texture1D texture\n\
#define texture3D texture\n\
precision mediump float; \
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
in vec2 texcoord1;\n\
uniform mat3 colormatrix;\n\
uniform vec3 colormatrix_c;\n\
uniform mat3 cms_matrix;\n\
uniform sampler2D texture0;\n\
uniform sampler2D texture1;\n\
//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\
void main() {\n\
vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\
color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\
color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;\n\
// color conversion\n\
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\
color.a = 1.0;\n\
// color mapping\n\
color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(2.4));\n\
color.rgb = cms_matrix * color.rgb;\n\
color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\
out_color = color;\n\
}\n" };
const char *gl_version = "#version 330";
#else
#ifdef RASPI
const char *gl_version = "#version 300 es";
#else
const char *gl_version = "#version 300 es ";
#endif
#endif
char vertex_3[] = {"\
%s\n\
in vec2 vertex_position;\n\
in vec2 vertex_texcoord0;\n\
out vec2 texcoord0;\n\
in vec2 vertex_texcoord1;\n\
out vec2 texcoord1;\n\
in vec2 vertex_texcoord2;\n\
out vec2 texcoord2;\n\
void main() {\n\
gl_Position = vec4(vertex_position, 1.0, 1.0);\n\
texcoord0 = vertex_texcoord0;\n\
texcoord1 = vertex_texcoord1;\n\
texcoord2 = vertex_texcoord1;\n\
}\n"};
char fragment_3[] = {"\
%s\n\
#define texture1D texture\n\
#define texture3D texture\n\
precision mediump float; \
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
in vec2 texcoord1;\n\
in vec2 texcoord2;\n\
uniform mat3 colormatrix;\n\
uniform vec3 colormatrix_c;\n\
uniform sampler2D texture0;\n\
uniform sampler2D texture1;\n\
uniform sampler2D texture2;\n\
//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\
void main() {\n\
vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\
color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;\n\
color.g = 1.000000 * vec4(texture(texture1, texcoord1)).r;\n\
color.b = 1.000000 * vec4(texture(texture2, texcoord2)).r;\n\
// color conversion\n\
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\
color.a = 1.0;\n\
// color mapping\n\
out_color = color;\n\
}\n"};
char fragment_bt2100_3[] = {"\
%s\n \
#define texture1D texture\n\
#define texture3D texture\n\
precision mediump float; \
layout(location = 0) out vec4 out_color;\n\
in vec2 texcoord0;\n\
in vec2 texcoord1;\n\
in vec2 texcoord2;\n\
uniform mat3 colormatrix;\n\
uniform vec3 colormatrix_c;\n\
uniform mat3 cms_matrix;\n\
uniform sampler2D texture0;\n\
uniform sampler2D texture1;\n\
uniform sampler2D texture2;\n\
//#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))\n\
void main() {\n\
vec4 color; // = vec4(0.0, 0.0, 0.0, 1.0);\n\
color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;\n\
color.g = 1.003906 * vec4(texture(texture1, texcoord1)).r;\n\
color.b = 1.003906 * vec4(texture(texture2, texcoord2)).r;\n\
// color conversion\n\
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;\n\
color.a = 1.0;\n\
// color mapping\n\
color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(2.4));\n\
color.rgb = cms_matrix * color.rgb;\n\
color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\
out_color = color;\n\
}\n"};
char vertex_osd[] = { "\
\n\
%s\n\
in vec2 vertex_position;\n\
in vec2 vertex_texcoord0;\n\
out vec2 texcoord0;\n\
@@ -101,7 +98,7 @@ texcoord0 = vertex_texcoord0;\n\
}\n" };
char fragment_osd[] = { "\
\n\
%s\n\
#define texture1D texture\n\
precision mediump float; \
layout(location = 0) out vec4 out_color;\n\
@@ -114,7 +111,7 @@ out_color = color;\n\
}\n" };
char vertex[] = { "\
\n\
%s\n\
in vec2 vertex_position;\n\
in vec2 vertex_texcoord0;\n\
out vec2 texcoord0;\n\
@@ -127,7 +124,7 @@ texcoord1 = vertex_texcoord1;\n\
}\n" };
char fragment[] = { "\
\n\
%s\n\
#define texture1D texture\n\
#define texture3D texture\n\
precision mediump float; \
@@ -151,7 +148,7 @@ out_color = color;\n\
}\n" };
char fragment_bt2100[] = { "\
\n \
%s\n \
#define texture1D texture\n\
#define texture3D texture\n\
precision mediump float; \
@@ -179,7 +176,7 @@ color.rgb = clamp(color.rgb, 0.0, 1.0);\n\
color.rgb = pow(color.rgb, vec3(1.0/2.4));\n\
out_color = color;\n\
}\n" };
#endif
/* Color conversion matrix: RGB = m * YUV + c
* m is in row-major matrix, with m[row][col], e.g.:
@@ -281,9 +278,10 @@ static void compile_attach_shader(GLuint program, GLenum type, const char *sourc
GLint status, log_length;
char log[4000];
GLsizei len;
char *buffer = (char *) malloc(1000);
sprintf(buffer,source,gl_version);
shader = glCreateShader(type);
glShaderSource(shader, 1, &source, NULL);
glShaderSource(shader, 1, (const GLchar **)&buffer, NULL);
glCompileShader(shader);
status = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
@@ -295,6 +293,7 @@ static void compile_attach_shader(GLuint program, GLenum type, const char *sourc
glAttachShader(program, shader);
glDeleteShader(shader);
free(buffer);
}
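The same %s expansion into a fixed 1000-byte buffer is used here; the bt2100 fragment shaders above are close to that size, so sizing the buffer from strlen(source) + strlen(gl_version) + 1 (as sketched for cShader::Compile earlier) is the safer choice.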
static void link_shader(GLuint program)
@@ -338,27 +337,27 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace)
case AVCOL_SPC_RGB:
m = &yuv_bt601.m[0][0];
c = &yuv_bt601.c[0];
frag = fragment;
frag = Planes == 3?fragment_3:fragment;
Debug(3, "BT601 Colorspace used\n");
break;
case AVCOL_SPC_BT709:
case AVCOL_SPC_UNSPECIFIED: // comes with UHD
m = &yuv_bt709.m[0][0];
c = &yuv_bt709.c[0];
frag = fragment;
frag = Planes==3?fragment_3:fragment;
Debug(3, "BT709 Colorspace used\n");
break;
case AVCOL_SPC_BT2020_NCL:
m = &yuv_bt2020ncl.m[0][0];
c = &yuv_bt2020ncl.c[0];
cms = &cms_matrix[0][0];
frag = fragment_bt2100;
frag = Planes == 3?fragment_bt2100_3:fragment_bt2100;
Debug(3, "BT2020NCL Colorspace used\n");
break;
default: // fallback
m = &yuv_bt709.m[0][0];
c = &yuv_bt709.c[0];
frag = fragment;
frag = Planes==3?fragment_3:fragment;
Debug(3, "default BT709 Colorspace used %d\n", colorspace);
break;
}
@@ -366,7 +365,7 @@ static GLuint sc_generate(GLuint gl_prog, enum AVColorSpace colorspace)
Debug(3, "vor create\n");
gl_prog = glCreateProgram();
Debug(3, "vor compile vertex\n");
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, vertex);
compile_attach_shader(gl_prog, GL_VERTEX_SHADER, Planes==3?vertex_3:vertex);
Debug(3, "vor compile fragment\n");
compile_attach_shader(gl_prog, GL_FRAGMENT_SHADER, frag);
glBindAttribLocation(gl_prog, 0, "vertex_position");
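Planes selects between the two shader families: two-plane input (NV12-style, with luma in texture0 and interleaved chroma read from texture1.rg) keeps the original fragment/fragment_bt2100 shaders, while fully planar three-plane input uses the new *_3 variants that sample each plane from its own texture.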

View File

@@ -1,91 +0,0 @@
Pro 7 1080i
[vo/opengl] [ 1] color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
UHD 10 Bit
[vo/opengl] [ 1] color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
HEVC 8 Bit
[vo/opengl] [ 1] color.r = 1.003906 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.003906 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
ZDF 720p
[vo/opengl] [ 1] color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;
[vo/opengl] [ 2] color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;
[vo/opengl] [ 3] // color conversion
[vo/opengl] [ 4] color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
[vo/opengl] [ 5] color.a = 1.0;
[vo/opengl] [ 6] // color mapping
VERTEX
#version 330
in vec2 vertex_position;
in vec2 vertex_texcoord0;
out vec2 texcoord0;
in vec2 vertex_texcoord1;
out vec2 texcoord1;
in vec2 vertex_texcoord2;
out vec2 texcoord2;
in vec2 vertex_texcoord3;
out vec2 texcoord3;
in vec2 vertex_texcoord4;
out vec2 texcoord4;
in vec2 vertex_texcoord5;
out vec2 texcoord5;
void main() {
gl_Position = vec4(vertex_position, 1.0, 1.0);
texcoord0 = vertex_texcoord0;
texcoord1 = vertex_texcoord1;
texcoord2 = vertex_texcoord2;
texcoord3 = vertex_texcoord3;
texcoord4 = vertex_texcoord4;
texcoord5 = vertex_texcoord5;
}
FRAGMENT
#version 330
#define texture1D texture
#define texture3D texture
out vec4 out_color;
in vec2 texcoord0;
in vec2 texcoord1;
in vec2 texcoord2;
in vec2 texcoord3;
in vec2 texcoord4;
in vec2 texcoord5;
uniform mat3 colormatrix;
uniform vec3 colormatrix_c;
uniform sampler2D texture0;
uniform vec2 texture_size0;
uniform mat2 texture_rot0;
uniform vec2 pixel_size0;
uniform sampler2D texture1;
uniform vec2 texture_size1;
uniform mat2 texture_rot1;
uniform vec2 pixel_size1;
#define LUT_POS(x, lut_size) mix(0.5 / (lut_size), 1.0 - 0.5 / (lut_size), (x))
void main() {
vec4 color = vec4(0.0, 0.0, 0.0, 1.0);
color.r = 1.000000 * vec4(texture(texture0, texcoord0)).r;
color.gb = 1.000000 * vec4(texture(texture1, texcoord1)).rg;
// color conversion
color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;
color.a = 1.0;
// color mapping
out_color = color;
}

View File

@@ -61,7 +61,7 @@ extern "C"
/// vdr-plugin version number.
/// Makefile extracts the version number for generating the file name
/// for the distribution archive.
static const char *const VERSION = "2.1.0"
static const char *const VERSION = "3.0.0"
#ifdef GIT_REV
"-GIT" GIT_REV
#endif
@@ -213,109 +213,108 @@ class cSoftRemote:public cRemote, private cThread
public:
/**
** Soft device remote class constructor.
** Soft device remote class constructor.
**
** @param name remote name
** @param name remote name
*/
cSoftRemote(void) : cRemote("XKeySym")
cSoftRemote(void):cRemote("XKeySym")
{
Start();
Start();
}
virtual ~cSoftRemote()
virtual ~ cSoftRemote()
{
Cancel(3);
Cancel(3);
}
/**
** Receive keycode.
** Receive keycode.
**
** @param code key code
** @param code key code
*/
void Receive(const char *code) {
cMutexLock MutexLock(&mutex);
Command = code;
keyReceived.Broadcast();
void Receive(const char *code)
{
cMutexLock MutexLock(&mutex);
Command = code;
keyReceived.Broadcast();
}
};
void cSoftRemote::Action(void)
{
// see also VDR's cKbdRemote::Action()
cTimeMs FirstTime;
cTimeMs LastTime;
cString FirstCommand = "";
cString LastCommand = "";
bool Delayed = false;
bool Repeat = false;
// see also VDR's cKbdRemote::Action()
cTimeMs FirstTime;
cTimeMs LastTime;
cString FirstCommand = "";
cString LastCommand = "";
bool Delayed = false;
bool Repeat = false;
while (Running()) {
while (Running()) {
cMutexLock MutexLock(&mutex);
if (keyReceived.TimedWait(mutex, Setup.RcRepeatDelta * 3 / 2) && **Command) {
if (strcmp(Command, LastCommand) == 0) {
// If two keyboard events with the same command come in without an intermediate
// timeout, this is a long key press that caused the repeat function to kick in:
Delayed = false;
FirstCommand = "";
if (FirstTime.Elapsed() < (uint)Setup.RcRepeatDelay)
continue; // repeat function kicks in after a short delay
if (LastTime.Elapsed() < (uint)Setup.RcRepeatDelta)
continue; // skip same keys coming in too fast
cRemote::Put(Command, true);
Repeat = true;
LastTime.Set();
}
else if (strcmp(Command, FirstCommand) == 0) {
// If the same command comes in twice with an intermediate timeout, we
// need to delay the second command to see whether it is going to be
// a repeat function or a separate key press:
Delayed = true;
}
else {
// This is a totally new key press, so we accept it immediately:
cRemote::Put(Command);
Delayed = false;
FirstCommand = Command;
FirstTime.Set();
}
}
else if (Repeat) {
// Timeout after a repeat function, so we generate a 'release':
cRemote::Put(LastCommand, false, true);
Repeat = false;
}
else if (Delayed && *FirstCommand) {
// Timeout after two normal key presses of the same key, so accept the
// delayed key:
cRemote::Put(FirstCommand);
Delayed = false;
FirstCommand = "";
FirstTime.Set();
}
else if (**FirstCommand && FirstTime.Elapsed() > (uint)Setup.RcRepeatDelay) {
Delayed = false;
FirstCommand = "";
FirstTime.Set();
}
if (strcmp(Command, LastCommand) == 0) {
// If two keyboard events with the same command come in without an intermediate
// timeout, this is a long key press that caused the repeat function to kick in:
Delayed = false;
FirstCommand = "";
if (FirstTime.Elapsed() < (uint) Setup.RcRepeatDelay)
continue; // repeat function kicks in after a short delay
if (LastTime.Elapsed() < (uint) Setup.RcRepeatDelta)
continue; // skip same keys coming in too fast
cRemote::Put(Command, true);
Repeat = true;
LastTime.Set();
} else if (strcmp(Command, FirstCommand) == 0) {
// If the same command comes in twice with an intermediate timeout, we
// need to delay the second command to see whether it is going to be
// a repeat function or a separate key press:
Delayed = true;
} else {
// This is a totally new key press, so we accept it immediately:
cRemote::Put(Command);
Delayed = false;
FirstCommand = Command;
FirstTime.Set();
}
} else if (Repeat) {
// Timeout after a repeat function, so we generate a 'release':
cRemote::Put(LastCommand, false, true);
Repeat = false;
} else if (Delayed && *FirstCommand) {
// Timeout after two normal key presses of the same key, so accept the
// delayed key:
cRemote::Put(FirstCommand);
Delayed = false;
FirstCommand = "";
FirstTime.Set();
} else if (**FirstCommand && FirstTime.Elapsed() > (uint) Setup.RcRepeatDelay) {
Delayed = false;
FirstCommand = "";
FirstTime.Set();
}
LastCommand = Command;
Command = "";
}
}
}
static cSoftRemote *csoft = NULL;
/**
** Feed key press as remote input (called from C part).
** Feed key press as remote input (called from C part).
**
** @param keymap target keymap "XKeymap" name (obsolete, ignored)
** @param key pressed/released key name
** @param repeat repeated key flag (obsolete, ignored)
** @param release released key flag (obsolete, ignored)
** @param letter x11 character string (system setting locale)
** @param keymap target keymap "XKeymap" name (obsolete, ignored)
** @param key pressed/released key name
** @param repeat repeated key flag (obsolete, ignored)
** @param release released key flag (obsolete, ignored)
** @param letter x11 character string (system setting locale)
*/
extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat, int release, const char *letter)
{
if (!csoft || !keymap || !key) {
return;
return;
}
csoft->Receive(key);
@@ -2473,11 +2472,17 @@ class cSoftHdDevice:public cDevice
return "softhdcuvid";
}
#endif
#ifdef VAAPI
#if defined (VAAPI) && !defined (USE_DRM)
virtual cString DeviceName(void) const
{
return "softhdvaapi";
}
#endif
#if defined (VAAPI) && defined (USE_DRM)
virtual cString DeviceName(void) const
{
return "softhddrm";
}
#endif
virtual bool HasDecoder(void) const;
virtual bool CanReplay(void) const;
@@ -3115,7 +3120,7 @@ bool cPluginSoftHdDevice::Start(void)
}
}
csoft = new cSoftRemote;
csoft = new cSoftRemote;
switch (::Start()) {
case 1:
@@ -3144,7 +3149,8 @@ void cPluginSoftHdDevice::Stop(void)
// dsyslog("[softhddev]%s:\n", __FUNCTION__);
::Stop();
delete csoft;
delete csoft;
csoft = NULL;
}

View File

@@ -515,7 +515,7 @@ enum
#define PES_START_CODE_SIZE 6 ///< size of pes start code with length
#define PES_HEADER_SIZE 9 ///< size of pes header
#define PES_MAX_HEADER_SIZE (PES_HEADER_SIZE + 256) ///< maximal header size
#define PES_MAX_PAYLOAD (512 * 1024) ///< max pay load size
#define PES_MAX_PAYLOAD (512 * 1024) ///< max pay load size
///
/// PES demuxer.
@@ -885,9 +885,9 @@ static void PesParse(PesDemux * pesdx, const uint8_t * data, int size, int is_st
//////////////////////////////////////////////////////////////////////////////
/// Transport stream packet size
#define TS_PACKET_SIZE 188
#define TS_PACKET_SIZE 188
/// Transport stream packet sync byte
#define TS_PACKET_SYNC 0x47
#define TS_PACKET_SYNC 0x47
///
/// transport stream demuxer typedef.
@@ -982,11 +982,11 @@ static int TsDemuxer(TsDemux * tsdx, const uint8_t * data, int size)
#endif
/**
** Play audio packet.
** Play audio packet.
**
** @param data data of exactly one complete PES packet
** @param size size of PES packet
** @param id PES packet type
** @param data data of exactly one complete PES packet
** @param size size of PES packet
** @param id PES packet type
*/
int PlayAudio(const uint8_t * data, int size, uint8_t id)
{
@@ -1211,14 +1211,14 @@ int PlayAudio(const uint8_t * data, int size, uint8_t id)
#ifndef NO_TS_AUDIO
/**
** Play transport stream audio packet.
** Play transport stream audio packet.
**
** VDR can have buffered data belonging to previous channel!
** VDR can have buffered data belonging to previous channel!
**
** @param data data of exactly one complete TS packet
** @param size size of TS packet (always TS_PACKET_SIZE)
** @param data data of exactly one complete TS packet
** @param size size of TS packet (always TS_PACKET_SIZE)
**
** @returns number of bytes consumed;
** @returns number of bytes consumed;
*/
int PlayTsAudio(const uint8_t * data, int size)
{
@@ -1265,9 +1265,9 @@ int PlayTsAudio(const uint8_t * data, int size)
#endif
/**
** Set volume of audio device.
** Set volume of audio device.
**
** @param volume VDR volume (0 .. 255)
** @param volume VDR volume (0 .. 255)
*/
void SetVolumeDevice(int volume)
{
@@ -1275,7 +1275,7 @@ void SetVolumeDevice(int volume)
}
/**
*** Resets channel ID (restarts audio).
*** Resets channel ID (restarts audio).
**/
void ResetChannelId(void)
{
@@ -1291,7 +1291,7 @@ void ResetChannelId(void)
#define VIDEO_PACKET_MAX 256 ///< max number of video packets 192
/**
** Video output stream device structure. Parser, decoder, display.
** Video output stream device structure. Parser, decoder, display.
*/
struct __video_stream__
{
@@ -1344,9 +1344,9 @@ static volatile char Usr1Signal; ///< true got usr1 signal
//////////////////////////////////////////////////////////////////////////////
/**
** Initialize video packet ringbuffer.
** Initialize video packet ringbuffer.
**
** @param stream video stream
** @param stream video stream
*/
static void VideoPacketInit(VideoStream * stream)
{
@@ -1367,9 +1367,9 @@ static void VideoPacketInit(VideoStream * stream)
}
/**
** Cleanup video packet ringbuffer.
** Cleanup video packet ringbuffer.
**
** @param stream video stream
** @param stream video stream
*/
static void VideoPacketExit(VideoStream * stream)
{
@@ -1383,12 +1383,12 @@ static void VideoPacketExit(VideoStream * stream)
}
/**
** Place video data in packet ringbuffer.
** Place video data in packet ringbuffer.
**
** @param stream video stream
** @param pts presentation timestamp of pes packet
** @param data data of pes packet
** @param size size of pes packet
** @param stream video stream
** @param pts presentation timestamp of pes packet
** @param data data of pes packet
** @param size size of pes packet
*/
static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const void *data, int size)
{
@@ -1431,9 +1431,9 @@ static void VideoEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const v
}
/**
** Reset current packet.
** Reset current packet.
**
** @param stream video stream
** @param stream video stream
*/
static void VideoResetPacket(VideoStream * stream)
{
@@ -1449,10 +1449,10 @@ static void VideoResetPacket(VideoStream * stream)
}
/**
** Finish current packet advance to next.
** Finish current packet advance to next.
**
** @param stream video stream
** @param codec_id codec id of packet (MPEG/H264)
** @param stream video stream
** @param codec_id codec id of packet (MPEG/H264)
*/
static void VideoNextPacket(VideoStream * stream, int codec_id)
{
@@ -1493,18 +1493,18 @@ static void VideoNextPacket(VideoStream * stream, int codec_id)
#ifdef USE_PIP
/**
** Place mpeg video data in packet ringbuffer.
** Place mpeg video data in packet ringbuffer.
**
** Some tv-stations send multiple pictures in a single PES packet.
** Split the packet into single picture packets.
** Nick/CC, Viva, MediaShop, Deutsches Music Fernsehen
** Some tv-stations send multiple pictures in a single PES packet.
** Split the packet into single picture packets.
** Nick/CC, Viva, MediaShop, Deutsches Music Fernsehen
**
** FIXME: this code can be written much faster
** FIXME: this code can be written much faster
**
** @param stream video stream
** @param pts presentation timestamp of pes packet
** @param data data of pes packet
** @param size size of pes packet
** @param stream video stream
** @param pts presentation timestamp of pes packet
** @param data data of pes packet
** @param size size of pes packet
*/
static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, const uint8_t * data, int size)
{
@@ -1533,7 +1533,7 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con
#endif
if (!p[0] || p[0] == 0xb3) {
#ifdef DEBUG
printf("last: %d start aspect %02x\n", stream->StartCodeState, p[4]);
printf("last: %d start aspect %02x\n", stream->StartCodeState, p[4]);
#endif
stream->PacketRb[stream->PacketWrite].stream_index -= 3;
VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO);
@@ -1551,7 +1551,7 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con
#endif
if (p[0] == 0x01 && (!p[1] || p[1] == 0xb3)) {
#ifdef DEBUG
printf("last: %d start aspect %02x\n", stream->StartCodeState, p[5]);
printf("last: %d start aspect %02x\n", stream->StartCodeState, p[5]);
#endif
stream->PacketRb[stream->PacketWrite].stream_index -= 2;
VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO);
@@ -1568,7 +1568,7 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con
#endif
if (!p[0] && p[1] == 0x01 && (!p[2] || p[2] == 0xb3)) {
#ifdef DEBUG
printf("last: %d start aspect %02x\n", stream->StartCodeState, p[6]);
printf("last: %d start aspect %02x\n", stream->StartCodeState, p[6]);
#endif
stream->PacketRb[stream->PacketWrite].stream_index -= 1;
VideoNextPacket(stream, AV_CODEC_ID_MPEG2VIDEO);
@@ -1650,19 +1650,19 @@ static void VideoMpegEnqueue(VideoStream * stream, int64_t pts, int64_t dts, con
#endif
/**
** Fix packet for FFMpeg.
** Fix packet for FFMpeg.
**
** Some tv-stations send multiple pictures in a single PES packet.
** Current ffmpeg 0.10 and libav-0.8 has problems with this.
** Split the packet into single picture packets.
** Some tv-stations send multiple pictures in a single PES packet.
** Current ffmpeg 0.10 and libav-0.8 has problems with this.
** Split the packet into single picture packets.
**
** FIXME: there are stations which have multiple pictures and
** the last picture incomplete in the PES packet.
** FIXME: there are stations which have multiple pictures and
** the last picture incomplete in the PES packet.
**
** FIXME: move the function call into PlayVideo, then the hardware
** decoder doesn't need to support decoding multiple frames.
** FIXME: move the function call into PlayVideo, then the hardware
** decoder doesn't need to support decoding multiple frames.
**
** @param avpkt ffmpeg a/v packet
** @param avpkt ffmpeg a/v packet
*/
#ifndef USE_PIP
@@ -1728,9 +1728,9 @@ static void FixPacketForFFMpeg(VideoDecoder * vdecoder, AVPacket * avpkt)
#endif
/**
** Open video stream.
** Open video stream.
**
** @param stream video stream
** @param stream video stream
*/
static void VideoStreamOpen(VideoStream * stream)
{
@@ -1745,13 +1745,13 @@ static void VideoStreamOpen(VideoStream * stream)
}
/**
** Close video stream.
** Close video stream.
**
** @param stream video stream
** @param delhw flag delete hardware decoder
** @param stream video stream
** @param delhw flag delete hardware decoder
**
** @note must be called from the video thread, otherwise xcb has a
** deadlock.
** @note must be called from the video thread, otherwise xcb has a
** deadlock.
*/
static void VideoStreamClose(VideoStream * stream, int delhw)
{
@@ -1782,14 +1782,14 @@ static void VideoStreamClose(VideoStream * stream, int delhw)
}
/**
** Poll PES packet ringbuffer.
** Poll PES packet ringbuffer.
**
** Called if video frame buffers are full.
** Called if video frame buffers are full.
**
** @param stream video stream
** @param stream video stream
**
** @retval 1 something todo
** @retval -1 empty stream
** @retval 1 something todo
** @retval -1 empty stream
*/
int VideoPollInput(VideoStream * stream)
{
@@ -1824,13 +1824,13 @@ int VideoPollInput(VideoStream * stream)
}
/**
** Decode from PES packet ringbuffer.
** Decode from PES packet ringbuffer.
**
** @param stream video stream
** @param stream video stream
**
** @retval 0 packet decoded
** @retval 1 stream paused
** @retval -1 empty stream
** @retval 0 packet decoded
** @retval 1 stream paused
** @retval -1 empty stream
*/
int VideoDecodeInput(VideoStream * stream)
{
@@ -1975,9 +1975,9 @@ int VideoDecodeInput(VideoStream * stream)
}
/**
** Get number of video buffers.
** Get number of video buffers.
**
** @param stream video stream
** @param stream video stream
*/
int VideoGetBuffers(const VideoStream * stream)
{
@@ -1985,9 +1985,9 @@ int VideoGetBuffers(const VideoStream * stream)
}
/**
** Try video start.
** Try video start.
**
** NOT TRUE: Could be called, when already started.
** NOT TRUE: Could be called, when already started.
*/
static void StartVideo(void)
{
@@ -2005,7 +2005,7 @@ static void StartVideo(void)
}
/**
** Stop video.
** Stop video.
*/
static void StopVideo(void)
{
@@ -2043,9 +2043,9 @@ static void StopVideo(void)
#ifdef DEBUG
/**
** Dump mpeg video packet.
** Dump mpeg video packet.
**
** Function to dump a mpeg packet, not needed.
** Function to dump a mpeg packet, not needed.
*/
static void DumpMpeg(const uint8_t * data, int size)
{
@@ -2067,9 +2067,9 @@ static void DumpMpeg(const uint8_t * data, int size)
}
/**
** Dump h264 video packet.
** Dump h264 video packet.
**
** Function to Dump a h264 packet, not needed.
** Function to Dump a h264 packet, not needed.
*/
static int DumpH264(const uint8_t * data, int size)
{
@@ -2091,9 +2091,9 @@ static int DumpH264(const uint8_t * data, int size)
}
/**
** Validate mpeg video packet.
** Validate mpeg video packet.
**
** Function to validate a mpeg packet, not needed.
** Function to validate a mpeg packet, not needed.
*/
static int ValidateMpeg(const uint8_t * data, int size)
{
@@ -2126,13 +2126,13 @@ static int ValidateMpeg(const uint8_t * data, int size)
#endif
/**
** Play video packet.
** Play video packet.
**
** @param stream video stream
** @param data data of exactly one complete PES packet
** @param size size of PES packet
** @param stream video stream
** @param data data of exactly one complete PES packet
** @param size size of PES packet
**
** @return number of bytes used, 0 if internal buffers are full.
** @return number of bytes used, 0 if internal buffers are full.
**
*/
int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
@@ -2352,20 +2352,20 @@ int PlayVideo3(VideoStream * stream, const uint8_t * data, int size)
}
/**
** Play video packet.
** Play video packet.
**
** @param data data of exactly one complete PES packet
** @param size size of PES packet
** @param data data of exactly one complete PES packet
** @param size size of PES packet
**
** @return number of bytes used, 0 if internal buffers are full.
** @return number of bytes used, 0 if internal buffers are full.
**
** @note vdr sends incomplete packets, va-api h264 decoder only
** supports complete packets.
** We buffer here until we receive a complete PES packet, which
** is no problem, the audio is always far behind us.
** cTsToPes::GetPes splits the packets.
** @note vdr sends incomplete packets, va-api h264 decoder only
** supports complete packets.
** We buffer here until we receive a complete PES packet, which
** is no problem, the audio is always far behind us.
** cTsToPes::GetPes splits the packets.
**
** @todo FIXME: combine the 5 ifs at start of the function
** @todo FIXME: combine the 5 ifs at start of the function
*/
int PlayVideo(const uint8_t * data, int size)
{
@@ -2378,16 +2378,16 @@ extern uint8_t *CreateJpeg(uint8_t *, int *, int, int, int);
#if defined(USE_JPEG) && JPEG_LIB_VERSION >= 80
/**
** Create a jpeg image in memory.
** Create a jpeg image in memory.
**
** @param image raw RGB image
** @param raw_size size of raw image
** @param size[out] size of jpeg image
** @param quality jpeg quality
** @param width number of horizontal pixels in image
** @param height number of vertical pixels in image
** @param image raw RGB image
** @param raw_size size of raw image
** @param size[out] size of jpeg image
** @param quality jpeg quality
** @param width number of horizontal pixels in image
** @param height number of vertical pixels in image
**
** @returns allocated jpeg image.
** @returns allocated jpeg image.
*/
uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, int width, int height)
{
@@ -2429,13 +2429,13 @@ uint8_t *CreateJpeg(uint8_t * image, int raw_size, int *size, int quality, int w
#endif
/**
** Grabs the currently visible screen image.
** Grabs the currently visible screen image.
**
** @param size size of the returned data
** @param jpeg flag true, create JPEG data
** @param quality JPEG quality
** @param width number of horizontal pixels in the frame
** @param height number of vertical pixels in the frame
** @param size size of the returned data
** @param jpeg flag true, create JPEG data
** @param quality JPEG quality
** @param width number of horizontal pixels in the frame
** @param height number of vertical pixels in the frame
*/
uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height)
{
@@ -2461,9 +2461,9 @@ uint8_t *GrabImage(int *size, int jpeg, int quality, int width, int height)
//////////////////////////////////////////////////////////////////////////////
/**
** Set play mode, called on channel switch.
**
** @param play_mode play mode (none, video+audio, audio-only, ...)
*/
int SetPlayMode(int play_mode)
{
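Editor's note: the play_mode values come from VDR's ePlayMode enum in device.h. A minimal sketch of starting and ending replay; the numeric values are an assumption of this example, not something this diff defines:

int SetPlayMode(int play_mode);         // from softhddev.c

// Hypothetical replay handling: 1 is assumed to be pmAudioVideo and
// 0 to be pmNone in VDR's ePlayMode enum.
static void StartReplay(void)
{
    SetPlayMode(1);                     // video + audio replay
}

static void StopReplay(void)
{
    SetPlayMode(0);                     // back to live viewing
}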
@@ -2514,8 +2514,8 @@ int SetPlayMode(int play_mode)
}
/**
** Gets the current System Time Counter, which can be used to
** synchronize audio, video and subtitles.
*/
int64_t GetSTC(void)
{
@@ -2528,11 +2528,11 @@ int64_t GetSTC(void)
}
/**
** Get video stream size and aspect.
**
** @param width[OUT] width of video stream
** @param height[OUT] height of video stream
** @param aspect[OUT] aspect ratio (4/3, 16/9, ...) of video stream
*/
void GetVideoSize(int *width, int *height, double *aspect)
{
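Editor's note: a short usage sketch for the out-parameters, assuming only the declaration above; the helper name is hypothetical:

#include <stdio.h>

void GetVideoSize(int *width, int *height, double *aspect);

// Hypothetical debug helper: print the current stream geometry,
// e.g. "1920x1080, aspect 1.78" for a 16:9 HD stream.
static void PrintVideoSize(void)
{
    int width = 0, height = 0;
    double aspect = 0.0;

    GetVideoSize(&width, &height, &aspect);
    printf("%dx%d, aspect %.2f\n", width, height, aspect);
}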
@@ -2562,12 +2562,12 @@ void GetVideoSize(int *width, int *height, double *aspect)
}
/**
** Set trick play speed.
**
** Every single frame shall then be displayed the given number of
** times.
**
** @param speed trick speed
*/
void TrickSpeed(int speed)
{
@@ -2583,7 +2583,7 @@ void TrickSpeed(int speed)
}
/**
** Clears all video and audio data from the device.
*/
void Clear(void)
{
@@ -2606,7 +2606,7 @@ void Clear(void)
}
/**
** Sets the device into play mode.
*/
void Play(void)
{
@@ -2616,7 +2616,7 @@ void Play(void)
}
/**
** Sets the device into "freeze frame" mode.
** Sets the device into "freeze frame" mode.
*/
void Freeze(void)
{
@@ -2626,7 +2626,7 @@ void Freeze(void)
}
/**
** Turns off audio while replaying.
*/
void Mute(void)
{
@@ -2636,10 +2636,10 @@ void Mute(void)
}
/**
** Display the given I-frame as a still picture.
**
** @param data pes frame data
** @param size number of bytes in frame
*/
void StillPicture(const uint8_t * data, int size)
{
@@ -2746,17 +2746,17 @@ void StillPicture(const uint8_t * data, int size)
}
/**
** Poll if device is ready. Called by replay.
**
** This function is useless, the return value is ignored and
** all buffers are overrun by vdr.
**
** The dvd plugin uses this correctly.
**
** @param timeout timeout to become ready in ms
**
** @retval true if ready
** @retval false if busy
*/
int Poll(int timeout)
{
@@ -2789,9 +2789,9 @@ int Poll(int timeout)
}
/**
** Flush the device output buffers.
**
** @param timeout timeout to flush in ms
*/
int Flush(int timeout)
{
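Editor's note: Poll() and Flush() bracket the replay data path around PlayVideo(). A minimal sketch of that pattern, with arbitrary timeout values and a hypothetical helper name:

#include <stdint.h>

int Poll(int timeout);
int Flush(int timeout);
int PlayVideo(const uint8_t *data, int size);

// Hypothetical replay step: wait until the device reports ready,
// push one PES packet, and flush once when the recording ends.
static void ReplayStep(const uint8_t *pes, int size, int last_packet)
{
    if (Poll(100)) {                    // up to 100 ms for the device to become ready
        PlayVideo(pes, size);
    }
    if (last_packet) {
        Flush(2000);                    // give the decoder up to 2 s to drain
    }
}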
@@ -2809,11 +2809,11 @@ int Flush(int timeout)
//////////////////////////////////////////////////////////////////////////////
/**
** Get OSD size and aspect.
**
** @param width[OUT] width of OSD
** @param height[OUT] height of OSD
** @param aspect[OUT] aspect ratio (4/3, 16/9, ...) of OSD
*/
void GetOsdSize(int *width, int *height, double *aspect)
{
@@ -2835,7 +2835,7 @@ void GetOsdSize(int *width, int *height, double *aspect)
}
/**
** Close OSD.
*/
void OsdClose(void)
{
@@ -2843,16 +2843,16 @@ void OsdClose(void)
}
/**
** Draw an OSD pixmap.
**
** @param xi x-coordinate in argb image
** @param yi y-coordinate in argb image
** @param height height in pixel in argb image
** @param width width in pixel in argb image
** @param pitch pitch of argb image
** @param argb 32bit ARGB image data
** @param x x-coordinate on screen of argb image
** @param y y-coordinate on screen of argb image
*/
void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t * argb, int x, int y)
{
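Editor's note: note the height-before-width order in the prototype. A minimal sketch that fills a rectangle and draws it at OSD position (x, y), assuming a tightly packed buffer (pitch = width * 4); the byte order inside each pixel depends on the OSD backend and is not specified by this diff:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

void OsdDrawARGB(int xi, int yi, int height, int width, int pitch,
    const uint8_t *argb, int x, int y);

// Hypothetical helper: fill a width x height rectangle with one 32-bit
// ARGB value and draw it at OSD coordinates (x, y).
static void OsdFillRect(int x, int y, int width, int height, uint32_t color)
{
    int pitch = width * 4;              // tightly packed 32-bit pixels
    uint8_t *argb = malloc((size_t)pitch * height);

    if (!argb) {
        return;
    }
    for (int i = 0; i < width * height; ++i) {
        memcpy(argb + i * 4, &color, 4); // pixel byte order is backend-dependent
    }
    // xi/yi = 0: start at the top-left corner of the source image
    OsdDrawARGB(0, 0, height, width, pitch, argb, x, y);
    free(argb);
}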
@@ -2864,17 +2864,20 @@ void OsdDrawARGB(int xi, int yi, int height, int width, int pitch, const uint8_t
//////////////////////////////////////////////////////////////////////////////
/**
** Return command line help string.
*/
const char *CommandLineHelp(void)
{
return " -a device\taudio device (fe. alsa: hw:0,0 oss: /dev/dsp)\n"
" -p device\taudio device for pass-through (hw:0,1 or /dev/dsp1)\n"
" -c channel\taudio mixer channel name (fe. PCM)\n" " -d display\tdisplay of x11 server (fe. :0.0)\n"
" -c channel\taudio mixer channel name (fe. PCM)\n" " -d display\tdisplay of x11 server (fe. :0.0)\n"
" -f\t\tstart with fullscreen window (only with window manager)\n"
" -g geometry\tx11 window geometry wxh+x+y\n" " -v device\tvideo driver device (cuvid)\n"
" -g geometry\tx11 window geometry wxh+x+y\n"
" -r Refresh\tRefreshrate for DRM (default is 50 Hz)\n"
" -C Connector\tConnector for DRM (default is current Connector)\n"
" -v device\tvideo driver device (cuvid)\n"
" -s\t\tstart in suspended mode\n" " -x\t\tstart x11 server, with -xx try to connect, if this fails\n"
" -X args\tX11 server arguments (f.e. -nocursor)\n" " -w workaround\tenable/disable workarounds\n"
" -X args\tX11 server arguments (f.e. -nocursor)\n" " -w workaround\tenable/disable workarounds\n"
"\tno-hw-decoder\t\tdisable hw decoder, use software decoder only\n"
"\tno-mpeg-hw-decoder\tdisable hw decoder for mpeg only\n"
"\tstill-hw-decoder\tenable hardware decoder for still-pictures\n"
@@ -2883,14 +2886,14 @@ const char *CommandLineHelp(void)
"\talsa-no-close-open\tdisable close open to fix alsa no sound bug\n"
"\talsa-close-open-delay\tenable close open delay to fix no sound bug\n"
"\tignore-repeat-pict\tdisable repeat pict message\n"
"\tuse-possible-defect-frames prefer faster channel switch\n" " -D\t\tstart in detached mode\n";
"\tuse-possible-defect-frames prefer faster channel switch\n" " -D\t\tstart in detached mode\n";
}
/**
** Process the command line arguments.
**
** @param argc number of arguments
** @param argv arguments vector
*/
int ProcessArgs(int argc, char *const argv[])
{
@@ -2905,13 +2908,19 @@ int ProcessArgs(int argc, char *const argv[])
#endif
for (;;) {
switch (getopt(argc, argv, "-a:c:d:fg:p:sv:w:xDX:")) {
switch (getopt(argc, argv, "-a:c:C:r:d:fg:p:sv:w:xDX:")) {
case 'a': // audio device for pcm
AudioSetDevice(optarg);
continue;
case 'c': // channel of audio mixer
AudioSetChannel(optarg);
continue;
case 'C': // Connector for DRM
VideoSetConnector(optarg);
continue;
case 'r': // Refresh rate for DRM
VideoSetRefresh(optarg);
continue;
case 'p': // pass-through audio device
AudioSetPassthroughDevice(optarg);
continue;
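Editor's note: in VDR the new options reach ProcessArgs() through the plugin argument string, e.g. vdr -P 'softhddrm -r 50 -C <connector>'. A minimal standalone sketch that exercises the parser directly; the connector value and the meaning of the return value are assumptions of this example:

int ProcessArgs(int argc, char *const argv[]);  // from softhddev.c

int main(void)
{
    char *const args[] = {
        "softhddrm",                    // argv[0]: plugin / program name
        "-r", "50",                     // DRM refresh rate in Hz (default 50)
        "-C", "HDMI-A-1",               // DRM connector; the name format is an assumption
        (char *)0
    };

    ProcessArgs(5, args);               // return value semantics not relied on here
    return 0;
}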
@@ -3008,9 +3017,9 @@ static const char *X11Server = LOCALBASE "/bin/X"; ///< default x11 server
static pid_t X11ServerPid; ///< x11 server pid
/**
** USR1 signal handler.
**
** @param sig signal number
*/
static void Usr1Handler(int __attribute__((unused)) sig)
{
@@ -3020,7 +3029,7 @@ static void Usr1Handler(int __attribute__((unused)) sig)
}
/**
** Start the X server
*/
static void StartXServer(void)
{
@@ -3104,7 +3113,7 @@ static void StartXServer(void)
}
/**
** Exit + cleanup.
*/
void SoftHdDeviceExit(void)
{
@@ -3164,11 +3173,11 @@ void SoftHdDeviceExit(void)
}
/**
** Prepare plugin.
**
** @retval 0 normal start
** @retval 1 suspended start
** @retval -1 detached start
*/
int Start(void)
{
@@ -3210,9 +3219,9 @@ int Start(void)
}
/**
** Stop plugin.
**
** @note stop everything, but don't cleanup, module is still called.
*/
void Stop(void)
{
@@ -3222,7 +3231,7 @@ void Stop(void)
}
/**
** Perform any cleanup or other regular tasks.
*/
void Housekeeping(void)
{
@@ -3252,7 +3261,7 @@ void Housekeeping(void)
}
/**
** Main thread hook, periodically called from the main thread.
*/
void MainThreadHook(void)
{
@@ -3273,11 +3282,11 @@ void MainThreadHook(void)
extern void DelPip(void);
/**
** Suspend plugin.
**
** @param video suspend closes video
** @param audio suspend closes audio
** @param dox11 suspend closes x11 server
*/
void Suspend(int video, int audio, int dox11)
{
@@ -3320,7 +3329,7 @@ void Suspend(int video, int audio, int dox11)
}
/**
** Resume plugin.
*/
void Resume(void)
{
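Editor's note: a short sketch of the suspend/resume pair, assuming only the declarations above; keeping the X server running is just one example of how the flags can be combined:

void Suspend(int video, int audio, int dox11);
void Resume(void);

// Hypothetical idle handling: close video and audio but keep the X server
// running, then bring everything back when the user returns.
static void EnterIdle(void)
{
    Suspend(1 /* close video */, 1 /* close audio */, 0 /* keep x11 */);
}

static void LeaveIdle(void)
{
    Resume();
}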
@@ -3354,12 +3363,12 @@ void Resume(void)
}
/**
** Get decoder statistics.
**
** @param[out] missed missed frames
** @param[out] duped duped frames
** @param[out] dropped dropped frames
** @param[out] counter number of decoded frames
*/
void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime)
{
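Editor's note: a small debug helper built on GetStats(); frametime is not described by the comment above, so the label used for it here is an assumption:

#include <stdio.h>

void GetStats(int *missed, int *duped, int *dropped, int *counter, float *frametime);

// Hypothetical debug helper: dump the decoder statistics once.
static void LogStats(void)
{
    int missed = 0, duped = 0, dropped = 0, counter = 0;
    float frametime = 0.0f;             // presumably a per-frame time, not documented above

    GetStats(&missed, &duped, &dropped, &counter, &frametime);
    printf("frames %d, missed %d, duped %d, dropped %d, frametime %.2f\n",
        counter, missed, duped, dropped, frametime);
}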
@@ -3374,12 +3383,12 @@ void GetStats(int *missed, int *duped, int *dropped, int *counter, float *framet
}
/**
** Scale the currently shown video.
**
** @param x video window x coordinate OSD relative
** @param y video window y coordinate OSD relative
** @param width video window width OSD relative
** @param height video window height OSD relative
*/
void ScaleVideo(int x, int y, int width, int height)
{
@@ -3395,16 +3404,16 @@ void ScaleVideo(int x, int y, int width, int height)
#ifdef USE_PIP
/**
** Set PIP position.
**
** @param x video window x coordinate OSD relative
** @param y video window y coordinate OSD relative
** @param width video window width OSD relative
** @param height video window height OSD relative
** @param pip_x pip window x coordinate OSD relative
** @param pip_y pip window y coordinate OSD relative
** @param pip_width pip window width OSD relative
** @param pip_height pip window height OSD relative
*/
void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height)
{
@@ -3420,16 +3429,16 @@ void PipSetPosition(int x, int y, int width, int height, int pip_x, int pip_y, i
}
/**
** Start PIP stream.
**
** @param x video window x coordinate OSD relative
** @param y video window y coordinate OSD relative
** @param width video window width OSD relative
** @param height video window height OSD relative
** @param pip_x pip window x coordinate OSD relative
** @param pip_y pip window y coordinate OSD relative
** @param pip_width pip window width OSD relative
** @param pip_height pip window height OSD relative
*/
void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip_width, int pip_height)
{
@@ -3444,7 +3453,7 @@ void PipStart(int x, int y, int width, int height, int pip_x, int pip_y, int pip
}
/**
** Stop PIP.
*/
void PipStop(void)
{
@@ -3464,12 +3473,12 @@ void PipStop(void)
}
/**
** PIP play video packet.
**
** @param data data of exactly one complete PES packet
** @param size size of PES packet
**
** @return number of bytes used, 0 if internal buffers are full.
*/
int PipPlayVideo(const uint8_t * data, int size)
{
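Editor's note: the PIP entry points combine into a simple lifecycle: PipStart(), a feed loop over PipPlayVideo(), then PipStop(). A minimal sketch with arbitrary window geometry on an assumed 1920x1080 OSD; coordinates are OSD-relative as documented above:

#include <stdint.h>
#include <unistd.h>

void PipStart(int x, int y, int width, int height,
    int pip_x, int pip_y, int pip_width, int pip_height);
void PipStop(void);
int PipPlayVideo(const uint8_t *data, int size);

// Hypothetical example: main video keeps the full OSD, the PIP window sits
// in the top-right corner; packet data and geometry are illustrative only.
static void PipDemo(const uint8_t *const *packets, const int *sizes, int n)
{
    PipStart(0, 0, 1920, 1080,          // main video: full OSD
        1280, 40, 600, 338);            // pip window: top-right corner
    for (int i = 0; i < n; ++i) {
        while (PipPlayVideo(packets[i], sizes[i]) == 0) {
            usleep(5 * 1000);           // internal buffers full: retry
        }
    }
    PipStop();
}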

video.c (1063 changes): file diff suppressed because it is too large.