Mirror of https://projects.vdr-developer.org/git/vdr-plugin-softhddevice.git, synced 2023-10-10 17:16:51 +00:00
Compare commits
44 Commits
74a62e3649
7e1a42f7ed
dda9011abc
de79e9211f
b0d9f41020
4d1a516c80
995f1286bd
fd0ae12f24
db258a0fbd
0df8e8a5fc
6a28064dce
b5e9077c74
3b4ace14cf
5aa868c296
43b48224b5
144f22314f
51eb720265
e977007dd3
769f00b4f6
aa426cd8b2
b2cab00599
b54d62ef35
9b68248a3e
762959fbb4
07b426f2b5
668a6ec277
82f61de117
67e571f02b
c17af0e958
2561214c3e
7382bd60ff
73b93f1aba
0243b1c8a7
6ce760ccd8
2f869884ba
5d8dea1b6b
1f232db5b4
c4ad13c53f
98f73f2199
89ca44206c
5c9b85b69b
09cfab3856
30e903d90a
852d367225

ChangeLog (25)
@@ -1,3 +1,28 @@
User johns
Date:

Experimental ac3 audio drift correction support.
Removes LPCM detection from TS parser.
Rewrote video/audio start code.
Add support for attach/detach plugin.
OSS needs bigger audio buffers.
Improved audio drift correction support.
Experimental audio drift correction support.
Add SVDRP HOTK command support.
Increased audio buffer time for PES packets.
Support configuration and set of video background.
Survive lost X11 display.
Fix bug: 100% cpu use with plugins like mp3.
Wakeup display thread on channel switch, osd can now be shown without video.
Makes 60Hz display mode configurable with setup.conf.
Support downmix of AC-3 to stereo.
New audio PES packet parser.
Fix bug: Grabbing a JPG image fails while suspended.
Add support for hot keys.
Add support to use characters input in edit mode.
Adds trick speed support.

User johns
Date: Thu Feb 16 09:59:14 CET 2012

Makefile (12)
@@ -19,8 +19,12 @@ GIT_REV = $(shell git describe --always 2>/dev/null)
### Configuration (edit this for your needs)

CONFIG := #-DDEBUG
CONFIG += -DAV_INFO
#CONFIG += -DHAVE_PTHREAD_NAME
#CONFIG += -DUSE_AUDIO_DRIFT_CORRECTION # build new audio drift code
#CONFIG += -DUSE_AC3_DRIFT_CORRECTION # build new ac-3 drift code
CONFIG += -DAV_INFO -DAV_INFO_TIME=3000 # debug a/v sync
#CONFIG += -DHAVE_PTHREAD_NAME # supports new pthread_setname_np
#CONFIG += -DNO_TS_AUDIO # disable ts audio parser
#CONFIG += -DUSE_TS_VIDEO # build new ts video parser
CONFIG += $(shell pkg-config --exists vdpau && echo "-DUSE_VDPAU")
CONFIG += $(shell pkg-config --exists libva && echo "-DUSE_VAAPI")
CONFIG += $(shell pkg-config --exists alsa && echo "-DUSE_ALSA")
@@ -66,7 +70,7 @@ DEFINES += $(CONFIG) -D_GNU_SOURCE -DPLUGIN_NAME_I18N='"$(PLUGIN)"' \
$(if $(GIT_REV), -DGIT_REV='"$(GIT_REV)"')

_CFLAGS = $(DEFINES) $(INCLUDES) \
$(shell pkg-config --cflags libavcodec libavformat) \
$(shell pkg-config --cflags libavcodec) \
`pkg-config --cflags x11 x11-xcb xcb xcb-xv xcb-shm xcb-dpms xcb-atom\
xcb-screensaver xcb-randr xcb-glx xcb-icccm xcb-keysyms`\
`pkg-config --cflags gl glu` \
@@ -82,7 +86,7 @@ override CXXFLAGS += $(_CFLAGS)
override CFLAGS += $(_CFLAGS)

LIBS += -lrt \
$(shell pkg-config --libs libavcodec libavformat) \
$(shell pkg-config --libs libavcodec) \
`pkg-config --libs x11 x11-xcb xcb xcb-xv xcb-shm xcb-dpms xcb-atom\
xcb-screensaver xcb-randr xcb-glx xcb-icccm xcb-keysyms`\
`pkg-config --libs gl glu` \

README.txt (31)
@@ -139,12 +139,17 @@ Setup: /etc/vdr/setup.conf

softhddevice.AudioDelay = 0
    +n or -n ms
    delay audio or delay video

softhddevice.AudioPassthrough = 0
    0 = none, 1 = AC-3

    for AC-3 the pass-through device is used.

softhddevice.AudioDownmix = 0
    0 = none, 1 = downmix
    downmix AC-3 to stereo.

softhddevice.AutoCrop.Interval = 0
    0 disables auto-crop
    n each 'n' frames auto-crop is checked.
@@ -157,6 +162,12 @@ Setup: /etc/vdr/setup.conf
    if detected crop area is too small, cut max 'n' pixels at top and
    bottom.

softhddevice.Background = 0
    32bit RGBA background color
    (Red * 16777216 + Green * 65536 + Blue * 256 + Alpha)
    or hex RRGGBBAA
    grey = 2155905279
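
For reference, the grey value quoted above is just the formula evaluated for 0x80/0x80/0x80/0xFF; a minimal sketch (illustration only, not plugin code):

    #include <stdio.h>
    #include <stdint.h>

    /* Pack 8-bit R, G, B, A into the 32-bit value expected by
     * softhddevice.Background (Red * 16777216 + Green * 65536 + Blue * 256 + Alpha). */
    static uint32_t background_rgba(uint8_t r, uint8_t g, uint8_t b, uint8_t a)
    {
        return ((uint32_t)r << 24) | ((uint32_t)g << 16) | ((uint32_t)b << 8) | a;
    }

    int main(void)
    {
        /* 0x808080FF: mid grey, fully opaque */
        printf("%u\n", (unsigned)background_rgba(0x80, 0x80, 0x80, 0xFF)); /* prints 2155905279 */
        return 0;
    }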

softhddevice.SkipLines = 0
    skip 'n' lines at top and bottom of the video picture.

@@ -171,6 +182,10 @@ Setup: /etc/vdr/setup.conf
softhddevice.Suspend.X11 = 0
    1 suspend stops X11 server (not working yet)

softhddevice.60HzMode = 0
    0 disable 60Hz display mode
    1 enable 60Hz display mode

VideoDisplayFormat = ?
    0 pan and scan
    1 letter box
@@ -209,8 +224,20 @@ Commandline:

SVDRP:
------

Use 'svdrpsend.pl plug softhddevice HELP' to see the SVDRP commands
help and which are supported by the plugin.
Use 'svdrpsend.pl plug softhddevice HELP'
or 'svdrpsend plug softhddevice HELP' to see the SVDRP commands help
and which are supported by the plugin.

Keymacros:
----------

See keymacros.conf how to setup the macros.

These are the supported key sequences:

@softhddevice Blue 1 0 disable pass-through
@softhddevice Blue 1 1 enable pass-through
@softhddevice Blue 1 2 toggle pass-through

Running:
--------

Todo (18)
@@ -25,7 +25,8 @@ missing:
    suspend plugin didn't restore full-screen (is this wanted?)
    Option deinterlace off / deinterlace force!
    ColorSpace aren't configurable with the gui.
    Inverse telecine isn't configurable with the gui.
    Replay of old vdr 1.6 recordings.
    svdrp support for hot-keys.

crash:
    AudioPlayHandlerThread -> pthread_cond_wait
@@ -36,9 +37,10 @@ video:
    reduce warnings after channel switch
    grab image with hardware and better scaling support
    hard channel switch
    OSD can only be shown after some stream could be shown
    yaepghd changed position is lost on channel switch
    pause (live tv) has sometime problems with SAT1 HD Pro7 HD
    radio show black background
    radio no need to wait on video buffers

vdpau:
    software decoder path not working
@@ -76,13 +78,16 @@ x11:
    support embedded mode

audio:
    write TS -> PES parser, which feeds audio before the next start packet
    Combine alsa+oss ringbuffer code.
    Make alsa thread/polled and oss thread/polled output module runtime
    selectable.
    software volume support (could be done with asound.conf)
    Mute should do a real mute and not only set volume to zero.
    Starting suspended and muted, didn't register the mute.
    Relaxed audio sync checks at end of packet and already in sync
    samplerate problem resume/suspend.
    only wait for video start, if video is running.
    Not primary device, don't use and block audio/video.

audio/alsa:
    better downmix of >2 channels on 2 channel hardware
@@ -99,8 +104,11 @@ HDMI/SPDIF Passthrough:
    only AC-3 written

playback of recording
    pause is not reset, when replay exit
    replay/pause need 100% cpu
    pause is not reset, when replay exit (fixed?)
    replay/pause need 100% cpu (fixed?)

plugins:
    mp3 plugin needs 100% cpu (OSD updates?)

setup:
    Setup of decoder type.

audio.c (317)
@@ -107,9 +107,11 @@ typedef struct _audio_module_

void (*Thread) (void); ///< module thread handler
void (*Enqueue) (const void *, int); ///< enqueue samples for output
void (*VideoReady) (void); ///< video ready, start audio
void (*FlushBuffers) (void); ///< flush sample buffers
void (*Poller) (void); ///< output poller
int (*FreeBytes) (void); ///< number of bytes free in buffer
int (*UsedBytes) (void); ///< number of bytes used in buffer
uint64_t(*GetDelay) (void); ///< get current audio delay
void (*SetVolume) (int); ///< set output volume
int (*Setup) (int *, int *, int); ///< setup channels, samplerate
@@ -137,11 +139,12 @@ static const char *AudioMixerDevice; ///< alsa/OSS mixer device name
static const char *AudioMixerChannel; ///< alsa/OSS mixer channel name
static volatile char AudioRunning; ///< thread running / stopped
static volatile char AudioPaused; ///< audio paused
static volatile char AudioVideoIsReady; ///< video ready start early
static unsigned AudioSampleRate; ///< audio sample rate in hz
static unsigned AudioChannels; ///< number of audio channels
static const int AudioBytesProSample = 2; ///< number of bytes per sample
static int64_t AudioPTS; ///< audio pts clock
static const int AudioBufferTime = 350; ///< audio buffer time in ms
static int AudioBufferTime = 336; ///< audio buffer time in ms

#ifdef USE_AUDIO_THREAD
static pthread_t AudioThread; ///< audio play thread
@@ -151,7 +154,7 @@ static pthread_cond_t AudioStartCond; ///< condition variable
static const int AudioThread; ///< dummy audio thread
#endif

extern int VideoAudioDelay; /// import audio/video delay
extern int VideoAudioDelay; ///< import audio/video delay

#ifdef USE_AUDIORING

@@ -289,15 +292,19 @@ static int AlsaAddToRingbuffer(const void *samples, int count)
// too many bytes are lost
// FIXME: should skip more, longer skip, but less often?
}
// Update audio clock (stupid gcc developers thinks INT64_C is unsigned)
if (AudioPTS != (int64_t) INT64_C(0x8000000000000000)) {
AudioPTS +=
((int64_t) count * 90000) / (AudioSampleRate * AudioChannels *
AudioBytesProSample);
}

if (!AudioRunning) {
if (AlsaStartThreshold < RingBufferUsedBytes(AlsaRingBuffer)) {
Debug(4, "audio/alsa: start %4zdms\n",
(RingBufferUsedBytes(AlsaRingBuffer) * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample));

// forced start
if (AlsaStartThreshold * 2 < RingBufferUsedBytes(AlsaRingBuffer)) {
return 1;
}
// enough video + audio buffered
if (AudioVideoIsReady
&& AlsaStartThreshold < RingBufferUsedBytes(AlsaRingBuffer)) {
// restart play-back
return 1;
}
@@ -326,7 +333,8 @@ static int AlsaPlayRingbuffer(void)
if (n == -EAGAIN) {
continue;
}
Error(_("audio/alsa: underrun error?\n"));
Error(_("audio/alsa: avail underrun error? '%s'\n"),
snd_strerror(n));
err = snd_pcm_recover(AlsaPCMHandle, n, 0);
if (err >= 0) {
continue;
@@ -342,6 +350,15 @@ static int AlsaPlayRingbuffer(void)
if (AudioThread) {
if (!AudioAlsaDriverBroken) {
Error(_("audio/alsa: broken driver %d\n"), avail);
Error("audio/alsa: state %s\n",
snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));
}
if (snd_pcm_state(AlsaPCMHandle)
== SND_PCM_STATE_PREPARED) {
if ((err = snd_pcm_start(AlsaPCMHandle)) < 0) {
Error(_("audio/alsa: snd_pcm_start(): %s\n"),
snd_strerror(err));
}
}
usleep(5 * 1000);
}
@@ -353,6 +370,9 @@ static int AlsaPlayRingbuffer(void)
n = RingBufferGetReadPointer(AlsaRingBuffer, &p);
if (!n) { // ring buffer empty
if (first) { // only error on first loop
Debug(4, "audio/alsa: empty buffers %d\n", avail);
// ring buffer empty
// AlsaLowWaterMark = 1;
return 1;
}
return 0;
@@ -382,7 +402,8 @@ static int AlsaPlayRingbuffer(void)
goto again;
}
*/
Error(_("audio/alsa: underrun error?\n"));
Error(_("audio/alsa: writei underrun error? '%s'\n"),
snd_strerror(err));
err = snd_pcm_recover(AlsaPCMHandle, err, 0);
if (err >= 0) {
goto again;
@@ -414,7 +435,7 @@ static void AlsaFlushBuffers(void)
RingBufferReadAdvance(AlsaRingBuffer,
RingBufferUsedBytes(AlsaRingBuffer));
state = snd_pcm_state(AlsaPCMHandle);
Debug(3, "audio/alsa: state %d - %s\n", state,
Debug(3, "audio/alsa: flush state %d - %s\n", state,
snd_pcm_state_name(state));
if (state != SND_PCM_STATE_OPEN) {
if ((err = snd_pcm_drop(AlsaPCMHandle)) < 0) {
@@ -427,6 +448,7 @@ static void AlsaFlushBuffers(void)
}
}
AudioRunning = 0;
AudioVideoIsReady = 0;
AudioPTS = INT64_C(0x8000000000000000);
}

@@ -451,6 +473,14 @@ static int AlsaFreeBytes(void)
return AlsaRingBuffer ? RingBufferFreeBytes(AlsaRingBuffer) : INT32_MAX;
}

/**
** Get used bytes in audio output.
*/
static int AlsaUsedBytes(void)
{
return AlsaRingBuffer ? RingBufferUsedBytes(AlsaRingBuffer) : 0;
}

#if 0

//----------------------------------------------------------------------------
@@ -573,8 +603,6 @@ static void AlsaEnqueue(const void *samples, int count)
Debug(3, "audio/alsa: unpaused\n");
}
}
// Update audio clock
// AudioPTS += (size * 90000) / (AudioSampleRate * AudioChannels * AudioBytesProSample);
}

#endif
@@ -633,14 +661,15 @@ static void AlsaThread(void)
break;
}
// wait for space in kernel buffers
if ((err = snd_pcm_wait(AlsaPCMHandle, 100)) < 0) {
Error(_("audio/alsa: wait underrun error?\n"));
if ((err = snd_pcm_wait(AlsaPCMHandle, 24)) < 0) {
Error(_("audio/alsa: wait underrun error? '%s'\n"),
snd_strerror(err));
err = snd_pcm_recover(AlsaPCMHandle, err, 0);
if (err >= 0) {
continue;
}
Error(_("audio/alsa: snd_pcm_wait(): %s\n"), snd_strerror(err));
usleep(100 * 1000);
usleep(24 * 1000);
continue;
}
if (AlsaFlushBuffer || AudioPaused) {
@@ -654,11 +683,12 @@ static void AlsaThread(void)
}
state = snd_pcm_state(AlsaPCMHandle);
if (state != SND_PCM_STATE_RUNNING) {
Debug(3, "audio/alsa: stopping play\n");
Debug(3, "audio/alsa: stopping play '%s'\n",
snd_pcm_state_name(state));
break;
}
pthread_yield();
usleep(20 * 1000); // let fill/empty the buffers
usleep(24 * 1000); // let fill/empty the buffers
}
}
}
@@ -687,6 +717,26 @@ static void AlsaThreadEnqueue(const void *samples, int count)
}
}

/**
** Video is ready, start audio if possible,
*/
static void AlsaVideoReady(void)
{
if (AudioSampleRate && AudioChannels) {
Debug(3, "audio/alsa: start %4zdms video start\n",
(RingBufferUsedBytes(AlsaRingBuffer) * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample));
}

if (!AudioRunning) {
// enough video + audio buffered
if (AlsaStartThreshold < RingBufferUsedBytes(AlsaRingBuffer)) {
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
}
}
}

/**
** Flush alsa buffers with thread.
*/
@@ -720,12 +770,12 @@ static snd_pcm_t *AlsaOpenPCM(int use_ac3)

// &&|| hell
if (!(use_ac3 && ((device = AudioAC3Device)
|| (device = getenv("ALSA_AC3_DEVICE"))
|| (device = getenv("ALSA_PASSTHROUGH_DEVICE"))))
|| (device = getenv("ALSA_AC3_DEVICE"))))
&& !(device = AudioPCMDevice) && !(device = getenv("ALSA_DEVICE"))) {
device = "default";
}
Debug(3, "audio/alsa: &&|| hell '%s'\n", device);
Info(_("audio/alsa: using %sdevice '%s'\n"), use_ac3 ? "ac3 " : "",
device);

// open none blocking; if device is already used, we don't want wait
if ((err =
@@ -752,7 +802,8 @@ static void AlsaInitPCM(void)
snd_pcm_t *handle;
snd_pcm_hw_params_t *hw_params;
int err;
snd_pcm_uframes_t buffer_size;

//snd_pcm_uframes_t buffer_size;

if (!(handle = AlsaOpenPCM(0))) {
return;
@@ -767,8 +818,9 @@ static void AlsaInitPCM(void)
}
AlsaCanPause = snd_pcm_hw_params_can_pause(hw_params);
Info(_("audio/alsa: supports pause: %s\n"), AlsaCanPause ? "yes" : "no");
snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size);
Info(_("audio/alsa: max buffer size %lu\n"), buffer_size);
// needs audio setup
//snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size);
//Info(_("audio/alsa: max buffer size %lu\n"), buffer_size);

AlsaPCMHandle = handle;
}
@@ -868,6 +920,9 @@ static uint64_t AlsaGetDelay(void)
if (!AlsaPCMHandle || !AudioSampleRate) {
return 0UL;
}
if (!AudioRunning) { // audio not running
return 0UL;
}
// FIXME: thread safe? __assert_fail_base in snd_pcm_delay

// delay in frames in alsa + kernel buffers
@@ -941,7 +996,7 @@ static int AlsaSetup(int *freq, int *channels, int use_ac3)
snd_pcm_set_params(AlsaPCMHandle, SND_PCM_FORMAT_S16,
AlsaUseMmap ? SND_PCM_ACCESS_MMAP_INTERLEAVED :
SND_PCM_ACCESS_RW_INTERLEAVED, *channels, *freq, 1,
125 * 1000))) {
96 * 1000))) {
Error(_("audio/alsa: set params error: %s\n"), snd_strerror(err));

/*
@@ -1091,16 +1146,21 @@ static int AlsaSetup(int *freq, int *channels, int use_ac3)
// update buffer

snd_pcm_get_params(AlsaPCMHandle, &buffer_size, &period_size);
Info(_("audio/alsa: buffer size %lu, period size %lu\n"), buffer_size,
period_size);
Info(_("audio/alsa: buffer size %lu %zdms, period size %lu %zdms\n"),
buffer_size, snd_pcm_frames_to_bytes(AlsaPCMHandle,
buffer_size) * 1000 / (AudioSampleRate * AudioChannels *
AudioBytesProSample), period_size,
snd_pcm_frames_to_bytes(AlsaPCMHandle,
period_size) * 1000 / (AudioSampleRate * AudioChannels *
AudioBytesProSample));
Debug(3, "audio/alsa: state %s\n",
snd_pcm_state_name(snd_pcm_state(AlsaPCMHandle)));

AlsaStartThreshold = snd_pcm_frames_to_bytes(AlsaPCMHandle, period_size);
// buffer time/delay in ms
delay = AudioBufferTime;
if (VideoAudioDelay > -100) {
delay += 100 + VideoAudioDelay / 90;
if (VideoAudioDelay > 0) {
delay += VideoAudioDelay / 90;
}
if (AlsaStartThreshold <
(*freq * *channels * AudioBytesProSample * delay) / 1000U) {
@@ -1216,13 +1276,16 @@ static const AudioModule AlsaModule = {
#ifdef USE_AUDIO_THREAD
.Thread = AlsaThread,
.Enqueue = AlsaThreadEnqueue,
.VideoReady = AlsaVideoReady,
.FlushBuffers = AlsaThreadFlushBuffers,
#else
.Enqueue = AlsaEnqueue,
.VideoReady = AlsaVideoReady,
.FlushBuffers = AlsaFlushBuffers,
#endif
.Poller = AlsaPoller,
.FreeBytes = AlsaFreeBytes,
.UsedBytes = AlsaUsedBytes,
.GetDelay = AlsaGetDelay,
.SetVolume = AlsaSetVolume,
.Setup = AlsaSetup,
@@ -1248,6 +1311,7 @@ static int OssPcmFildes = -1; ///< pcm file descriptor
static int OssMixerFildes = -1; ///< mixer file descriptor
static int OssMixerChannel; ///< mixer channel index
static RingBuffer *OssRingBuffer; ///< audio ring buffer
static int OssFragmentTime; ///< fragment time in ms
static unsigned OssStartThreshold; ///< start play, if filled

#ifdef USE_AUDIO_THREAD
@@ -1276,15 +1340,19 @@ static int OssAddToRingbuffer(const void *samples, int count)
// too many bytes are lost
// FIXME: should skip more, longer skip, but less often?
}
// Update audio clock (stupid gcc developers thinks INT64_C is unsigned)
if (AudioPTS != (int64_t) INT64_C(0x8000000000000000)) {
AudioPTS +=
((int64_t) count * 90000) / (AudioSampleRate * AudioChannels *
AudioBytesProSample);
}

if (!AudioRunning) {
if (OssStartThreshold < RingBufferUsedBytes(OssRingBuffer)) {
Debug(4, "audio/oss: start %4zdms\n",
(RingBufferUsedBytes(OssRingBuffer) * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample));

// forced start
if (OssStartThreshold * 2 < RingBufferUsedBytes(OssRingBuffer)) {
return 1;
}
// enough video + audio buffered
if (AudioVideoIsReady
&& OssStartThreshold < RingBufferUsedBytes(OssRingBuffer)) {
// restart play-back
return 1;
}
@@ -1333,7 +1401,7 @@ static int OssPlayRingbuffer(void)
Error(_("audio/oss: write error: %s\n"), strerror(errno));
return 1;
}
Error(_("audio/oss: error not all bytes written\n"));
Warning(_("audio/oss: error not all bytes written\n"));
}
// advance how many could written
RingBufferReadAdvance(OssRingBuffer, n);
@@ -1358,6 +1426,7 @@ static void OssFlushBuffers(void)
}
}
AudioRunning = 0;
AudioVideoIsReady = 0;
AudioPTS = INT64_C(0x8000000000000000);
}

@@ -1416,6 +1485,14 @@ static int OssFreeBytes(void)
return OssRingBuffer ? RingBufferFreeBytes(OssRingBuffer) : INT32_MAX;
}

/**
** Get used bytes in audio output.
*/
static int OssUsedBytes(void)
{
return OssRingBuffer ? RingBufferUsedBytes(OssRingBuffer) : 0;
}

#ifdef USE_AUDIO_THREAD

//----------------------------------------------------------------------------
@@ -1446,10 +1523,10 @@ static void OssThread(void)
fds[0].fd = OssPcmFildes;
fds[0].events = POLLOUT | POLLERR;
// wait for space in kernel buffers
err = poll(fds, 1, 100);
err = poll(fds, 1, OssFragmentTime);
if (err < 0) {
Error(_("audio/oss: error poll %s\n"), strerror(errno));
usleep(100 * 1000);
usleep(OssFragmentTime * 1000);
continue;
}
@@ -1462,7 +1539,7 @@ static void OssThread(void)
break;
}
pthread_yield();
usleep(20 * 1000); // let fill/empty the buffers
usleep(OssFragmentTime * 1000); // let fill/empty the buffers
}
}
}
@@ -1486,6 +1563,26 @@ static void OssThreadEnqueue(const void *samples, int count)
}
}

/**
** Video is ready, start audio if possible,
*/
static void OssVideoReady(void)
{
if (AudioSampleRate && AudioChannels) {
Debug(3, "audio/oss: start %4zdms video start\n",
(RingBufferUsedBytes(OssRingBuffer) * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample));
}

if (!AudioRunning) {
// enough video + audio buffered
if (OssStartThreshold < RingBufferUsedBytes(OssRingBuffer)) {
AudioRunning = 1;
pthread_cond_signal(&AudioStartCond);
}
}
}

/**
** Flush OSS buffers with thread.
*/
@@ -1522,7 +1619,7 @@ static int OssOpenPCM(int use_ac3)
&& !(device = AudioPCMDevice) && !(device = getenv("OSS_AUDIODEV"))) {
device = "/dev/dsp";
}
Debug(3, "audio/oss: &&|| hell '%s'\n", device);
Info(_("audio/oss: using %sdevice '%s'\n"), use_ac3 ? "ac3" : "", device);

if ((fildes = open(device, O_WRONLY)) < 0) {
Error(_("audio/oss: can't open dsp device '%s': %s\n"), device,
@@ -1642,8 +1739,7 @@ static uint64_t OssGetDelay(void)
if (OssPcmFildes == -1) { // setup failure
return 0UL;
}

if (!AudioRunning) {
if (!AudioRunning) { // audio not running
return 0UL;
}
// delay in bytes in kernel buffers
@@ -1653,18 +1749,14 @@ static uint64_t OssGetDelay(void)
strerror(errno));
return 0UL;
}
if (delay == -1) {
delay = 0UL;
if (delay < 0) {
delay = 0;
}

pts = ((uint64_t) delay * 90 * 1000)
pts = ((uint64_t) (delay + RingBufferUsedBytes(OssRingBuffer)) * 90 * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample);
pts += ((uint64_t) RingBufferUsedBytes(OssRingBuffer) * 90 * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample);
if (pts > 600 * 90) {
Debug(4, "audio/oss: hw+sw delay %zd %" PRId64 " ms\n",
RingBufferUsedBytes(OssRingBuffer), pts / 90);
}

return pts;
}
@@ -1687,6 +1779,7 @@ static int OssSetup(int *freq, int *channels, int use_ac3)
int ret;
int tmp;
int delay;
audio_buf_info bi;

if (OssPcmFildes == -1) { // OSS not ready
return -1;
@@ -1750,37 +1843,46 @@ static int OssSetup(int *freq, int *channels, int use_ac3)

// FIXME: setup buffers

if (1) {
audio_buf_info bi;
#ifdef SNDCTL_DSP_POLICY
tmp = 3;
if (ioctl(OssPcmFildes, SNDCTL_DSP_POLICY, &tmp) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_POLICY): %s\n"), strerror(errno));
} else {
Info("audio/oss: set policy to %d\n", tmp);
}
#endif

if (ioctl(OssPcmFildes, SNDCTL_DSP_GETOSPACE, &bi) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETOSPACE): %s\n"),
strerror(errno));
bi.fragsize = 4096;
bi.fragstotal = 16;
} else {
Debug(3, "audio/oss: %d bytes buffered\n", bi.bytes);
}

tmp = -1;
if (ioctl(OssPcmFildes, SNDCTL_DSP_GETODELAY, &tmp) == -1) {
Error(_("audio/oss: ioctl(SNDCTL_DSP_GETODELAY): %s\n"),
strerror(errno));
// FIXME: stop player, set setup failed flag
return -1;
}
if (tmp == -1) {
tmp = 0;
}
OssFragmentTime = (bi.fragsize * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample);

Info(_("audio/oss: buffer size %d %dms, fragment size %d %dms\n"),
bi.fragsize * bi.fragstotal, (bi.fragsize * bi.fragstotal * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample), bi.fragsize,
OssFragmentTime);

// start when enough bytes for initial write
OssStartThreshold = bi.bytes + tmp;
OssStartThreshold = (bi.fragsize - 1) * bi.fragstotal;

// buffer time/delay in ms
delay = AudioBufferTime;
if (VideoAudioDelay > -100) {
delay += 100 + VideoAudioDelay / 90;
delay = AudioBufferTime + 300;
if (VideoAudioDelay > 0) {
delay += VideoAudioDelay / 90;
}
if (OssStartThreshold <
(*freq * *channels * AudioBytesProSample * delay) / 1000U) {
(AudioSampleRate * AudioChannels * AudioBytesProSample * delay) /
1000U) {
OssStartThreshold =
(*freq * *channels * AudioBytesProSample * delay) / 1000U;
(AudioSampleRate * AudioChannels * AudioBytesProSample * delay) /
1000U;
}
// no bigger, than the buffer
if (OssStartThreshold > RingBufferFreeBytes(OssRingBuffer)) {
@@ -1789,7 +1891,6 @@ static int OssSetup(int *freq, int *channels, int use_ac3)

Info(_("audio/oss: delay %u ms\n"), (OssStartThreshold * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample));
}

return ret;
}
@@ -1843,13 +1944,16 @@ static const AudioModule OssModule = {
#ifdef USE_AUDIO_THREAD
.Thread = OssThread,
.Enqueue = OssThreadEnqueue,
.VideoReady = OssVideoReady,
.FlushBuffers = OssThreadFlushBuffers,
#else
.Enqueue = OssEnqueue,
.VideoReady = OssVideoReady,
.FlushBuffers = OssFlushBuffers,
#endif
.Poller = OssPoller,
.FreeBytes = OssFreeBytes,
.UsedBytes = OssUsedBytes,
.GetDelay = OssGetDelay,
.SetVolume = OssSetVolume,
.Setup = OssSetup,
@@ -1885,6 +1989,14 @@ static int NoopFreeBytes(void)
return INT32_MAX; // no driver, much space
}

/**
** Get used bytes in audio output.
*/
static int NoopUsedBytes(void)
{
return 0; // no driver, nothing used
}

/**
** Get audio delay in time stamps.
**
@@ -1932,9 +2044,11 @@ static void NoopVoid(void)
static const AudioModule NoopModule = {
.Name = "noop",
.Enqueue = NoopEnqueue,
.VideoReady = NoopVoid,
.FlushBuffers = NoopVoid,
.Poller = NoopVoid,
.FreeBytes = NoopFreeBytes,
.UsedBytes = NoopUsedBytes,
.GetDelay = NoopGetDelay,
.SetVolume = NoopSetVolume,
.Setup = NoopSetup,
@@ -1964,6 +2078,10 @@ static void *AudioPlayHandlerThread(void *dummy)
pthread_cond_wait(&AudioStartCond, &AudioMutex);
// cond_wait can return, without signal!
} while (!AudioRunning);

Debug(3, "audio: ----> %d ms\n", (AudioUsedBytes() * 1000)
/ (AudioSampleRate * AudioChannels * AudioBytesProSample));

pthread_mutex_unlock(&AudioMutex);

#ifdef USE_AUDIORING
@@ -2064,7 +2182,38 @@ static const AudioModule *AudioModules[] = {
*/
void AudioEnqueue(const void *samples, int count)
{
if (0) {
static uint32_t last;
static uint32_t tick;
static uint32_t max = 101;
uint64_t delay;

delay = AudioGetDelay();
tick = GetMsTicks();
if ((last && tick - last > max) && AudioRunning) {

//max = tick - last;
Debug(3, "audio: packet delta %d %lu\n", tick - last, delay / 90);
}
last = tick;
}
AudioUsedModule->Enqueue(samples, count);

// Update audio clock (stupid gcc developers thinks INT64_C is unsigned)
if (AudioPTS != (int64_t) INT64_C(0x8000000000000000)) {
AudioPTS +=
((int64_t) count * 90000) / (AudioSampleRate * AudioChannels *
AudioBytesProSample);
}
}

/**
** Video is ready.
*/
void AudioVideoReady(void)
{
AudioVideoIsReady = 1;
AudioUsedModule->VideoReady();
}

/**
@@ -2091,6 +2240,14 @@ int AudioFreeBytes(void)
return AudioUsedModule->FreeBytes();
}

/**
** Get used bytes in audio output.
*/
int AudioUsedBytes(void)
{
return AudioUsedModule->UsedBytes();
}

/**
** Get audio delay in time stamps.
**
@@ -2183,11 +2340,12 @@ int AudioSetup(int *freq, int *channels, int use_ac3)
void AudioPlay(void)
{
if (!AudioPaused) {
Warning("audio: not paused, check the code\n");
Debug(3, "audio: not paused, check the code\n");
return;
}
Debug(3, "audio: resumed\n");
AudioPaused = 0;
AudioEnqueue(NULL, 0); // wakeup thread
}

/**
@@ -2196,13 +2354,28 @@ void AudioPlay(void)
void AudioPause(void)
{
if (AudioPaused) {
Warning("audio: already paused, check the code\n");
Debug(3, "audio: already paused, check the code\n");
return;
}
Debug(3, "audio: paused\n");
AudioPaused = 1;
}

/**
** Set audio buffer time.
**
** PES audio packets have a max distance of 300 ms.
** TS audio packet have a max distance of 100 ms.
** The period size of the audio buffer is 24 ms.
*/
void AudioSetBufferTime(int delay)
{
if (!delay) {
delay = 336;
}
AudioBufferTime = delay;
}
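
As an aside to the 300 ms / 100 ms / 24 ms figures documented above, the byte, millisecond and 90 kHz PTS conversions used throughout audio.c (AlsaAddToRingbuffer, AudioEnqueue, the buffer Info messages) reduce to the following sketch; the 48 kHz stereo S16 numbers are an assumed example, not taken from the diff:

/* Illustration only, not part of audio.c. */
#include <stdint.h>

enum { SampleRate = 48000, Channels = 2, BytesProSample = 2 }; /* assumed example format */

static int bytes_to_ms(int64_t bytes)
{
    return (int)(bytes * 1000 / (SampleRate * Channels * BytesProSample));
}

static int64_t bytes_to_pts(int64_t bytes) /* 90 kHz ticks, as added to AudioPTS */
{
    return bytes * 90000 / (SampleRate * Channels * BytesProSample);
}

/* At 48 kHz stereo S16 the stream is 192000 bytes/s, so the default
 * AudioBufferTime of 336 ms (14 of the 24 ms periods mentioned above)
 * corresponds to 64512 buffered bytes or 30240 PTS ticks. */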

/**
** Set pcm audio device.
**

audio.h (5)
@@ -31,8 +31,7 @@ extern void AudioEnqueue(const void *, int); ///< buffer audio samples
extern void AudioFlushBuffers(void); ///< flush audio buffers
extern void AudioPoller(void); ///< poll audio events/handling
extern int AudioFreeBytes(void); ///< free bytes in audio output

//extern int AudioUsedBytes(void); ///< used bytes in audio output
extern int AudioUsedBytes(void); ///< used bytes in audio output
extern uint64_t AudioGetDelay(void); ///< get current audio delay
extern void AudioSetClock(int64_t); ///< set audio clock base
extern int64_t AudioGetClock(); ///< get current audio clock
@@ -42,6 +41,8 @@ extern int AudioSetup(int *, int *, int); ///< setup audio output
extern void AudioPlay(void); ///< play audio
extern void AudioPause(void); ///< pause audio

extern void AudioSetBufferTime(int); ///< set audio buffer time

extern void AudioSetDevice(const char *); ///< set PCM audio device
extern void AudioSetDeviceAC3(const char *); ///< set pass-through device
extern void AudioSetChannel(const char *); ///< set mixer channel

codec.c (558)
@@ -30,13 +30,10 @@
/// many bugs and incompatiblity in it. Don't use this shit.
///

/**
** use av_parser to support insane dvb audio streams.
*/
#define USE_AVPARSER

/// compile with passthrough support (experimental)
/// compile with passthrough support (stable, ac3 only)
#define USE_PASSTHROUGH
/// compile audio drift correction support (experimental)
#define noUSE_AUDIO_DRIFT_CORRECTION

#include <stdio.h>
#include <unistd.h>
@@ -355,7 +352,7 @@ void CodecVideoOpen(VideoDecoder * decoder, const char *name, int codec_id)
{
AVCodec *video_codec;

Debug(3, "codec: using codec %s or ID %#04x\n", name, codec_id);
Debug(3, "codec: using video codec %s or ID %#06x\n", name, codec_id);

if (decoder->VideoCtx) {
Error(_("codec: missing close\n"));
@@ -380,7 +377,7 @@ void CodecVideoOpen(VideoDecoder * decoder, const char *name, int codec_id)
if (name && (video_codec = avcodec_find_decoder_by_name(name))) {
Debug(3, "codec: vdpau decoder found\n");
} else if (!(video_codec = avcodec_find_decoder(codec_id))) {
Fatal(_("codec: codec ID %#04x not found\n"), codec_id);
Fatal(_("codec: codec ID %#06x not found\n"), codec_id);
// FIXME: none fatal
}
decoder->VideoCodec = video_codec;
@@ -603,8 +600,6 @@ struct _audio_decoder_
AVCodec *AudioCodec; ///< audio codec
AVCodecContext *AudioCtx; ///< audio codec context

/// audio parser to support insane dvb streaks
AVCodecParserContext *AudioParser;
int PassthroughAC3; ///< current ac-3 pass-through
int SampleRate; ///< current stream sample rate
int Channels; ///< current stream channels
@@ -614,6 +609,21 @@ struct _audio_decoder_

ReSampleContext *ReSample; ///< audio resampling context

int64_t LastDelay; ///< last delay
struct timespec LastTime; ///< last time
int64_t LastPTS; ///< last PTS

int Drift; ///< accumulated audio drift
int DriftCorr; ///< audio drift correction value
int DriftFrac; ///< audio drift fraction for ac3

struct AVResampleContext *AvResample; ///< second audio resample context
#define MAX_CHANNELS 8 ///< max number of channels supported
int16_t *Buffer[MAX_CHANNELS]; ///< deinterleave sample buffers
int BufferSize; ///< size of sample buffer
int16_t *Remain[MAX_CHANNELS]; ///< filter remaining samples
int RemainSize; ///< size of remain buffer
int RemainCount; ///< number of remaining samples
};

#ifdef USE_PASSTHROUGH
@@ -626,6 +636,7 @@ static char CodecPassthroughAC3; ///< pass ac3 through

static const int CodecPassthroughAC3 = 0;
#endif
static char CodecDownmix; ///< enable ac-3 downmix

/**
** Allocate a new audio decoder context.
@@ -665,10 +676,12 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, const char *name,
{
AVCodec *audio_codec;

Debug(3, "codec: using audio codec %s or ID %#06x\n", name, codec_id);

if (name && (audio_codec = avcodec_find_decoder_by_name(name))) {
Debug(3, "codec: audio decoder '%s' found\n", name);
} else if (!(audio_codec = avcodec_find_decoder(codec_id))) {
Fatal(_("codec: codec ID %#04x not found\n"), codec_id);
Fatal(_("codec: codec ID %#06x not found\n"), codec_id);
// FIXME: errors aren't fatal
}
audio_decoder->AudioCodec = audio_codec;
@@ -676,6 +689,12 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, const char *name,
if (!(audio_decoder->AudioCtx = avcodec_alloc_context3(audio_codec))) {
Fatal(_("codec: can't allocate audio codec context\n"));
}

if (CodecDownmix) {
audio_decoder->AudioCtx->request_channels = 2;
audio_decoder->AudioCtx->request_channel_layout =
AV_CH_LAYOUT_STEREO_DOWNMIX;
}
pthread_mutex_lock(&CodecLockMutex);
// open codec
#if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(53,5,0)
@@ -684,10 +703,20 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, const char *name,
Fatal(_("codec: can't open audio codec\n"));
}
#else
if (avcodec_open2(audio_decoder->AudioCtx, audio_codec, NULL) < 0) {
if (1) {
AVDictionary *av_dict;

av_dict = NULL;
// FIXME: import settings
//av_dict_set(&av_dict, "dmix_mode", "0", 0);
//av_dict_set(&av_dict, "ltrt_cmixlev", "1.414", 0);
//av_dict_set(&av_dict, "loro_cmixlev", "1.414", 0);
if (avcodec_open2(audio_decoder->AudioCtx, audio_codec, &av_dict) < 0) {
pthread_mutex_unlock(&CodecLockMutex);
Fatal(_("codec: can't open audio codec\n"));
}
av_dict_free(&av_dict);
}
#endif
pthread_mutex_unlock(&CodecLockMutex);
Debug(3, "codec: audio '%s'\n", audio_decoder->AudioCtx->codec_name);
@@ -697,14 +726,11 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, const char *name,
// we do not send complete frames
audio_decoder->AudioCtx->flags |= CODEC_FLAG_TRUNCATED;
}
if (!(audio_decoder->AudioParser =
av_parser_init(audio_decoder->AudioCtx->codec_id))) {
Fatal(_("codec: can't init audio parser\n"));
}
audio_decoder->SampleRate = 0;
audio_decoder->Channels = 0;
audio_decoder->HwSampleRate = 0;
audio_decoder->HwChannels = 0;
audio_decoder->LastDelay = 0;
}

/**
@@ -715,14 +741,25 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, const char *name,
void CodecAudioClose(AudioDecoder * audio_decoder)
{
// FIXME: output any buffered data
if (audio_decoder->AvResample) {
int ch;

av_resample_close(audio_decoder->AvResample);
audio_decoder->AvResample = NULL;
audio_decoder->RemainCount = 0;
audio_decoder->BufferSize = 0;
audio_decoder->RemainSize = 0;
for (ch = 0; ch < MAX_CHANNELS; ++ch) {
free(audio_decoder->Buffer[ch]);
audio_decoder->Buffer[ch] = NULL;
free(audio_decoder->Remain[ch]);
audio_decoder->Remain[ch] = NULL;
}
}
if (audio_decoder->ReSample) {
audio_resample_close(audio_decoder->ReSample);
audio_decoder->ReSample = NULL;
}
if (audio_decoder->AudioParser) {
av_parser_close(audio_decoder->AudioParser);
audio_decoder->AudioParser = NULL;
}
if (audio_decoder->AudioCtx) {
pthread_mutex_lock(&CodecLockMutex);
avcodec_close(audio_decoder->AudioCtx);
@@ -742,6 +779,16 @@ void CodecSetAudioPassthrough(int mask)
(void)mask;
}

/**
** Set audio downmix.
**
** @param onoff enable/disable downmix.
*/
void CodecSetAudioDownmix(int onoff)
{
CodecDownmix = onoff;
}

/**
** Reorder audio frame.
**
@@ -798,96 +845,125 @@ static void CodecReorderAudioFrame(int16_t * buf, int size, int channels)
}
}

#ifdef USE_AVPARSER

/**
** Decode an audio packet.
**
** PTS must be handled self.
** Set/update audio pts clock.
**
** @param audio_decoder audio decoder data
** @param avpkt audio packet
** @param pts presentation timestamp
*/
void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts)
{
int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 +
FF_INPUT_BUFFER_PADDING_SIZE] __attribute__ ((aligned(16)));
AVCodecContext *audio_ctx;
int index;
struct timespec nowtime;
int64_t delay;
int64_t tim_diff;
int64_t pts_diff;
int drift;
int corr;

//#define spkt avpkt
#if 1 // didn't fix crash in av_parser_parse2
AVPacket spkt[1];
AudioSetClock(pts);

// av_new_packet reserves FF_INPUT_BUFFER_PADDING_SIZE and clears it
if (av_new_packet(spkt, avpkt->size)) {
Error(_("codec: out of memory\n"));
delay = AudioGetDelay();
if (!delay) {
return;
}
memcpy(spkt->data, avpkt->data, avpkt->size);
spkt->pts = avpkt->pts;
spkt->dts = avpkt->dts;
clock_gettime(CLOCK_REALTIME, &nowtime);
if (!audio_decoder->LastDelay) {
audio_decoder->LastTime = nowtime;
audio_decoder->LastPTS = pts;
audio_decoder->LastDelay = delay;
audio_decoder->Drift = 0;
audio_decoder->DriftFrac = 0;
Debug(3, "codec/audio: inital delay %zd ms\n", delay / 90);
return;
}
// collect over some time
pts_diff = pts - audio_decoder->LastPTS;
if (pts_diff < 10 * 1000 * 90) {
return;
}

tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec)
* 1000 * 1000 * 1000 + (nowtime.tv_nsec -
audio_decoder->LastTime.tv_nsec);

drift =
(tim_diff * 90) / (1000 * 1000) - pts_diff + delay -
audio_decoder->LastDelay;

// adjust rounding error
nowtime.tv_nsec -= nowtime.tv_nsec % (1000 * 1000 / 90);
audio_decoder->LastTime = nowtime;
audio_decoder->LastPTS = pts;
audio_decoder->LastDelay = delay;

if (0) {
Debug(3, "codec/audio: interval P:%5zdms T:%5zdms D:%4zdms %f %d\n",
pts_diff / 90, tim_diff / (1000 * 1000), delay / 90, drift / 90.0,
audio_decoder->DriftCorr);
}
// underruns and av_resample have the same time :(((
if (abs(drift) > 10 * 90) {
// drift too big, pts changed?
Debug(3, "codec/audio: drift(%6d) %3dms reset\n",
audio_decoder->DriftCorr, drift / 90);
audio_decoder->LastDelay = 0;
} else {

drift += audio_decoder->Drift;
audio_decoder->Drift = drift;
corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000);
#if defined(USE_PASSTHROUGH) && !defined(USE_AC3_DRIFT_CORRECTION)
// SPDIF/HDMI passthrough
if (!CodecPassthroughAC3
|| audio_decoder->AudioCtx->codec_id != CODEC_ID_AC3)
#endif
#ifdef DEBUG
if (!audio_decoder->AudioParser) {
Fatal(_("codec: internal error parser freeded while running\n"));
{
audio_decoder->DriftCorr = -corr;
}
#endif

audio_ctx = audio_decoder->AudioCtx;
index = 0;
while (spkt->size > index) {
int n;
int l;
AVPacket dpkt[1];

av_init_packet(dpkt);
n = av_parser_parse2(audio_decoder->AudioParser, audio_ctx,
&dpkt->data, &dpkt->size, spkt->data + index, spkt->size - index,
!index ? (uint64_t) spkt->pts : AV_NOPTS_VALUE,
!index ? (uint64_t) spkt->dts : AV_NOPTS_VALUE, -1);

// FIXME: make this a function for both #ifdef cases
if (dpkt->size) {
int buf_sz;

dpkt->pts = audio_decoder->AudioParser->pts;
dpkt->dts = audio_decoder->AudioParser->dts;
buf_sz = sizeof(buf);
l = avcodec_decode_audio3(audio_ctx, buf, &buf_sz, dpkt);
if (l == AVERROR(EAGAIN)) {
index += n; // this is needed for aac latm
continue;
if (audio_decoder->DriftCorr < -20000) { // limit correction
audio_decoder->DriftCorr = -20000;
} else if (audio_decoder->DriftCorr > 20000) {
audio_decoder->DriftCorr = 20000;
}
if (l < 0) { // no audio frame could be decompressed
Error(_("codec: error audio data at %d\n"), index);
break;
}
#ifdef notyetFF_API_OLD_DECODE_AUDIO
// FIXME: ffmpeg git comeing
int got_frame;
// FIXME: this works with libav 0.8, and only with >10ms with ffmpeg 0.10
if (audio_decoder->AvResample && audio_decoder->DriftCorr) {
int distance;

avcodec_decode_audio4(audio_ctx, frame, &got_frame, dpkt);
#else
#endif
// Update audio clock
if ((uint64_t) dpkt->pts != AV_NOPTS_VALUE) {
AudioSetClock(dpkt->pts);
distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000);
av_resample_compensate(audio_decoder->AvResample,
audio_decoder->DriftCorr / 10, distance);
}
// FIXME: must first play remainings bytes, than change and play new.
if (audio_decoder->PassthroughAC3 != CodecPassthroughAC3
|| audio_decoder->SampleRate != audio_ctx->sample_rate
|| audio_decoder->Channels != audio_ctx->channels) {
Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", audio_decoder->DriftCorr,
drift * 1000 / 90, corr);
}
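
Reading the drift bookkeeping above with example numbers (an illustration only; the 48 kHz rate is an assumed example, not taken from the diff):

/* Illustration only, not part of codec.c.
 *
 * Suppose that over a pts_diff of 10 s the elapsed wall-clock time
 * (converted to 90 kHz ticks) plus the change in reported audio delay
 * exceeds pts_diff by 900 ticks, i.e. 10 ms:
 *
 *   drift = (tim_diff * 90) / (1000 * 1000) - pts_diff
 *           + delay - LastDelay                =   900 ticks
 *   corr  = (10 * HwSampleRate * drift) / (90 * 1000)
 *         = (10 * 48000 * 900) / 90000         =  4800
 *
 * DriftCorr becomes -corr, clamped to +/-20000.  Passing
 * DriftCorr / 10 = -480 samples per 10 * HwSampleRate = 480000 output
 * samples to av_resample_compensate() shifts the effective rate by
 * about 0.1 %, which matches the observed 10 ms over 10 s. */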

/**
** Handle audio format changes.
**
** @param audio_decoder audio decoder data
*/
static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder)
{
const AVCodecContext *audio_ctx;
int err;
int isAC3;

audio_ctx = audio_decoder->AudioCtx;

audio_decoder->PassthroughAC3 = CodecPassthroughAC3;

// FIXME: use swr_convert from swresample (only in ffmpeg!)
// FIXME: tell ac3 decoder to use downmix
if (audio_decoder->ReSample) {
audio_resample_close(audio_decoder->ReSample);
audio_decoder->ReSample = NULL;
}
if (audio_decoder->AvResample) {
av_resample_close(audio_decoder->AvResample);
audio_decoder->AvResample = NULL;
audio_decoder->RemainCount = 0;
}

audio_decoder->SampleRate = audio_ctx->sample_rate;
audio_decoder->HwSampleRate = audio_ctx->sample_rate;
@@ -907,8 +983,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
&audio_decoder->HwChannels, isAC3))) {
Debug(3, "codec/audio: resample %dHz *%d -> %dHz *%d\n",
audio_ctx->sample_rate, audio_ctx->channels,
audio_decoder->HwSampleRate,
audio_decoder->HwChannels);
audio_decoder->HwSampleRate, audio_decoder->HwChannels);

if (err == 1) {
audio_decoder->ReSample =
@@ -921,15 +996,169 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
Error(_("codec/audio: resample setup error\n"));
audio_decoder->HwChannels = 0;
audio_decoder->HwSampleRate = 0;
return;
}
} else {
Debug(3, "codec/audio: audio setup error\n");
// FIXME: handle errors
audio_decoder->HwChannels = 0;
audio_decoder->HwSampleRate = 0;
break;
return;
}
}
// prepare audio drift resample
#ifdef USE_AUDIO_DRIFT_CORRECTION
if (!isAC3) {
if (audio_decoder->AvResample) {
Error(_("codec/audio: overwrite resample\n"));
}
audio_decoder->AvResample =
av_resample_init(audio_decoder->HwSampleRate,
audio_decoder->HwSampleRate, 16, 10, 0, 0.8);
if (!audio_decoder->AvResample) {
Error(_("codec/audio: AvResample setup error\n"));
} else {
// reset drift to some default value
audio_decoder->DriftCorr /= 2;
audio_decoder->DriftFrac = 0;
av_resample_compensate(audio_decoder->AvResample,
audio_decoder->DriftCorr / 10,
10 * audio_decoder->HwSampleRate);
}
}
#endif
}

/**
** Codec enqueue audio samples.
**
** @param audio_decoder audio decoder data
** @param data samples data
** @param count number of samples
**
*/
void CodecAudioEnqueue(AudioDecoder * audio_decoder, int16_t * data, int count)
{
#ifdef USE_AUDIO_DRIFT_CORRECTION
if (audio_decoder->AvResample) {
int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 +
FF_INPUT_BUFFER_PADDING_SIZE] __attribute__ ((aligned(16)));
int16_t buftmp[MAX_CHANNELS][(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4];
int consumed;
int i;
int n;
int ch;
int bytes_n;

bytes_n = count / audio_decoder->HwChannels;
// resize sample buffer, if needed
if (audio_decoder->RemainCount + bytes_n > audio_decoder->BufferSize) {
audio_decoder->BufferSize = audio_decoder->RemainCount + bytes_n;
for (ch = 0; ch < MAX_CHANNELS; ++ch) {
audio_decoder->Buffer[ch] =
realloc(audio_decoder->Buffer[ch],
audio_decoder->BufferSize);
}
}
// copy remaining bytes into sample buffer
for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
memcpy(audio_decoder->Buffer[ch], audio_decoder->Remain[ch],
audio_decoder->RemainCount);
}
// deinterleave samples into sample buffer
for (i = 0; i < bytes_n / 2; i++) {
for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
audio_decoder->Buffer[ch][audio_decoder->RemainCount / 2 + i]
= data[i * audio_decoder->HwChannels + ch];
}
}

bytes_n += audio_decoder->RemainSize;
n = 0; // keep gcc lucky
// resample the sample buffer into tmp buffer
for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
n = av_resample(audio_decoder->AvResample, buftmp[ch],
audio_decoder->Buffer[ch], &consumed, bytes_n / 2,
sizeof(buftmp[ch]) / 2, ch == audio_decoder->HwChannels - 1);
// fixme remaining channels
if (bytes_n - consumed * 2 > audio_decoder->RemainSize) {
audio_decoder->RemainSize = bytes_n - consumed * 2;
}
audio_decoder->Remain[ch] =
realloc(audio_decoder->Remain[ch], audio_decoder->RemainSize);
memcpy(audio_decoder->Remain[ch],
audio_decoder->Buffer[ch] + consumed,
audio_decoder->RemainSize);
audio_decoder->RemainCount = audio_decoder->RemainSize;
}

// interleave samples from sample buffer
for (i = 0; i < n; i++) {
for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
buf[i * audio_decoder->HwChannels + ch] = buftmp[ch][i];
}
}
n *= 2;

n *= audio_decoder->HwChannels;
CodecReorderAudioFrame(buf, n, audio_decoder->HwChannels);
AudioEnqueue(buf, n);
return;
}
#endif
CodecReorderAudioFrame(data, count, audio_decoder->HwChannels);
AudioEnqueue(data, count);
}

/**
** Decode an audio packet.
**
** PTS must be handled self.
**
** @param audio_decoder audio decoder data
** @param avpkt audio packet
*/
void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
{
int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 +
FF_INPUT_BUFFER_PADDING_SIZE] __attribute__ ((aligned(16)));
int buf_sz;
int l;
AVCodecContext *audio_ctx;

audio_ctx = audio_decoder->AudioCtx;

buf_sz = sizeof(buf);
l = avcodec_decode_audio3(audio_ctx, buf, &buf_sz, (AVPacket *) avpkt);
if (avpkt->size != l) {
if (l == AVERROR(EAGAIN)) {
Error(_("codec: latm\n"));
return;
}
if (l < 0) { // no audio frame could be decompressed
Error(_("codec: error audio data\n"));
return;
}
Error(_("codec: error more than one frame data\n"));
}
#ifdef notyetFF_API_OLD_DECODE_AUDIO
// FIXME: ffmpeg git comeing
int got_frame;

avcodec_decode_audio4(audio_ctx, frame, &got_frame, avpkt);
#else
#endif

// update audio clock
if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) {
CodecAudioSetClock(audio_decoder, avpkt->pts);

}
// FIXME: must first play remainings bytes, than change and play new.
if (audio_decoder->PassthroughAC3 != CodecPassthroughAC3
|| audio_decoder->SampleRate != audio_ctx->sample_rate
|| audio_decoder->Channels != audio_ctx->channels) {
CodecAudioUpdateFormat(audio_decoder);
}

if (audio_decoder->HwSampleRate && audio_decoder->HwChannels) {
@@ -942,8 +1171,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)

// FIXME: libav-0.7.2 crash here
outlen =
audio_resample(audio_decoder->ReSample, outbuf, buf,
buf_sz);
audio_resample(audio_decoder->ReSample, outbuf, buf, buf_sz);
#ifdef DEBUG
if (outlen != buf_sz) {
Debug(3, "codec/audio: possible fixed ffmpeg\n");
@@ -957,32 +1185,52 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
audio_decoder->HwChannels *
av_get_bytes_per_sample(audio_ctx->sample_fmt);
Debug(4, "codec/audio: %d -> %d\n", buf_sz, outlen);
CodecReorderAudioFrame(outbuf, outlen,
audio_decoder->HwChannels);
AudioEnqueue(outbuf, outlen);
CodecAudioEnqueue(audio_decoder, outbuf, outlen);
}
} else {
#ifdef USE_PASSTHROUGH
// SPDIF/HDMI passthrough
if (CodecPassthroughAC3
&& audio_ctx->codec_id == CODEC_ID_AC3) {
if (CodecPassthroughAC3 && audio_ctx->codec_id == CODEC_ID_AC3) {
// build SPDIF header and append A52 audio to it
// dpkt is the original data
// avpkt is the original data
buf_sz = 6144;
if (buf_sz < dpkt->size + 8) {

#ifdef USE_AC3_DRIFT_CORRECTION
if (1) {
int x;

x = (audio_decoder->DriftFrac +
(audio_decoder->DriftCorr * buf_sz)) / (10 *
audio_decoder->HwSampleRate * 100);
audio_decoder->DriftFrac =
(audio_decoder->DriftFrac +
(audio_decoder->DriftCorr * buf_sz)) % (10 *
audio_decoder->HwSampleRate * 100);
x *= audio_decoder->HwChannels * 4;
if (x < -64) { // limit correction
x = -64;
} else if (x > 64) {
x = 64;
}
buf_sz += x;
}
#endif
if (buf_sz < avpkt->size + 8) {
Error(_
("codec/audio: decoded data smaller than encoded\n"));
break;
return;
}
// copy original data for output
// FIXME: not 100% sure, if endian is correct
buf[0] = htole16(0xF872); // iec 61937 sync word
buf[1] = htole16(0x4E1F);
buf[2] = htole16(0x01 | (dpkt->data[5] & 0x07) << 8);
buf[3] = htole16(dpkt->size * 8);
swab(dpkt->data, buf + 4, dpkt->size);
memset(buf + 4 + dpkt->size / 2, 0,
buf_sz - 8 - dpkt->size);
buf[2] = htole16(0x01 | (avpkt->data[5] & 0x07) << 8);
buf[3] = htole16(avpkt->size * 8);
swab(avpkt->data, buf + 4, avpkt->size);
memset(buf + 4 + avpkt->size / 2, 0, buf_sz - 8 - avpkt->size);
// don't play with the ac-3 samples
AudioEnqueue(buf, buf_sz);
return;
}
|
||||
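Note: the four 16-bit words written above are the IEC 61937 burst preamble (Pa/Pb sync words 0xF872/0x4E1F, Pc with data type 1 for AC-3 plus the mode bits from byte 5 of the frame, Pd = payload length in bits); the AC-3 frame is byte-swapped into the burst and the rest of the 6144-byte burst is zero padding. A condensed sketch of just that framing step, assuming glibc's <endian.h> for htole16() and an even frame size; buffer and parameter names are illustrative, the real code writes into its decode buffer as shown above.

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>             // swab()
    #include <endian.h>             // htole16()

    // Illustrative only: wrap one AC-3 frame into an IEC 61937 burst.
    static void MakeIec61937Ac3Burst(uint16_t * burst, int burst_sz,
        const uint8_t * frame, int frame_sz)
    {
        burst[0] = htole16(0xF872);                 // Pa: sync word 1
        burst[1] = htole16(0x4E1F);                 // Pb: sync word 2
        burst[2] = htole16(0x01 | (frame[5] & 0x07) << 8);  // Pc, as above
        burst[3] = htole16(frame_sz * 8);           // Pd: payload bits
        swab(frame, burst + 4, frame_sz);           // payload, byte-swapped
        memset((uint8_t *) burst + 8 + frame_sz, 0, burst_sz - 8 - frame_sz);
    }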
#if 0
//
@@ -997,7 +1245,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
buf[2] = 0x1F;
buf[3] = 0x4E;
buf[4] = 0x00;
switch (dpkt->size) {
switch (avpkt->size) {
case 512:
buf[5] = 0x0B;
break;
@@ -1013,15 +1261,14 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
buf[5] = 0x00;
break;
}
buf[6] = (dpkt->size * 8);
buf[7] = (dpkt->size * 8) >> 8;
buf[6] = (avpkt->size * 8);
buf[7] = (avpkt->size * 8) >> 8;
//buf[8] = 0x0B;
//buf[9] = 0x77;
//printf("%x %x\n", dpkt->data[0],dpkt->data[1]);
//printf("%x %x\n", avpkt->data[0],avpkt->data[1]);
// swab?
memcpy(buf + 8, dpkt->data, dpkt->size);
memset(buf + 8 + dpkt->size, 0,
buf_sz - 8 - dpkt->size);
memcpy(buf + 8, avpkt->data, avpkt->size);
memset(buf + 8 + avpkt->size, 0, buf_sz - 8 - avpkt->size);
} else if (1) {
// FIXME: need to detect mp2
// FIXME: mp2 passthrough
@@ -1037,98 +1284,10 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
// True HD?
#endif
#endif
CodecReorderAudioFrame(buf, buf_sz,
audio_decoder->HwChannels);
AudioEnqueue(buf, buf_sz);
CodecAudioEnqueue(audio_decoder, buf, buf_sz);
}
}

if (dpkt->size > l) {
Error(_("codec: error more than one frame data\n"));
}
}

index += n;
}

#if 1
// or av_free_packet, make no difference here
av_destruct_packet(spkt);
#endif
}

#else

/**
** Decode an audio packet.
**
** PTS must be handled self.
**
** @param audio_decoder	audio decoder data
** @param avpkt		audio packet
*/
void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
{
int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 +
FF_INPUT_BUFFER_PADDING_SIZE] __attribute__ ((aligned(16)));
AVCodecContext *audio_ctx;
int index;

//#define spkt avpkt
#if 1
AVPacket spkt[1];

// av_new_packet reserves FF_INPUT_BUFFER_PADDING_SIZE and clears it
if (av_new_packet(spkt, avpkt->size)) {
Error(_("codec: out of memory\n"));
return;
}
memcpy(spkt->data, avpkt->data, avpkt->size);
spkt->pts = avpkt->pts;
spkt->dts = avpkt->dts;
#endif
audio_ctx = audio_decoder->AudioCtx;
index = 0;
while (spkt->size > index) {
int n;
int buf_sz;
AVPacket dpkt[1];

av_init_packet(dpkt);
dpkt->data = spkt->data + index;
dpkt->size = spkt->size - index;

buf_sz = sizeof(buf);
n = avcodec_decode_audio3(audio_ctx, buf, &buf_sz, dpkt);
if (n < 0) {			// no audio frame could be decompressed
Error(_("codec: error audio data at %d\n"), index);
break;
}
#ifdef DEBUG
Debug(4, "codec/audio: -> %d\n", buf_sz);
if ((unsigned)buf_sz > sizeof(buf)) {
abort();
}
#endif
#ifdef notyetFF_API_OLD_DECODE_AUDIO
// FIXME: ffmpeg git coming
int got_frame;

avcodec_decode_audio4(audio_ctx, frame, &got_frame, dpkt);
#else
#endif
// FIXME: see above, old code removed

index += n;
}

#if 1
// or av_free_packet, make no difference here
av_destruct_packet(spkt);
#endif
}

#endif

/**
** Flush the audio decoder.
@@ -1137,7 +1296,6 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
*/
void CodecAudioFlushBuffers(AudioDecoder * decoder)
{
// FIXME: reset audio parser
avcodec_flush_buffers(decoder->AudioCtx);
}

3 codec.h
@@ -67,6 +67,9 @@ extern void CodecAudioOpen(AudioDecoder *, const char *, int);
/// Close audio codec.
extern void CodecAudioClose(AudioDecoder *);

/// Decode an audio packet.
extern void CodecAudioDecodeOld(AudioDecoder *, const AVPacket *);

/// Decode an audio packet.
extern void CodecAudioDecode(AudioDecoder *, const AVPacket *);

1213 softhddev.c
File diff suppressed because it is too large

12 softhddev.h
@@ -37,8 +37,8 @@ extern "C"

/// C plugin play audio packet
extern int PlayAudio(const uint8_t *, int, uint8_t);
/// C plugin mute audio
extern void Mute(void);
/// C plugin play TS audio packet
extern int PlayTsAudio(const uint8_t *, int);
/// C plugin set audio volume
extern void SetVolumeDevice(int);

@@ -50,13 +50,17 @@ extern "C"
extern uint8_t *GrabImage(int *, int, int, int, int);

/// C plugin set play mode
extern void SetPlayMode(void);
extern int SetPlayMode(int);
/// C plugin set trick speed
extern void TrickSpeed(int);
/// C plugin clears all video and audio data from the device
extern void Clear(void);
/// C plugin sets the device into play mode
extern void Play(void);
/// C plugin sets the device into "freeze frame" mode
extern void Freeze(void);
/// C plugin mute audio
extern void Mute(void);
/// C plugin display I-frame as a still picture.
extern void StillPicture(const uint8_t *, int);
/// C plugin poll if ready
@@ -72,7 +76,7 @@ extern "C"

/// C plugin exit + cleanup
extern void SoftHdDeviceExit(void);
/// C plugin start code
extern void Start(void);
extern int Start(void);
/// C plugin stop code
extern void Stop(void);
/// C plugin main thread hook

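Note: the C interface now uses int returns (SetPlayMode(int), Start(void), plus the new PlayTsAudio() that takes one transport-stream packet). The sketch below only illustrates the 188-byte TS granularity of PlayTsAudio(); the wrapper name, the loop, and the "bytes consumed" return handling are assumptions for illustration, since VDR itself already hands the device one packet per call.

    #include <stdint.h>

    #define TS_PACKET_SIZE 188          // standard MPEG-TS packet size

    extern int PlayTsAudio(const uint8_t *, int);

    // Illustrative only: feed a buffer of whole TS packets to the C interface.
    static void FeedTsAudio(const uint8_t * data, int length)
    {
        while (length >= TS_PACKET_SIZE) {
            if (PlayTsAudio(data, TS_PACKET_SIZE) <= 0) {
                break;                  // assumed: nothing consumed, try later
            }
            data += TS_PACKET_SIZE;
            length -= TS_PACKET_SIZE;
        }
    }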
418 softhddevice.cpp
@@ -38,15 +38,20 @@ extern "C"
#include "video.h"
extern void AudioPoller(void);
extern void CodecSetAudioPassthrough(int);
extern void CodecSetAudioDownmix(int);
}

//////////////////////////////////////////////////////////////////////////////

static const char *const VERSION = "0.4.8";
static const char *const VERSION = "0.4.9"
#ifdef GIT_REV
"-GIT" GIT_REV
#endif
;
static const char *const DESCRIPTION =
trNOOP("A software and GPU emulated HD device");

static const char *MAINMENUENTRY = trNOOP("Suspend Soft-HD-Device");
static const char *MAINMENUENTRY = trNOOP("SoftHdDevice");
static class cSoftHdDevice *MyDevice;

//////////////////////////////////////////////////////////////////////////////
@@ -61,8 +66,10 @@ static const char *const Resolution[RESOLUTIONS] = {
|
||||
static char ConfigMakePrimary; ///< config primary wanted
|
||||
static char ConfigHideMainMenuEntry; ///< config hide main menu entry
|
||||
|
||||
static uint32_t ConfigVideoBackground; ///< config video background color
|
||||
static int ConfigVideoSkipLines; ///< config skip lines top/bottom
|
||||
static int ConfigVideoStudioLevels; ///< config use studio levels
|
||||
static int ConfigVideo60HzMode; ///< config use 60Hz display mode
|
||||
|
||||
/// config deinterlace
|
||||
static int ConfigVideoDeinterlace[RESOLUTIONS];
|
||||
@@ -84,6 +91,7 @@ static int ConfigVideoScaling[RESOLUTIONS];
|
||||
|
||||
static int ConfigVideoAudioDelay; ///< config audio delay
|
||||
static int ConfigAudioPassthrough; ///< config audio pass-through
|
||||
static int ConfigAudioDownmix; ///< config audio downmix
|
||||
|
||||
static int ConfigAutoCropInterval; ///< auto crop detection interval
|
||||
static int ConfigAutoCropDelay; ///< auto crop detection delay
|
||||
@@ -94,6 +102,11 @@ static char ConfigSuspendX11; ///< suspend should stop x11
|
||||
|
||||
static volatile char DoMakePrimary; ///< flag switch primary
|
||||
|
||||
#define SUSPEND_EXTERNAL -1 ///< play external suspend mode
|
||||
#define SUSPEND_NORMAL 0 ///< normal suspend mode
|
||||
#define SUSPEND_DETACHED 1 ///< detached suspend mode
|
||||
static char SuspendMode; ///< suspend mode
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
@@ -137,16 +150,23 @@ extern "C" void FeedKeyPress(const char *keymap, const char *key, int repeat,
|
||||
}
|
||||
|
||||
//dsyslog("[softhddev]%s %s, %s\n", __FUNCTION__, keymap, key);
|
||||
if (key[1]) { // no single character
|
||||
csoft->Put(key, repeat, release);
|
||||
} else if (!csoft->Put(key, repeat, release)) {
|
||||
cRemote::Put(KBDKEY(key[0])); // feed it for edit mode
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// OSD
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
** Soft device plugin OSD class.
|
||||
*/
|
||||
class cSoftOsd:public cOsd
|
||||
{
|
||||
int Level; ///< level: subtitle
|
||||
//int Level; ///< level: subtitle
|
||||
|
||||
public:
|
||||
cSoftOsd(int, int, uint);
|
||||
@@ -167,7 +187,7 @@ static volatile char OsdDirty; ///< flag force redraw everything
|
||||
*/
|
||||
void cSoftOsd::SetActive(bool on)
|
||||
{
|
||||
dsyslog("[softhddev]%s: %d\n", __FUNCTION__, on);
|
||||
//dsyslog("[softhddev]%s: %d\n", __FUNCTION__, on);
|
||||
|
||||
if (Active() == on) {
|
||||
return; // already active, no action
|
||||
@@ -188,7 +208,7 @@ cSoftOsd::cSoftOsd(int left, int top, uint level)
|
||||
OsdHeight(), left, top, level);
|
||||
*/
|
||||
|
||||
this->Level = level;
|
||||
//this->Level = level;
|
||||
SetActive(true);
|
||||
}
|
||||
|
||||
@@ -285,7 +305,7 @@ void cSoftOsd::Flush(void)
|
||||
}
|
||||
#ifdef DEBUG
|
||||
if (w > bitmap->Width() || h > bitmap->Height()) {
|
||||
esyslog(tr("softhdev: dirty area too big\n"));
|
||||
esyslog(tr("[softhddev]: dirty area too big\n"));
|
||||
abort();
|
||||
}
|
||||
#endif
|
||||
@@ -334,6 +354,9 @@ void cSoftOsd::Flush(void)
|
||||
// OSD provider
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
** Soft device plugin OSD provider class.
|
||||
*/
|
||||
class cSoftOsdProvider:public cOsdProvider
|
||||
{
|
||||
private:
|
||||
@@ -357,12 +380,13 @@ cOsd *cSoftOsdProvider::CreateOsd(int left, int top, uint level)
|
||||
{
|
||||
//dsyslog("[softhddev]%s: %d, %d, %d\n", __FUNCTION__, left, top, level);
|
||||
|
||||
Osd = new cSoftOsd(left, top, level);
|
||||
return Osd;
|
||||
return Osd = new cSoftOsd(left, top, level);
|
||||
}
|
||||
|
||||
/**
|
||||
** Returns true if this OSD provider is able to handle a true color OSD.
|
||||
** Check if this OSD provider is able to handle a true color OSD.
|
||||
**
|
||||
** @returns true we are able to handle a true color OSD.
|
||||
*/
|
||||
bool cSoftOsdProvider::ProvidesTrueColor(void)
|
||||
{
|
||||
@@ -382,11 +406,19 @@ cSoftOsdProvider::cSoftOsdProvider(void)
|
||||
// cMenuSetupPage
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
** Soft device plugin menu setup page class.
|
||||
*/
|
||||
class cMenuSetupSoft:public cMenuSetupPage
|
||||
{
|
||||
protected:
|
||||
///
|
||||
/// local copies of global setup variables:
|
||||
/// @{
|
||||
int MakePrimary;
|
||||
int HideMainMenuEntry;
|
||||
uint32_t Background;
|
||||
uint32_t BackgroundAlpha;
|
||||
int SkipLines;
|
||||
int StudioLevels;
|
||||
int Scaling[RESOLUTIONS];
|
||||
@@ -397,11 +429,13 @@ class cMenuSetupSoft:public cMenuSetupPage
|
||||
int Sharpen[RESOLUTIONS];
|
||||
int AudioDelay;
|
||||
int AudioPassthrough;
|
||||
int AudioDownmix;
|
||||
int AutoCropInterval;
|
||||
int AutoCropDelay;
|
||||
int AutoCropTolerance;
|
||||
int SuspendClose;
|
||||
int SuspendX11;
|
||||
/// @}
|
||||
protected:
|
||||
virtual void Store(void);
|
||||
public:
|
||||
@@ -456,6 +490,13 @@ cMenuSetupSoft::cMenuSetupSoft(void)
|
||||
//
|
||||
Add(SeparatorItem(tr("Video")));
|
||||
|
||||
// no unsigned int menu item supported, split background color/alpha
|
||||
Background = ConfigVideoBackground >> 8;
|
||||
BackgroundAlpha = ConfigVideoBackground & 0xFF;
|
||||
Add(new cMenuEditIntItem(tr("video background color (RGB)"),
|
||||
(int *)&Background, 0, 0x00FFFFFF));
|
||||
Add(new cMenuEditIntItem(tr("video background color (Alpha)"),
|
||||
(int *)&BackgroundAlpha, 0, 0xFF));
|
||||
SkipLines = ConfigVideoSkipLines;
|
||||
Add(new cMenuEditIntItem(tr("Skip lines top+bot (pixel)"), &SkipLines, 0,
|
||||
64));
|
||||
@@ -478,10 +519,10 @@ cMenuSetupSoft::cMenuSetupSoft(void)
|
||||
&InverseTelecine[i], trVDR("no"), trVDR("yes")));
|
||||
Denoise[i] = ConfigVideoDenoise[i];
|
||||
Add(new cMenuEditIntItem(tr("Denoise (0..1000) (vdpau)"), &Denoise[i],
|
||||
0, 1000));
|
||||
0, 1000, tr("off"), tr("max")));
|
||||
Sharpen[i] = ConfigVideoSharpen[i];
|
||||
Add(new cMenuEditIntItem(tr("Sharpen (-1000..1000) (vdpau)"),
|
||||
&Sharpen[i], -1000, 1000));
|
||||
&Sharpen[i], -1000, 1000, tr("blur max"), tr("sharpen max")));
|
||||
}
|
||||
//
|
||||
// audio
|
||||
@@ -493,13 +534,16 @@ cMenuSetupSoft::cMenuSetupSoft(void)
|
||||
AudioPassthrough = ConfigAudioPassthrough;
|
||||
Add(new cMenuEditStraItem(tr("Audio pass-through"), &AudioPassthrough, 2,
|
||||
passthrough));
|
||||
AudioDownmix = ConfigAudioDownmix;
|
||||
Add(new cMenuEditBoolItem(tr("Enable AC-3 downmix"), &AudioDownmix,
|
||||
trVDR("no"), trVDR("yes")));
|
||||
//
|
||||
// auto-crop
|
||||
//
|
||||
Add(SeparatorItem(tr("Auto-crop")));
|
||||
AutoCropInterval = ConfigAutoCropInterval;
|
||||
Add(new cMenuEditIntItem(tr("autocrop interval (frames)"),
|
||||
&AutoCropInterval, 0, 200));
|
||||
&AutoCropInterval, 0, 200, tr("off")));
|
||||
AutoCropDelay = ConfigAutoCropDelay;
|
||||
Add(new cMenuEditIntItem(tr("autocrop delay (n * interval)"),
|
||||
&AutoCropDelay, 0, 200));
|
||||
@@ -529,6 +573,9 @@ void cMenuSetupSoft::Store(void)
|
||||
SetupStore("HideMainMenuEntry", ConfigHideMainMenuEntry =
|
||||
HideMainMenuEntry);
|
||||
|
||||
ConfigVideoBackground = Background << 8 | (BackgroundAlpha & 0xFF);
|
||||
SetupStore("Background", ConfigVideoBackground);
|
||||
VideoSetBackground(ConfigVideoBackground);
|
||||
SetupStore("SkipLines", ConfigVideoSkipLines = SkipLines);
|
||||
VideoSetSkipLines(ConfigVideoSkipLines);
|
||||
SetupStore("StudioLevels", ConfigVideoStudioLevels = StudioLevels);
|
||||
@@ -563,6 +610,8 @@ void cMenuSetupSoft::Store(void)
|
||||
VideoSetAudioDelay(ConfigVideoAudioDelay);
|
||||
SetupStore("AudioPassthrough", ConfigAudioPassthrough = AudioPassthrough);
|
||||
CodecSetAudioPassthrough(ConfigAudioPassthrough);
|
||||
SetupStore("AudioDownmix", ConfigAudioDownmix = AudioDownmix);
|
||||
CodecSetAudioDownmix(ConfigAudioDownmix);
|
||||
|
||||
SetupStore("AutoCrop.Interval", ConfigAutoCropInterval = AutoCropInterval);
|
||||
SetupStore("AutoCrop.Delay", ConfigAutoCropDelay = AutoCropDelay);
|
||||
@@ -629,23 +678,31 @@ cSoftHdPlayer *cSoftHdControl::Player;
|
||||
*/
|
||||
eOSState cSoftHdControl::ProcessKey(eKeys key)
|
||||
{
|
||||
if (!ISMODELESSKEY(key) || key == kBack || key == kStop) {
|
||||
if (SuspendMode == SUSPEND_NORMAL && (!ISMODELESSKEY(key)
|
||||
|| key == kMenu || key == kBack || key == kStop)) {
|
||||
if (Player) {
|
||||
delete Player;
|
||||
|
||||
Player = NULL;
|
||||
}
|
||||
Resume();
|
||||
SuspendMode = 0;
|
||||
return osEnd;
|
||||
}
|
||||
return osContinue;
|
||||
}
|
||||
|
||||
/**
|
||||
** Player control constructor.
|
||||
*/
|
||||
cSoftHdControl::cSoftHdControl(void)
|
||||
: cControl(Player = new cSoftHdPlayer)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
** Player control destructor.
|
||||
*/
|
||||
cSoftHdControl::~cSoftHdControl()
|
||||
{
|
||||
if (Player) {
|
||||
@@ -653,7 +710,140 @@ cSoftHdControl::~cSoftHdControl()
|
||||
|
||||
Player = NULL;
|
||||
}
|
||||
Resume();
|
||||
|
||||
dsyslog("[softhddev]%s: resume\n", __FUNCTION__);
|
||||
//Resume();
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// cOsdMenu
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
** Soft device plugin menu class.
|
||||
*/
|
||||
class cSoftHdMenu:public cOsdMenu
|
||||
{
|
||||
int HotkeyState; ///< current hot-key state
|
||||
int HotkeyCode; ///< current hot-key code
|
||||
public:
|
||||
cSoftHdMenu(const char *, int = 0, int = 0, int = 0, int = 0, int = 0);
|
||||
virtual ~ cSoftHdMenu();
|
||||
virtual eOSState ProcessKey(eKeys);
|
||||
};
|
||||
|
||||
/**
|
||||
** Soft device menu constructor.
|
||||
*/
|
||||
cSoftHdMenu::cSoftHdMenu(const char *title, int c0, int c1, int c2, int c3,
|
||||
int c4)
|
||||
:cOsdMenu(title, c0, c1, c2, c3, c4)
|
||||
{
|
||||
HotkeyState = 0;
|
||||
|
||||
SetHasHotkeys();
|
||||
Add(new cOsdItem(hk(tr("Suspend SoftHdDevice")), osUser1));
|
||||
}
|
||||
|
||||
/**
|
||||
** Soft device menu destructor.
|
||||
*/
|
||||
cSoftHdMenu::~cSoftHdMenu()
|
||||
{
|
||||
}
|
||||
|
||||
/**
** Handle hot key commands.
**
** @param code	numeric hot key code
*/
static void HandleHotkey(int code)
{
switch (code) {
case 10:			// disable pass-through
CodecSetAudioPassthrough(ConfigAudioPassthrough = 0);
break;
case 11:			// enable pass-through
CodecSetAudioPassthrough(ConfigAudioPassthrough = 1);
break;
case 12:			// toggle pass-through
CodecSetAudioPassthrough(ConfigAudioPassthrough ^= 1);
break;
default:
esyslog(tr("[softhddev]: hot key %d is not supported\n"), code);
break;
}
}

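Note: these codes are reachable without the OSD as well; the new SVDRP HOTK command (handled in SVDRPCommand() further down) parses its argument and dispatches it to HandleHotkey(). A minimal sketch of that dispatch path, assuming <stdlib.h> for strtol(); the wrapper name is invented, the two statements mirror what the diff shows below.

    #include <stdlib.h>

    // Illustrative: SVDRP "HOTK <n>" ends up here (see SVDRPCommand() below).
    static void DispatchHotkeyOption(const char *option)
    {
        int hotk;

        hotk = strtol(option, NULL, 0); // numeric hot-key code
        HandleHotkey(hotk);             // 10/11/12 switch audio pass-through
    }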
|
||||
/**
|
||||
** Handle key event.
|
||||
**
|
||||
** @param key key event
|
||||
*/
|
||||
eOSState cSoftHdMenu::ProcessKey(eKeys key)
|
||||
{
|
||||
eOSState state;
|
||||
|
||||
//dsyslog("[softhddev]%s: %x\n", __FUNCTION__, key);
|
||||
|
||||
switch (HotkeyState) {
|
||||
case 0: // initial state, waiting for hot key
|
||||
if (key == kBlue) {
|
||||
HotkeyState = 1;
|
||||
return osContinue;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (k0 <= key && key <= k9) {
|
||||
HotkeyCode = key - k0;
|
||||
HotkeyState = 2;
|
||||
return osContinue;
|
||||
}
|
||||
HotkeyState = 0;
|
||||
break;
|
||||
case 2:
|
||||
if (k0 <= key && key <= k9) {
|
||||
HotkeyCode *= 10;
|
||||
HotkeyCode += key - k0;
|
||||
HotkeyState = 0;
|
||||
dsyslog("[softhddev]%s: hot-key %d\n", __FUNCTION__,
|
||||
HotkeyCode);
|
||||
HandleHotkey(HotkeyCode);
|
||||
return osEnd;
|
||||
}
|
||||
if (key == kOk) {
|
||||
HotkeyState = 0;
|
||||
dsyslog("[softhddev]%s: hot-key %d\n", __FUNCTION__,
|
||||
HotkeyCode);
|
||||
HandleHotkey(HotkeyCode);
|
||||
return osEnd;
|
||||
}
|
||||
HotkeyState = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
// call standard function
|
||||
state = cOsdMenu::ProcessKey(key);
|
||||
|
||||
switch (state) {
|
||||
case osUser1:
|
||||
if (!cSoftHdControl::Player) { // not already suspended
|
||||
cControl::Launch(new cSoftHdControl);
|
||||
cControl::Attach();
|
||||
Suspend(ConfigSuspendClose, ConfigSuspendClose,
|
||||
ConfigSuspendX11);
|
||||
SuspendMode = SUSPEND_NORMAL;
|
||||
if (ShutdownHandler.GetUserInactiveTime()) {
|
||||
dsyslog("[softhddev]%s: set user inactive\n",
|
||||
__FUNCTION__);
|
||||
ShutdownHandler.SetUserInactive();
|
||||
}
|
||||
}
|
||||
return osEnd;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return state;
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
@@ -679,12 +869,15 @@ class cSoftHdDevice:public cDevice
|
||||
virtual bool Flush(int = 0);
|
||||
virtual int64_t GetSTC(void);
|
||||
virtual void SetVideoDisplayFormat(eVideoDisplayFormat);
|
||||
virtual void SetVideoFormat(bool);
|
||||
virtual void GetVideoSize(int &, int &, double &);
|
||||
virtual void GetOsdSize(int &, int &, double &);
|
||||
virtual int PlayVideo(const uchar *, int);
|
||||
|
||||
//virtual int PlayTsVideo(const uchar *, int);
|
||||
#ifndef USE_AUDIO_THREAD // FIXME: testing none threaded
|
||||
virtual int PlayAudio(const uchar *, int, uchar);
|
||||
#ifdef USE_TS_VIDEO
|
||||
virtual int PlayTsVideo(const uchar *, int);
|
||||
#endif
|
||||
#if !defined(USE_AUDIO_THREAD) || !defined(NO_TS_AUDIO)
|
||||
virtual int PlayTsAudio(const uchar *, int);
|
||||
#endif
|
||||
virtual void SetAudioChannelDevice(int);
|
||||
@@ -692,7 +885,6 @@ class cSoftHdDevice:public cDevice
|
||||
virtual void SetDigitalAudioDevice(bool);
|
||||
virtual void SetAudioTrackDevice(eTrackType);
|
||||
virtual void SetVolumeDevice(int);
|
||||
virtual int PlayAudio(const uchar *, int, uchar);
|
||||
|
||||
// Image Grab facilities
|
||||
|
||||
@@ -717,7 +909,6 @@ cSoftHdDevice::cSoftHdDevice(void)
|
||||
#if 0
|
||||
spuDecoder = NULL;
|
||||
#endif
|
||||
SetVideoDisplayFormat(eVideoDisplayFormat(Setup.VideoDisplayFormat));
|
||||
}
|
||||
|
||||
cSoftHdDevice::~cSoftHdDevice(void)
|
||||
@@ -737,6 +928,13 @@ void cSoftHdDevice::MakePrimaryDevice(bool on)
|
||||
cDevice::MakePrimaryDevice(on);
|
||||
if (on) {
|
||||
new cSoftOsdProvider();
|
||||
if (SuspendMode == SUSPEND_DETACHED) {
|
||||
Resume();
|
||||
SuspendMode = 0;
|
||||
}
|
||||
} else if (!SuspendMode) {
|
||||
Suspend(1, 1, 0);
|
||||
SuspendMode = SUSPEND_DETACHED;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -769,6 +967,8 @@ bool cSoftHdDevice::CanReplay(void) const
|
||||
|
||||
/**
|
||||
** Sets the device into the given play mode.
|
||||
**
|
||||
** @param play_mode new play mode (Audio/Video/External...)
|
||||
*/
|
||||
bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode)
|
||||
{
|
||||
@@ -787,14 +987,23 @@ bool cSoftHdDevice::SetPlayMode(ePlayMode play_mode)
|
||||
case pmExtern_THIS_SHOULD_BE_AVOIDED:
|
||||
dsyslog("[softhddev] play mode external\n");
|
||||
Suspend(1, 1, 0);
|
||||
SuspendMode = SUSPEND_EXTERNAL;
|
||||
return true;
|
||||
default:
|
||||
dsyslog("[softhddev] playmode not implemented... %d\n", play_mode);
|
||||
break;
|
||||
}
|
||||
::SetPlayMode();
|
||||
|
||||
if (SuspendMode) {
|
||||
if (SuspendMode != SUSPEND_EXTERNAL) {
|
||||
return true;
|
||||
}
|
||||
Resume();
|
||||
SuspendMode = 0;
|
||||
}
|
||||
|
||||
return::SetPlayMode(play_mode);
|
||||
}
|
||||
|
||||
/**
|
||||
** Gets the current System Time Counter, which can be used to
|
||||
@@ -810,13 +1019,21 @@ int64_t cSoftHdDevice::GetSTC(void)
|
||||
/**
|
||||
** Set trick play speed.
|
||||
**
|
||||
** Every single frame shall then be displayed the given number of
|
||||
** times.
|
||||
**
|
||||
** @param speed trick speed
|
||||
*/
|
||||
void cSoftHdDevice::TrickSpeed(int speed)
|
||||
{
|
||||
dsyslog("[softhddev]%s: %d\n", __FUNCTION__, speed);
|
||||
|
||||
::TrickSpeed(speed);
|
||||
}
|
||||
|
||||
/**
|
||||
** Clears all video and audio data from the device.
|
||||
*/
|
||||
void cSoftHdDevice::Clear(void)
|
||||
{
|
||||
dsyslog("[softhddev]%s:\n", __FUNCTION__);
|
||||
@@ -825,6 +1042,9 @@ void cSoftHdDevice::Clear(void)
|
||||
::Clear();
|
||||
}
|
||||
|
||||
/**
|
||||
** Sets the device into play mode (after a previous trick mode)
|
||||
*/
|
||||
void cSoftHdDevice::Play(void)
|
||||
{
|
||||
dsyslog("[softhddev]%s:\n", __FUNCTION__);
|
||||
@@ -902,11 +1122,9 @@ bool cSoftHdDevice::Flush(int timeout_ms)
|
||||
/**
|
||||
** Sets the video display format to the given one (only useful if this
|
||||
** device has an MPEG decoder).
|
||||
**
|
||||
** @note this function isn't called on the initial channel
|
||||
*/
|
||||
void cSoftHdDevice::SetVideoDisplayFormat(
|
||||
eVideoDisplayFormat video_display_format)
|
||||
void cSoftHdDevice:: SetVideoDisplayFormat(eVideoDisplayFormat
|
||||
video_display_format)
|
||||
{
|
||||
static int last = -1;
|
||||
|
||||
@@ -923,6 +1141,23 @@ void cSoftHdDevice::SetVideoDisplayFormat(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
** Sets the output video format to either 16:9 or 4:3 (only useful
|
||||
** if this device has an MPEG decoder).
|
||||
**
|
||||
** Should call SetVideoDisplayFormat.
|
||||
**
|
||||
** @param video_format16_9 flag true 16:9.
|
||||
*/
|
||||
void cSoftHdDevice::SetVideoFormat(bool video_format16_9)
|
||||
{
|
||||
dsyslog("[softhddev]%s: %d\n", __FUNCTION__, video_format16_9);
|
||||
|
||||
// FIXME: 4:3 / 16:9 video format not supported.
|
||||
|
||||
SetVideoDisplayFormat(eVideoDisplayFormat(Setup.VideoDisplayFormat));
|
||||
}
|
||||
|
||||
/**
|
||||
** Returns the width, height and video_aspect ratio of the currently
|
||||
** displayed video material.
|
||||
@@ -948,6 +1183,10 @@ void cSoftHdDevice::GetOsdSize(int &width, int &height, double &pixel_aspect)
|
||||
|
||||
/**
|
||||
** Play a audio packet.
|
||||
**
|
||||
** @param data exactly one complete PES packet (which is incomplete)
|
||||
** @param length length of PES packet
|
||||
** @param id type of audio data this packet holds
|
||||
*/
|
||||
int cSoftHdDevice::PlayAudio(const uchar * data, int length, uchar id)
|
||||
{
|
||||
@@ -995,6 +1234,9 @@ void cSoftHdDevice::SetVolumeDevice(int volume)
|
||||
|
||||
/**
|
||||
** Play a video packet.
|
||||
**
|
||||
** @param data exactly one complete PES packet (which is incomplete)
|
||||
** @param length length of PES packet
|
||||
*/
|
||||
int cSoftHdDevice::PlayVideo(const uchar * data, int length)
|
||||
{
|
||||
@@ -1003,31 +1245,37 @@ int cSoftHdDevice::PlayVideo(const uchar * data, int length)
|
||||
return::PlayVideo(data, length);
|
||||
}
|
||||
|
||||
#if 0
|
||||
#ifdef USE_TS_VIDEO
|
||||
|
||||
///
|
||||
/// Play a TS video packet.
|
||||
///
|
||||
/// @param data ts data buffer
|
||||
/// @param length ts packet length (188)
|
||||
///
|
||||
int cSoftHdDevice::PlayTsVideo(const uchar * data, int length)
|
||||
{
|
||||
// many code to repeat
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef USE_AUDIO_THREAD // FIXME: testing none threaded
|
||||
#if !defined(USE_AUDIO_THREAD) || !defined(NO_TS_AUDIO)
|
||||
|
||||
///
|
||||
/// Play a TS audio packet.
|
||||
///
|
||||
/// misuse this function as audio poller
|
||||
///
|
||||
/// @param data ts data buffer
|
||||
/// @param length ts packet length
|
||||
/// @param length ts packet length (188)
|
||||
///
|
||||
int cSoftHdDevice::PlayTsAudio(const uchar * data, int length)
|
||||
{
|
||||
#ifndef NO_TS_AUDIO
|
||||
return::PlayTsAudio(data, length);
|
||||
#else
|
||||
AudioPoller();
|
||||
|
||||
return cDevice::PlayTsAudio(data, length);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -1103,6 +1351,11 @@ cPluginSoftHdDevice::~cPluginSoftHdDevice(void)
|
||||
::SoftHdDeviceExit();
|
||||
}
|
||||
|
||||
/**
|
||||
** Return plugin version number.
|
||||
**
|
||||
** @returns version number as constant string.
|
||||
*/
|
||||
const char *cPluginSoftHdDevice::Version(void)
|
||||
{
|
||||
return VERSION;
|
||||
@@ -1161,7 +1414,11 @@ bool cPluginSoftHdDevice::Start(void)
|
||||
}
|
||||
}
|
||||
|
||||
::Start();
|
||||
if (!::Start()) {
|
||||
cControl::Launch(new cSoftHdControl);
|
||||
cControl::Attach();
|
||||
SuspendMode = SUSPEND_NORMAL;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -1204,11 +1461,13 @@ cOsdObject *cPluginSoftHdDevice::MainMenuAction(void)
|
||||
{
|
||||
//dsyslog("[softhddev]%s:\n", __FUNCTION__);
|
||||
|
||||
#if 0
|
||||
//MyDevice->StopReplay();
|
||||
if (!cSoftHdControl::Player) { // not already suspended
|
||||
cControl::Launch(new cSoftHdControl);
|
||||
cControl::Attach();
|
||||
Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11);
|
||||
SuspendMode = SUSPEND_NORMAL;
|
||||
if (ShutdownHandler.GetUserInactiveTime()) {
|
||||
dsyslog("[softhddev]%s: set user inactive\n", __FUNCTION__);
|
||||
ShutdownHandler.SetUserInactive();
|
||||
@@ -1216,6 +1475,8 @@ cOsdObject *cPluginSoftHdDevice::MainMenuAction(void)
|
||||
}
|
||||
|
||||
return NULL;
|
||||
#endif
|
||||
return new cSoftHdMenu("SoftHdDevice");
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1235,6 +1496,7 @@ void cPluginSoftHdDevice::MainThreadHook(void)
|
||||
if (ShutdownHandler.IsUserInactive()) {
|
||||
// this is regular called, but guarded against double calls
|
||||
Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11);
|
||||
SuspendMode = SUSPEND_NORMAL;
|
||||
}
|
||||
|
||||
::MainThreadHook();
|
||||
@@ -1264,94 +1526,106 @@ bool cPluginSoftHdDevice::SetupParse(const char *name, const char *value)
|
||||
|
||||
//dsyslog("[softhddev]%s: '%s' = '%s'\n", __FUNCTION__, name, value);
|
||||
|
||||
if (!strcmp(name, "MakePrimary")) {
|
||||
if (!strcasecmp(name, "MakePrimary")) {
|
||||
ConfigMakePrimary = atoi(value);
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "HideMainMenuEntry")) {
|
||||
if (!strcasecmp(name, "HideMainMenuEntry")) {
|
||||
ConfigHideMainMenuEntry = atoi(value);
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "SkipLines")) {
|
||||
if (!strcasecmp(name, "Background")) {
|
||||
VideoSetBackground(ConfigVideoBackground = strtoul(value, NULL, 0));
|
||||
return true;
|
||||
}
|
||||
if (!strcasecmp(name, "SkipLines")) {
|
||||
VideoSetSkipLines(ConfigVideoSkipLines = atoi(value));
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "StudioLevels")) {
|
||||
if (!strcasecmp(name, "StudioLevels")) {
|
||||
VideoSetStudioLevels(ConfigVideoStudioLevels = atoi(value));
|
||||
return true;
|
||||
}
|
||||
if (!strcasecmp(name, "60HzMode")) {
|
||||
VideoSet60HzMode(ConfigVideo60HzMode = atoi(value));
|
||||
return true;
|
||||
}
|
||||
for (i = 0; i < RESOLUTIONS; ++i) {
|
||||
char buf[128];
|
||||
|
||||
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Scaling");
|
||||
if (!strcmp(name, buf)) {
|
||||
if (!strcasecmp(name, buf)) {
|
||||
ConfigVideoScaling[i] = atoi(value);
|
||||
VideoSetScaling(ConfigVideoScaling);
|
||||
return true;
|
||||
}
|
||||
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Deinterlace");
|
||||
if (!strcmp(name, buf)) {
|
||||
if (!strcasecmp(name, buf)) {
|
||||
ConfigVideoDeinterlace[i] = atoi(value);
|
||||
VideoSetDeinterlace(ConfigVideoDeinterlace);
|
||||
return true;
|
||||
}
|
||||
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i],
|
||||
"SkipChromaDeinterlace");
|
||||
if (!strcmp(name, buf)) {
|
||||
if (!strcasecmp(name, buf)) {
|
||||
ConfigVideoSkipChromaDeinterlace[i] = atoi(value);
|
||||
VideoSetSkipChromaDeinterlace(ConfigVideoSkipChromaDeinterlace);
|
||||
return true;
|
||||
}
|
||||
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "InverseTelecine");
|
||||
if (!strcmp(name, buf)) {
|
||||
if (!strcasecmp(name, buf)) {
|
||||
ConfigVideoInverseTelecine[i] = atoi(value);
|
||||
VideoSetInverseTelecine(ConfigVideoInverseTelecine);
|
||||
return true;
|
||||
}
|
||||
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Denoise");
|
||||
if (!strcmp(name, buf)) {
|
||||
if (!strcasecmp(name, buf)) {
|
||||
ConfigVideoDenoise[i] = atoi(value);
|
||||
VideoSetDenoise(ConfigVideoDenoise);
|
||||
return true;
|
||||
}
|
||||
snprintf(buf, sizeof(buf), "%s.%s", Resolution[i], "Sharpen");
|
||||
if (!strcmp(name, buf)) {
|
||||
if (!strcasecmp(name, buf)) {
|
||||
ConfigVideoSharpen[i] = atoi(value);
|
||||
VideoSetSharpen(ConfigVideoSharpen);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!strcmp(name, "AudioDelay")) {
|
||||
if (!strcasecmp(name, "AudioDelay")) {
|
||||
VideoSetAudioDelay(ConfigVideoAudioDelay = atoi(value));
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "AudioPassthrough")) {
|
||||
if (!strcasecmp(name, "AudioPassthrough")) {
|
||||
CodecSetAudioPassthrough(ConfigAudioPassthrough = atoi(value));
|
||||
return true;
|
||||
}
|
||||
if (!strcasecmp(name, "AudioDownmix")) {
|
||||
CodecSetAudioDownmix(ConfigAudioDownmix = atoi(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!strcmp(name, "AutoCrop.Interval")) {
|
||||
if (!strcasecmp(name, "AutoCrop.Interval")) {
|
||||
VideoSetAutoCrop(ConfigAutoCropInterval =
|
||||
atoi(value), ConfigAutoCropDelay, ConfigAutoCropTolerance);
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "AutoCrop.Delay")) {
|
||||
if (!strcasecmp(name, "AutoCrop.Delay")) {
|
||||
VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay =
|
||||
atoi(value), ConfigAutoCropTolerance);
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "AutoCrop.Tolerance")) {
|
||||
if (!strcasecmp(name, "AutoCrop.Tolerance")) {
|
||||
VideoSetAutoCrop(ConfigAutoCropInterval, ConfigAutoCropDelay,
|
||||
ConfigAutoCropTolerance = atoi(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!strcmp(name, "Suspend.Close")) {
|
||||
if (!strcasecmp(name, "Suspend.Close")) {
|
||||
ConfigSuspendClose = atoi(value);
|
||||
return true;
|
||||
}
|
||||
if (!strcmp(name, "Suspend.X11")) {
|
||||
if (!strcasecmp(name, "Suspend.X11")) {
|
||||
ConfigSuspendX11 = atoi(value);
|
||||
return true;
|
||||
}
|
||||
@@ -1385,6 +1659,9 @@ const char **cPluginSoftHdDevice::SVDRPHelpPages(void)
|
||||
static const char *text[] = {
|
||||
"SUSP\n" " Suspend plugin.\n",
|
||||
"RESU\n" " Resume plugin.\n",
|
||||
"DETA\n" " Detach plugin.\n",
|
||||
"ATTA\n" " Attach plugin.\n",
|
||||
"HOTK key\n" " Execute hotkey.\n",
|
||||
NULL
|
||||
};
|
||||
|
||||
@@ -1393,6 +1670,10 @@ const char **cPluginSoftHdDevice::SVDRPHelpPages(void)
|
||||
|
||||
/**
|
||||
** Handle SVDRP commands.
|
||||
**
|
||||
** @param command SVDRP command
|
||||
** @param option all command arguments
|
||||
** @param reply_code reply code
|
||||
*/
|
||||
cString cPluginSoftHdDevice::SVDRPCommand(const char *command,
|
||||
__attribute__ ((unused)) const char *option,
|
||||
@@ -1403,12 +1684,16 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command,
|
||||
return "SoftHdDevice already suspended";
|
||||
}
|
||||
// should be after suspend, but SetPlayMode resumes
|
||||
Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11);
|
||||
SuspendMode = SUSPEND_NORMAL;
|
||||
cControl::Launch(new cSoftHdControl);
|
||||
cControl::Attach();
|
||||
Suspend(ConfigSuspendClose, ConfigSuspendClose, ConfigSuspendX11);
|
||||
return "SoftHdDevice is suspended";
|
||||
}
|
||||
if (!strcasecmp(command, "RESU")) {
|
||||
if (SuspendMode != SUSPEND_NORMAL) {
|
||||
return "can't resume SoftHdDevice";
|
||||
}
|
||||
if (ShutdownHandler.GetUserInactiveTime()) {
|
||||
ShutdownHandler.SetUserInactiveTimeout();
|
||||
}
|
||||
@@ -1416,8 +1701,43 @@ cString cPluginSoftHdDevice::SVDRPCommand(const char *command,
|
||||
cControl::Shutdown(); // not need, if not suspended
|
||||
}
|
||||
Resume();
|
||||
SuspendMode = 0;
|
||||
return "SoftHdDevice is resumed";
|
||||
}
|
||||
if (!strcasecmp(command, "DETA")) {
|
||||
if (cSoftHdControl::Player) { // already suspended
|
||||
if (SuspendMode == SUSPEND_DETACHED) {
|
||||
return "SoftHdDevice already detached";
|
||||
}
|
||||
return "can't suspend SoftHdDevice already suspended";
|
||||
}
|
||||
Suspend(1, 1, 0);
|
||||
SuspendMode = SUSPEND_DETACHED;
|
||||
cControl::Launch(new cSoftHdControl);
|
||||
cControl::Attach();
|
||||
return "SoftHdDevice is detached";
|
||||
}
|
||||
if (!strcasecmp(command, "ATTA")) {
|
||||
if (SuspendMode != SUSPEND_DETACHED) {
|
||||
return "can't attach SoftHdDevice not detached";
|
||||
}
|
||||
if (ShutdownHandler.GetUserInactiveTime()) {
|
||||
ShutdownHandler.SetUserInactiveTimeout();
|
||||
}
|
||||
if (cSoftHdControl::Player) { // suspended
|
||||
cControl::Shutdown(); // not need, if not suspended
|
||||
}
|
||||
Resume();
|
||||
SuspendMode = 0;
|
||||
return "SoftHdDevice is attached";
|
||||
}
|
||||
if (!strcasecmp(command, "HOTK")) {
|
||||
int hotk;
|
||||
|
||||
hotk = strtol(option, NULL, 0);
|
||||
HandleHotkey(hotk);
|
||||
return "hot-key executed";
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
280 video.c
@@ -41,10 +41,13 @@
|
||||
#define USE_GRAB ///< experimental grab code
|
||||
#define noUSE_GLX ///< outdated GLX code
|
||||
#define noUSE_DOUBLEBUFFER ///< use GLX double buffers
|
||||
|
||||
//#define USE_VAAPI ///< enable vaapi support
|
||||
//#define USE_VDPAU ///< enable vdpau support
|
||||
#define noUSE_BITMAP ///< use vdpau bitmap surface
|
||||
//#define AV_INFO ///< log a/v sync informations
|
||||
#ifndef AV_INFO_TIME
|
||||
#define AV_INFO_TIME (50 * 60) ///< a/v info every minute
|
||||
#endif
|
||||
|
||||
#define USE_VIDEO_THREAD ///< run decoder in an own thread
|
||||
|
||||
@@ -70,6 +73,7 @@
|
||||
#endif
|
||||
#include <pthread.h>
|
||||
#include <time.h>
|
||||
#include <signal.h>
|
||||
#ifndef HAVE_PTHREAD_NAME
|
||||
/// only available with newer glibc
|
||||
#define pthread_setname_np(thread, name)
|
||||
@@ -127,6 +131,11 @@ typedef enum
|
||||
#ifdef USE_GLX
|
||||
#include <va/va_glx.h>
|
||||
#endif
|
||||
#ifndef VA_SURFACE_ATTRIB_SETTABLE
|
||||
/// make source compatible with old libva
|
||||
#define vaCreateSurfaces(d, f, w, h, s, ns, a, na) \
|
||||
vaCreateSurfaces(d, w, h, f, ns, s)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef USE_VDPAU
|
||||
@@ -225,6 +234,7 @@ typedef struct _video_module_
|
||||
void (*const RenderFrame) (VideoHwDecoder *, const AVCodecContext *,
|
||||
const AVFrame *);
|
||||
uint8_t *(*const GrabOutput)(int *, int *, int *);
|
||||
void (*const SetBackground) (uint32_t);
|
||||
void (*const SetVideoMode) (void);
|
||||
void (*const ResetAutoCrop) (void);
|
||||
|
||||
@@ -262,6 +272,8 @@ typedef struct _video_module_
|
||||
// Variables
|
||||
//----------------------------------------------------------------------------
|
||||
|
||||
char VideoIgnoreRepeatPict; ///< disable repeat pict warning
|
||||
|
||||
static Display *XlibDisplay; ///< Xlib X11 display
|
||||
static xcb_connection_t *Connection; ///< xcb connection
|
||||
static xcb_colormap_t VideoColormap; ///< video colormap
|
||||
@@ -282,6 +294,7 @@ static char VideoSurfaceModesChanged; ///< flag surface modes changed
|
||||
/// flag use transparent OSD.
|
||||
static const char VideoTransparentOsd = 1;
|
||||
|
||||
static uint32_t VideoBackground; ///< video background color
|
||||
static int VideoSkipLines; ///< skip video lines top/bottom
|
||||
static char VideoStudioLevels; ///< flag use studio levels
|
||||
|
||||
@@ -318,7 +331,7 @@ int VideoAudioDelay;
|
||||
/// Default zoom mode
|
||||
static VideoZoomModes Video4to3ZoomMode;
|
||||
|
||||
//static char VideoSoftStartSync; ///< soft start sync audio/video
|
||||
static char VideoSoftStartSync = 1; ///< soft start sync audio/video
|
||||
|
||||
static char Video60HzMode; ///< handle 60hz displays
|
||||
|
||||
@@ -327,7 +340,7 @@ static xcb_atom_t NetWmState; ///< wm-state message atom
|
||||
static xcb_atom_t NetWmStateFullscreen; ///< fullscreen wm-state message atom
|
||||
|
||||
extern uint32_t VideoSwitch; ///< ticks for channel switch
|
||||
extern atomic_t VideoPacketsFilled; ///< how many of the buffer is used
|
||||
extern void AudioVideoReady(void); ///< tell audio video is ready
|
||||
|
||||
#ifdef USE_VIDEO_THREAD
|
||||
|
||||
@@ -354,6 +367,7 @@ static int64_t VideoDeltaPTS; ///< FIXME: fix pts
|
||||
|
||||
static void VideoThreadLock(void); ///< lock video thread
|
||||
static void VideoThreadUnlock(void); ///< unlock video thread
|
||||
static void VideoThreadExit(void); ///< exit/kill video thread
|
||||
|
||||
#if defined(DEBUG) || defined(AV_INFO)
|
||||
///
|
||||
@@ -368,6 +382,9 @@ static const char *VideoTimeStampString(int64_t ts)
|
||||
int ss;
|
||||
int uu;
|
||||
|
||||
if (ts == (int64_t) AV_NOPTS_VALUE) {
|
||||
return "--:--:--.---";
|
||||
}
|
||||
idx ^= 1; // support two static buffers
|
||||
ts = ts / 90;
|
||||
uu = ts % 1000;
|
||||
@@ -394,19 +411,20 @@ static void VideoSetPts(int64_t * pts_p, int interlaced, const AVFrame * frame)
int64_t pts;

// update video clock
if ((uint64_t) * pts_p != AV_NOPTS_VALUE) {
if (*pts_p != (int64_t) AV_NOPTS_VALUE) {
*pts_p += interlaced ? 40 * 90 : 20 * 90;
}
//av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
//pts = frame->best_effort_timestamp;
pts = frame->pkt_pts;
if ((uint64_t) pts == AV_NOPTS_VALUE || !pts) {
if (pts == (int64_t) AV_NOPTS_VALUE || !pts) {
// libav: 0.8pre didn't set pts
pts = frame->pkt_dts;
}
// libav: sets only pkt_dts which can be 0
if (pts && (uint64_t) pts != AV_NOPTS_VALUE) {
if (pts && pts != (int64_t) AV_NOPTS_VALUE) {
// build a monotonic pts
if ((uint64_t) * pts_p != AV_NOPTS_VALUE) {
if (*pts_p != (int64_t) AV_NOPTS_VALUE) {
int64_t delta;

delta = pts - *pts_p;
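Note: all timestamps here are in the MPEG 90 kHz PTS clock, so one interlaced frame at 25 fps is 40 ms * 90 = 3600 ticks and a progressive step is 20 ms * 90 = 1800 ticks. A small sketch of the same "advance the clock, then resync to the frame PTS" idea, assuming the libavutil headers for AV_NOPTS_VALUE; the helper name is invented, and unlike VideoSetPts() above it omits the delta check before accepting the new value.

    #include <stdint.h>

    // Illustrative only: advance a 90 kHz video clock by one frame,
    // then resync it to the decoded frame's PTS when one is available.
    static void AdvanceVideoClock(int64_t * clock, int interlaced,
        int64_t frame_pts)
    {
        if (*clock != (int64_t) AV_NOPTS_VALUE) {
            *clock += interlaced ? 40 * 90 : 20 * 90;   // 40 ms / 20 ms in ticks
        }
        if (frame_pts && frame_pts != (int64_t) AV_NOPTS_VALUE) {
            *clock = frame_pts;                         // resync to decoder PTS
        }
    }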
@@ -1329,6 +1347,7 @@ struct _vaapi_decoder_
|
||||
struct timespec FrameTime; ///< time of last display
|
||||
int64_t PTS; ///< video PTS clock
|
||||
|
||||
int StartCounter; ///< number of start frames
|
||||
int FramesDuped; ///< number of frames duplicated
|
||||
int FramesMissed; ///< number of frames missed
|
||||
int FramesDropped; ///< number of frames dropped
|
||||
@@ -1345,6 +1364,9 @@ static void VaapiBlackSurface(VaapiDecoder *);
|
||||
/// forward destroy deinterlace images
|
||||
static void VaapiDestroyDeinterlaceImages(VaapiDecoder *);
|
||||
|
||||
/// forward definition release surface
|
||||
static void VaapiReleaseSurface(VaapiDecoder *, VASurfaceID);
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// VA-API Functions
|
||||
//----------------------------------------------------------------------------
|
||||
@@ -1457,9 +1479,9 @@ static void VaapiCreateSurfaces(VaapiDecoder * decoder, int width, int height)
|
||||
|
||||
decoder->SurfaceFreeN = decoder->SurfacesNeeded;
|
||||
// VA_RT_FORMAT_YUV420 VA_RT_FORMAT_YUV422 VA_RT_FORMAT_YUV444
|
||||
if (vaCreateSurfaces(decoder->VaDisplay, width, height,
|
||||
VA_RT_FORMAT_YUV420, decoder->SurfaceFreeN,
|
||||
decoder->SurfacesFree) != VA_STATUS_SUCCESS) {
|
||||
if (vaCreateSurfaces(decoder->VaDisplay, VA_RT_FORMAT_YUV420, width,
|
||||
height, decoder->SurfacesFree, decoder->SurfaceFreeN, NULL,
|
||||
0) != VA_STATUS_SUCCESS) {
|
||||
Fatal(_("video/vaapi: can't create %d surfaces\n"),
|
||||
decoder->SurfaceFreeN);
|
||||
// FIXME: write error handler / fallback
|
||||
@@ -1498,9 +1520,6 @@ static void VaapiDestroySurfaces(VaapiDecoder * decoder)
|
||||
// FIXME surfaces used for output
|
||||
}
|
||||
|
||||
/// forward definition release surface
|
||||
static void VaapiReleaseSurface(VaapiDecoder *, VASurfaceID);
|
||||
|
||||
///
|
||||
/// Get a free surface.
|
||||
///
|
||||
@@ -1800,10 +1819,13 @@ static void VaapiCleanup(VaapiDecoder * decoder)
|
||||
if (decoder->DeintImages[0].image_id != VA_INVALID_ID) {
|
||||
VaapiDestroyDeinterlaceImages(decoder);
|
||||
}
|
||||
decoder->SurfaceRead = 0;
|
||||
decoder->SurfaceWrite = 0;
|
||||
|
||||
decoder->SurfaceField = 1;
|
||||
|
||||
//decoder->FrameCounter = 0;
|
||||
decoder->StartCounter = 0;
|
||||
decoder->PTS = AV_NOPTS_VALUE;
|
||||
VideoDeltaPTS = 0;
|
||||
}
|
||||
@@ -1923,8 +1945,8 @@ static void Vaapi1080i(void)
|
||||
Error(_("codec: can't create config"));
|
||||
return;
|
||||
}
|
||||
if (vaCreateSurfaces(VaDisplay, 1920, 1080, VA_RT_FORMAT_YUV420, 32,
|
||||
surfaces) != VA_STATUS_SUCCESS) {
|
||||
if (vaCreateSurfaces(VaDisplay, VA_RT_FORMAT_YUV420, 1920, 1080, surfaces,
|
||||
32, NULL, 0) != VA_STATUS_SUCCESS) {
|
||||
Error(_("video/vaapi: can't create surfaces\n"));
|
||||
return;
|
||||
}
|
||||
@@ -2058,6 +2080,17 @@ static int VaapiInit(const char *display_name)
|
||||
attr.value ? _("direct mapped") : _("copied"));
|
||||
// FIXME: handle the cases: new liba: Don't use it.
|
||||
|
||||
attr.type = VADisplayAttribBackgroundColor;
|
||||
attr.flags = VA_DISPLAY_ATTRIB_SETTABLE;
|
||||
if (vaGetDisplayAttributes(VaDisplay, &attr, 1) != VA_STATUS_SUCCESS) {
|
||||
Error(_("video/vaapi: Can't get background-color attribute\n"));
|
||||
attr.value = 1;
|
||||
}
|
||||
Info(_("video/vaapi: background-color is %s\n"),
|
||||
attr.value ? _("supported") : _("unsupported"));
|
||||
|
||||
// FIXME: VaapiSetBackground(VideoBackground);
|
||||
|
||||
#if 0
|
||||
//
|
||||
// check the chroma format
|
||||
@@ -2970,10 +3003,10 @@ static void VaapiQueueSurface(VaapiDecoder * decoder, VASurfaceID surface,
|
||||
if ((old = decoder->SurfacesRb[decoder->SurfaceWrite])
|
||||
!= VA_INVALID_ID) {
|
||||
|
||||
#if 0
|
||||
if (vaSyncSurface(decoder->VaDisplay, old) != VA_STATUS_SUCCESS) {
|
||||
Error(_("video/vaapi: vaSyncSurface failed\n"));
|
||||
}
|
||||
#if 0
|
||||
VASurfaceStatus status;
|
||||
|
||||
if (vaQuerySurfaceStatus(decoder->VaDisplay, old, &status)
|
||||
@@ -3053,9 +3086,9 @@ static void VaapiBlackSurface(VaapiDecoder * decoder)
|
||||
}
|
||||
|
||||
if (decoder->BlackSurface == VA_INVALID_ID) {
|
||||
if (vaCreateSurfaces(decoder->VaDisplay, VideoWindowWidth,
|
||||
VideoWindowHeight, VA_RT_FORMAT_YUV420, 1,
|
||||
&decoder->BlackSurface) != VA_STATUS_SUCCESS) {
|
||||
if (vaCreateSurfaces(decoder->VaDisplay, VA_RT_FORMAT_YUV420,
|
||||
VideoWindowWidth, VideoWindowHeight, &decoder->BlackSurface, 1,
|
||||
NULL, 0) != VA_STATUS_SUCCESS) {
|
||||
Error(_("video/vaapi: can't create a surface\n"));
|
||||
return;
|
||||
}
|
||||
@@ -4222,7 +4255,7 @@ static void VaapiAdvanceFrame(void)
|
||||
Warning(_
|
||||
("video: display buffer empty, duping frame (%d/%d) %d\n"),
|
||||
decoder->FramesDuped, decoder->FrameCounter,
|
||||
atomic_read(&VideoPacketsFilled));
|
||||
VideoGetBuffers());
|
||||
}
|
||||
last_warned_frame = decoder->FrameCounter;
|
||||
if (!(decoder->FramesDisplayed % 300)) {
|
||||
@@ -4355,22 +4388,30 @@ static void VaapiSyncDisplayFrame(VaapiDecoder * decoder)
filled = atomic_read(&decoder->SurfacesFilled);
// FIXME: audio not known assume 333ms delay

decoder->StartCounter++;
if (!VideoSoftStartSync && decoder->StartCounter < 60
&& (audio_clock == (int64_t) AV_NOPTS_VALUE
|| video_clock > audio_clock + VideoAudioDelay + 120 * 90)) {
Debug(3, "video: initial slow down %d\n", decoder->StartCounter);
decoder->DupNextFrame = 2;
}

if (decoder->DupNextFrame) {
++decoder->FramesDuped;
decoder->DupNextFrame--;
} else if ((uint64_t) audio_clock != AV_NOPTS_VALUE
&& (uint64_t) video_clock != AV_NOPTS_VALUE) {
++decoder->FramesDuped;
} else if (audio_clock != (int64_t) AV_NOPTS_VALUE
&& video_clock != (int64_t) AV_NOPTS_VALUE) {
// both clocks are known

if (abs(video_clock - audio_clock) > 5000 * 90) {
Debug(3, "video: pts difference too big\n");
} else if (video_clock > audio_clock + VideoAudioDelay + 80 * 90) {
} else if (video_clock > audio_clock + VideoAudioDelay + 100 * 90) {
Debug(3, "video: slow down video\n");
decoder->DupNextFrame += 2;
} else if (video_clock > audio_clock + VideoAudioDelay + 30 * 90) {
} else if (video_clock > audio_clock + VideoAudioDelay + 45 * 90) {
Debug(3, "video: slow down video\n");
decoder->DupNextFrame++;
} else if (audio_clock + VideoAudioDelay > video_clock + 40 * 90
} else if (audio_clock + VideoAudioDelay > video_clock + 15 * 90
&& filled > 1) {
Debug(3, "video: speed up video\n");
decoder->DropNextFrame = 1;
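Note: the retuned thresholds are all in 90 kHz PTS ticks: duplicate two frames once video leads audio by more than 100 ms (was 80 ms), duplicate one above 45 ms (was 30 ms), and drop a frame once audio leads by more than 15 ms (was 40 ms) with spare buffers. A compact sketch of that decision; the function name and the signed return convention are illustrative, the real logic lives in VaapiSyncDisplayFrame()/VdpauSyncDisplayFrame().

    #include <stdint.h>

    // Illustrative A/V sync decision: positive = frames to duplicate,
    // negative = frames to drop, 0 = leave alone.  Values in 90 kHz ticks.
    static int AvSyncDecision(int64_t video_clock, int64_t audio_clock,
        int audio_delay, int buffers_filled)
    {
        int64_t diff = video_clock - audio_clock;
        int64_t lead = video_clock - (audio_clock + audio_delay);

        if (diff < 0) {
            diff = -diff;
        }
        if (diff > 5000 * 90) {
            return 0;                   // clocks too far apart, don't fight it
        }
        if (lead > 100 * 90) {
            return 2;                   // video far ahead: duplicate two frames
        }
        if (lead > 45 * 90) {
            return 1;                   // video slightly ahead: duplicate one
        }
        if (-lead > 15 * 90 && buffers_filled > 1) {
            return -1;                  // audio ahead: drop the next frame
        }
        return 0;
    }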
@@ -4379,11 +4420,12 @@ static void VaapiSyncDisplayFrame(VaapiDecoder * decoder)
|
||||
#if defined(DEBUG) || defined(AV_INFO)
|
||||
// debug audio/video sync
|
||||
if (decoder->DupNextFrame || decoder->DropNextFrame
|
||||
|| !(decoder->FramesDisplayed % (50 * 10))) {
|
||||
|| !(decoder->FramesDisplayed % AV_INFO_TIME)) {
|
||||
Info("video: %s%+5" PRId64 " %4" PRId64 " %3d/\\ms %3d v-buf\n",
|
||||
VideoTimeStampString(video_clock),
|
||||
(video_clock - audio_clock) / 90, AudioGetDelay() / 90,
|
||||
(int)VideoDeltaPTS / 90, atomic_read(&VideoPacketsFilled));
|
||||
abs((video_clock - audio_clock) / 90) <
|
||||
9999 ? ((video_clock - audio_clock) / 90) : 88888,
|
||||
AudioGetDelay() / 90, (int)VideoDeltaPTS / 90, VideoGetBuffers());
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@@ -4459,7 +4501,7 @@ static void VaapiSyncRenderFrame(VaapiDecoder * decoder,
|
||||
static int64_t VaapiGetClock(const VaapiDecoder * decoder)
|
||||
{
|
||||
// pts is the timestamp of the latest decoded frame
|
||||
if (!decoder || (uint64_t) decoder->PTS == AV_NOPTS_VALUE) {
|
||||
if (!decoder || decoder->PTS == (int64_t) AV_NOPTS_VALUE) {
|
||||
return AV_NOPTS_VALUE;
|
||||
}
|
||||
// subtract buffered decoded frames
|
||||
@@ -4472,6 +4514,16 @@ static int64_t VaapiGetClock(const VaapiDecoder * decoder)
|
||||
2);
|
||||
}
|
||||
|
||||
///
|
||||
/// Set VA-API background color.
|
||||
///
|
||||
/// @param rgba 32 bit RGBA color.
|
||||
///
|
||||
static void VaapiSetBackground( __attribute__ ((unused)) uint32_t rgba)
|
||||
{
|
||||
Error(_("video/vaapi: FIXME: SetBackground not supported\n"));
|
||||
}
|
||||
|
||||
///
|
||||
/// Set VA-API video mode.
|
||||
///
|
||||
@@ -4769,6 +4821,7 @@ static const VideoModule VaapiModule = {
|
||||
.RenderFrame = (void (*const) (VideoHwDecoder *,
|
||||
const AVCodecContext *, const AVFrame *))VaapiSyncRenderFrame,
|
||||
.GrabOutput = NULL,
|
||||
.SetBackground = VaapiSetBackground,
|
||||
.SetVideoMode = VaapiSetVideoMode,
|
||||
.ResetAutoCrop = VaapiResetAutoCrop,
|
||||
.Thread = VaapiDisplayHandlerThread,
|
||||
@@ -4850,6 +4903,7 @@ typedef struct _vdpau_decoder_
|
||||
struct timespec FrameTime; ///< time of last display
|
||||
int64_t PTS; ///< video PTS clock
|
||||
|
||||
int StartCounter; ///< number of start frames
|
||||
int FramesDuped; ///< number of frames duplicated
|
||||
int FramesMissed; ///< number of frames missed
|
||||
int FramesDropped; ///< number of frames dropped
|
||||
@@ -4868,8 +4922,9 @@ static VdpGetProcAddress *VdpauGetProcAddress; ///< entry point to use
|
||||
/// presentation queue target
|
||||
static VdpPresentationQueueTarget VdpauQueueTarget;
|
||||
static VdpPresentationQueue VdpauQueue; ///< presentation queue
|
||||
static VdpColor VdpauBackgroundColor[1]; ///< queue background color
|
||||
static VdpColor VdpauQueueBackgroundColor[1]; ///< queue background color
|
||||
|
||||
static int VdpauBackground; ///< background supported
|
||||
static int VdpauHqScalingMax; ///< highest supported scaling level
|
||||
static int VdpauTemporal; ///< temporal deinterlacer supported
|
||||
static int VdpauTemporalSpatial; ///< temporal spatial deint. supported
|
||||
@@ -5139,9 +5194,10 @@ static void VdpauMixerSetup(VdpauDecoder * decoder)
|
||||
VdpVideoMixerFeature features[15];
|
||||
VdpBool enables[15];
|
||||
int feature_n;
|
||||
VdpVideoMixerAttribute attributes[4];
|
||||
void const *attribute_value_ptrs[4];
|
||||
VdpVideoMixerAttribute attributes[5];
|
||||
void const *attribute_value_ptrs[5];
|
||||
int attribute_n;
|
||||
VdpColor background_color[1];
|
||||
uint8_t skip_chroma_value;
|
||||
float noise_reduction_level;
|
||||
float sharpness_level;
|
||||
@@ -5217,7 +5273,20 @@ static void VdpauMixerSetup(VdpauDecoder * decoder)
|
||||
VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA
|
||||
VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA
|
||||
*/
|
||||
|
||||
attribute_n = 0;
|
||||
// none video-area background color
|
||||
if (VdpauBackground) {
|
||||
background_color->red = (VideoBackground >> 24) / 255.0;
|
||||
background_color->green = ((VideoBackground >> 16) & 0xFF) / 255.0;
|
||||
background_color->blue = ((VideoBackground >> 8) & 0xFF) / 255.0;
|
||||
background_color->alpha = (VideoBackground & 0xFF) / 255.0;
|
||||
attributes[attribute_n] = VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR;
|
||||
attribute_value_ptrs[attribute_n++] = background_color;
|
||||
Debug(3, "video/vdpau: background color %f/%f/%f/%f\n",
|
||||
background_color->red, background_color->green,
|
||||
background_color->blue, background_color->alpha);
|
||||
}
|
||||
if (VdpauSkipChroma) {
|
||||
skip_chroma_value = VideoSkipChromaDeinterlace[decoder->Resolution];
|
||||
attributes[attribute_n]
|
||||
@@ -5511,6 +5580,8 @@ static void VdpauCleanup(VdpauDecoder * decoder)
|
||||
|
||||
decoder->SurfaceField = 0;
|
||||
|
||||
//decoder->FrameCounter = 0;
|
||||
decoder->StartCounter = 0;
|
||||
decoder->PTS = AV_NOPTS_VALUE;
|
||||
VideoDeltaPTS = 0;
|
||||
}
|
||||
@@ -5588,11 +5659,12 @@ static void VdpauInitOutputQueue(void)
|
||||
return;
|
||||
}
|
||||
|
||||
VdpauBackgroundColor->red = 0.01;
|
||||
VdpauBackgroundColor->green = 0.02;
|
||||
VdpauBackgroundColor->blue = 0.03;
|
||||
VdpauBackgroundColor->alpha = 1.00;
|
||||
VdpauPresentationQueueSetBackgroundColor(VdpauQueue, VdpauBackgroundColor);
|
||||
VdpauQueueBackgroundColor->red = 0.01;
|
||||
VdpauQueueBackgroundColor->green = 0.02;
|
||||
VdpauQueueBackgroundColor->blue = 0.03;
|
||||
VdpauQueueBackgroundColor->alpha = 1.00;
|
||||
VdpauPresentationQueueSetBackgroundColor(VdpauQueue,
|
||||
VdpauQueueBackgroundColor);
|
||||
|
||||
//
|
||||
// Create display output surfaces
|
||||
@@ -5888,6 +5960,16 @@ static int VdpauInit(const char *display_name)
|
||||
//
|
||||
// Cache some features
|
||||
//
|
||||
status =
|
||||
VdpauVideoMixerQueryFeatureSupport(VdpauDevice,
|
||||
VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR, &flag);
|
||||
if (status != VDP_STATUS_OK) {
|
||||
Error(_("video/vdpau: can't query feature '%s': %s\n"),
|
||||
"background-color", VdpauGetErrorString(status));
|
||||
} else {
|
||||
VdpauBackground = flag;
|
||||
}
|
||||
|
||||
status =
|
||||
VdpauVideoMixerQueryFeatureSupport(VdpauDevice,
|
||||
VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL, &flag);
|
||||
@@ -5948,8 +6030,6 @@ static int VdpauInit(const char *display_name)
|
||||
VdpauSkipChroma = flag;
|
||||
}
|
||||
|
||||
// VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR
|
||||
|
||||
if (VdpauHqScalingMax) {
|
||||
Info(_("video/vdpau: highest supported high quality scaling %d\n"),
|
||||
VdpauHqScalingMax -
|
||||
@@ -7231,7 +7311,7 @@ static void VdpauAdvanceFrame(void)
|
||||
Warning(_
|
||||
("video: display buffer empty, duping frame (%d/%d) %d\n"),
|
||||
decoder->FramesDuped, decoder->FrameCounter,
|
||||
atomic_read(&VideoPacketsFilled));
|
||||
VideoGetBuffers());
|
||||
}
|
||||
last_warned_frame = decoder->FrameCounter;
|
||||
if (!(decoder->FramesDisplayed % 300)) {
|
||||
@@ -7365,21 +7445,30 @@ static void VdpauSyncDisplayFrame(VdpauDecoder * decoder)
|
||||
filled = atomic_read(&decoder->SurfacesFilled);
|
||||
// FIXME: audio not known assume 333ms delay
|
||||
|
||||
decoder->StartCounter++;
|
||||
if (!VideoSoftStartSync && decoder->StartCounter < 60
|
||||
&& (audio_clock == (int64_t) AV_NOPTS_VALUE
|
||||
|| video_clock > audio_clock + VideoAudioDelay + 120 * 90)) {
|
||||
Debug(3, "video: initial slow down %d\n", decoder->StartCounter);
|
||||
decoder->DupNextFrame = 2;
|
||||
}
|
||||
|
||||
if (decoder->DupNextFrame) {
|
||||
decoder->DupNextFrame--;
|
||||
} else if ((uint64_t) audio_clock != AV_NOPTS_VALUE
|
||||
&& (uint64_t) video_clock != AV_NOPTS_VALUE) {
|
||||
++decoder->FramesDuped;
|
||||
} else if (audio_clock != (int64_t) AV_NOPTS_VALUE
|
||||
&& video_clock != (int64_t) AV_NOPTS_VALUE) {
|
||||
// both clocks are known
|
||||
|
||||
if (abs(video_clock - audio_clock) > 5000 * 90) {
|
||||
Debug(3, "video: pts difference too big\n");
|
||||
} else if (video_clock > audio_clock + VideoAudioDelay + 80 * 90) {
|
||||
} else if (video_clock > audio_clock + VideoAudioDelay + 100 * 90) {
|
||||
Debug(3, "video: slow down video\n");
|
||||
decoder->DupNextFrame += 2;
|
||||
} else if (video_clock > audio_clock + VideoAudioDelay + 30 * 90) {
|
||||
} else if (video_clock > audio_clock + VideoAudioDelay + 45 * 90) {
|
||||
Debug(3, "video: slow down video\n");
|
||||
decoder->DupNextFrame++;
|
||||
} else if (audio_clock + VideoAudioDelay > video_clock + 40 * 90
|
||||
} else if (audio_clock + VideoAudioDelay > video_clock + 15 * 90
|
||||
&& filled > 1 + 2 * decoder->Interlaced) {
|
||||
Debug(3, "video: speed up video\n");
|
||||
decoder->DropNextFrame = 1;
|
||||
@@ -7388,11 +7477,12 @@ static void VdpauSyncDisplayFrame(VdpauDecoder * decoder)
|
||||
#if defined(DEBUG) || defined(AV_INFO)
|
||||
// debug audio/video sync
|
||||
if (decoder->DupNextFrame || decoder->DropNextFrame
|
||||
|| !(decoder->FramesDisplayed % (50 * 10))) {
|
||||
|| !(decoder->FramesDisplayed % AV_INFO_TIME)) {
|
||||
Info("video: %s%+5" PRId64 " %4" PRId64 " %3d/\\ms %3d v-buf\n",
|
||||
VideoTimeStampString(video_clock),
|
||||
(video_clock - audio_clock) / 90, AudioGetDelay() / 90,
|
||||
(int)VideoDeltaPTS / 90, atomic_read(&VideoPacketsFilled));
|
||||
abs((video_clock - audio_clock) / 90) <
|
||||
9999 ? ((video_clock - audio_clock) / 90) : 88888,
|
||||
AudioGetDelay() / 90, (int)VideoDeltaPTS / 90, VideoGetBuffers());
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@@ -7409,14 +7499,9 @@ static void VdpauSyncRenderFrame(VdpauDecoder * decoder,
|
||||
{
|
||||
VideoSetPts(&decoder->PTS, decoder->Interlaced, frame);
|
||||
|
||||
if (VdpauPreemption) { // display preempted
|
||||
return;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
if (!atomic_read(&decoder->SurfacesFilled)) {
|
||||
Debug(3, "video: new stream frame %d\n", GetMsTicks() - VideoSwitch);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (decoder->DropNextFrame) { // drop frame requested
|
||||
++decoder->FramesDropped;
|
||||
@@ -7428,6 +7513,9 @@ static void VdpauSyncRenderFrame(VdpauDecoder * decoder,
|
||||
decoder->DropNextFrame--;
|
||||
return;
|
||||
}
|
||||
if (VdpauPreemption) { // display preempted
|
||||
return;
|
||||
}
|
||||
// if video output buffer is full, wait and display surface.
|
||||
// loop for interlace
|
||||
while (atomic_read(&decoder->SurfacesFilled) >= VIDEO_SURFACES_MAX) {
|
||||
@@ -7474,7 +7562,7 @@ static void VdpauSyncRenderFrame(VdpauDecoder * decoder,
|
||||
static int64_t VdpauGetClock(const VdpauDecoder * decoder)
|
||||
{
|
||||
// pts is the timestamp of the latest decoded frame
|
||||
if (!decoder || (uint64_t) decoder->PTS == AV_NOPTS_VALUE) {
|
||||
if (!decoder || decoder->PTS == (int64_t) AV_NOPTS_VALUE) {
|
||||
return AV_NOPTS_VALUE;
|
||||
}
|
||||
// subtract buffered decoded frames
|
||||
@@ -7536,7 +7624,16 @@ static int VdpauPreemptionRecover(void)
|
||||
}
|
||||
|
||||
///
|
||||
/// Set VA-API video mode.
|
||||
/// Set VDPAU background color.
|
||||
///
|
||||
/// @param rgba 32 bit RGBA color.
|
||||
///
|
||||
static void VdpauSetBackground( __attribute__ ((unused)) uint32_t rgba)
|
||||
{
|
||||
}
|
||||
|
||||
///
|
||||
/// Set VDPAU video mode.
|
||||
///
|
||||
static void VdpauSetVideoMode(void)
|
||||
{
|
||||
@@ -7876,6 +7973,7 @@ static const VideoModule VdpauModule = {
|
||||
.RenderFrame = (void (*const) (VideoHwDecoder *,
|
||||
const AVCodecContext *, const AVFrame *))VdpauSyncRenderFrame,
|
||||
.GrabOutput = VdpauGrabOutputSurface,
|
||||
.SetBackground = VdpauSetBackground,
|
||||
.SetVideoMode = VdpauSetVideoMode,
|
||||
.ResetAutoCrop = VdpauResetAutoCrop,
|
||||
.Thread = VdpauDisplayHandlerThread,
|
||||
@@ -8194,6 +8292,37 @@ static void VideoDisplayFrame(void)
|
||||
/// C callback feed key press
|
||||
extern void FeedKeyPress(const char *, const char *, int, int);
|
||||
|
||||
///
|
||||
/// Handle XLib I/O Errors.
|
||||
///
|
||||
/// @param display display with i/o error
|
||||
///
|
||||
static int VideoIOErrorHandler( __attribute__ ((unused)) Display * display)
|
||||
{
|
||||
|
||||
Error(_("video: fatal i/o error\n"));
|
||||
// should be called from VideoThread
|
||||
if (VideoThread && VideoThread == pthread_self()) {
|
||||
Debug(3, "video: called from video thread\n");
|
||||
VideoUsedModule = NULL; // FIXME: NoopModule;
|
||||
XlibDisplay = NULL;
|
||||
VideoWindow = XCB_NONE;
|
||||
#ifdef USE_VIDEO_THREAD
|
||||
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
|
||||
pthread_cond_destroy(&VideoWakeupCond);
|
||||
pthread_mutex_destroy(&VideoLockMutex);
|
||||
pthread_mutex_destroy(&VideoMutex);
|
||||
VideoThread = 0;
|
||||
pthread_exit("video thread exit");
|
||||
#endif
|
||||
}
|
||||
do {
|
||||
sleep(1000);
|
||||
} while (1); // let other threads running
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
///
|
||||
/// Handle X11 events.
|
||||
///
|
||||
@@ -8503,6 +8632,7 @@ void VideoReleaseSurface(VideoHwDecoder * decoder, unsigned surface)
|
||||
enum PixelFormat Video_get_format(VideoHwDecoder * decoder,
|
||||
AVCodecContext * video_ctx, const enum PixelFormat *fmt)
|
||||
{
|
||||
AudioVideoReady();
|
||||
if (VideoUsedModule) {
|
||||
return VideoUsedModule->get_format(decoder, video_ctx, fmt);
|
||||
}
|
||||
@@ -8519,8 +8649,9 @@ enum PixelFormat Video_get_format(VideoHwDecoder * decoder,
|
||||
void VideoRenderFrame(VideoHwDecoder * decoder,
|
||||
const AVCodecContext * video_ctx, const AVFrame * frame)
|
||||
{
|
||||
if (frame->repeat_pict) {
|
||||
Warning(_("video: repeated pict found, but not handled\n"));
|
||||
if (frame->repeat_pict && !VideoIgnoreRepeatPict) {
|
||||
Warning(_("video: repeated pict %d found, but not handled\n"),
|
||||
frame->repeat_pict);
|
||||
}
|
||||
if (VideoUsedModule) {
|
||||
VideoUsedModule->RenderFrame(decoder, video_ctx, frame);
|
||||
@@ -8596,7 +8727,6 @@ void VideoDrawRenderState(VideoHwDecoder * hw_decoder,
|
||||
return;
|
||||
}
|
||||
Error(_("video/vdpau: draw render state, without vdpau enabled\n"));
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -8898,6 +9028,17 @@ int VideoSetGeometry(const char *geometry)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Set 60hz display mode.
|
||||
///
|
||||
/// Pull up 50 Hz video for 60 Hz display.
|
||||
///
|
||||
/// @param onoff enable / disable the 60 Hz mode.
|
||||
///
|
||||
void VideoSet60HzMode(int onoff)
|
||||
{
|
||||
Video60HzMode = onoff;
|
||||
}
|
||||
|
||||
///
|
||||
/// Set video output position.
|
||||
///
|
||||
@@ -9018,6 +9159,10 @@ void VideoSetFullscreen(int onoff)
|
||||
{
|
||||
xcb_client_message_event_t event;
|
||||
|
||||
if (!XlibDisplay) { // needs running connection
|
||||
return;
|
||||
}
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
event.response_type = XCB_CLIENT_MESSAGE;
|
||||
event.format = 32;
|
||||
@@ -9134,6 +9279,19 @@ void VideoSetStudioLevels(int onoff)
|
||||
VideoStudioLevels = onoff;
|
||||
}
|
||||
|
||||
///
|
||||
/// Set background color.
|
||||
///
|
||||
/// @param rgba 32 bit RGBA color.
|
||||
///
|
||||
void VideoSetBackground(uint32_t rgba)
|
||||
{
|
||||
VideoBackground = rgba; // save for later start
|
||||
if (VideoUsedModule) {
|
||||
VideoUsedModule->SetBackground(rgba);
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Set audio delay.
|
||||
///
|
||||
@@ -9198,6 +9356,9 @@ void VideoInit(const char *display_name)
|
||||
return;
|
||||
}
|
||||
// XInitThreads();
|
||||
// Register error handler
|
||||
XSetIOErrorHandler(VideoIOErrorHandler);
|
||||
|
||||
// Convert XLIB display to XCB connection
|
||||
if (!(Connection = XGetXCBConnection(XlibDisplay))) {
|
||||
Error(_("video: Can't convert XLIB display to XCB connection\n"));
|
||||
@@ -9318,6 +9479,7 @@ void VideoExit(void)
|
||||
if (VideoUsedModule) {
|
||||
VideoUsedModule->Exit();
|
||||
}
|
||||
VideoUsedModule = NULL; // FIXME: NoopModule;
|
||||
#ifdef USE_GLX
|
||||
if (GlxEnabled) {
|
||||
GlxExit();
|
||||
|
||||
13
video.h
13
video.h
@@ -30,6 +30,12 @@
|
||||
/// Video hardware decoder typedef
|
||||
typedef struct _video_hw_decoder_ VideoHwDecoder;
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Variables
|
||||
//----------------------------------------------------------------------------
|
||||
|
||||
extern char VideoIgnoreRepeatPict; ///< disable repeat pict warning
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Prototypes
|
||||
//----------------------------------------------------------------------------
|
||||
@@ -74,6 +80,9 @@ extern void VideoDisplayWakeup(void);
|
||||
/// Set video geometry.
|
||||
extern int VideoSetGeometry(const char *);
|
||||
|
||||
/// Set 60Hz display mode.
|
||||
extern void VideoSet60HzMode(int);
|
||||
|
||||
/// Set video output position.
|
||||
extern void VideoSetOutputPosition(int, int, int, int);
|
||||
|
||||
@@ -110,6 +119,9 @@ extern void VideoSetSkipLines(int);
|
||||
/// Set studio levels.
|
||||
extern void VideoSetStudioLevels(int);
|
||||
|
||||
/// Set background.
|
||||
extern void VideoSetBackground(uint32_t);
|
||||
|
||||
/// Set audio delay.
|
||||
extern void VideoSetAudioDelay(int);
|
||||
|
||||
@@ -138,5 +150,6 @@ extern void VideoExit(void); ///< Cleanup and exit video module.
|
||||
|
||||
extern void VideoFlushInput(void); ///< Flush video input buffers.
|
||||
extern int VideoDecode(void); ///< Decode video input buffers.
|
||||
extern int VideoGetBuffers(void); ///< Get number of input buffers.
|
||||
|
||||
/// @}
|
||||
|
||||
Reference in New Issue
Block a user