Mirror of https://github.com/jojo61/vdr-plugin-softhdcuvid.git
Synced 2023-10-10 13:37:41 +02:00

Fix issue #42 and changes for faster a/v sync

commit a6e65d953e (parent fecb81486d)

audio.c (80 changed lines)
@@ -86,6 +86,8 @@
 #define __USE_GNU
 #endif
 #include <pthread.h>
+#include <sys/syscall.h>
+#include <sys/resource.h>
 #ifndef HAVE_PTHREAD_NAME
     /// only available with newer glibc
 #define pthread_setname_np(thread, name)
@@ -688,7 +690,7 @@ static int AudioRingAdd(unsigned sample_rate, int channels, int passthrough)
     AudioRing[AudioRingWrite].InChannels = channels;
     AudioRing[AudioRingWrite].HwSampleRate = sample_rate;
     AudioRing[AudioRingWrite].HwChannels = AudioChannelMatrix[u][channels];
-    AudioRing[AudioRingWrite].PTS = INT64_C(0x8000000000000000);
+    AudioRing[AudioRingWrite].PTS = AV_NOPTS_VALUE;
     RingBufferReset(AudioRing[AudioRingWrite].RingBuffer);

     Debug(3, "audio: %d ring buffer prepared\n", atomic_read(&AudioRingFilled) + 1);
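Note. The change above recurs through this commit: the hand-rolled "no PTS" marker is swapped for FFmpeg's own constant. The two are numerically identical; a minimal sketch of the idiom, with a hypothetical helper name that is not part of the plugin:

    #include <libavutil/avutil.h>   /* AV_NOPTS_VALUE expands to INT64_C(0x8000000000000000) */

    /* hypothetical helper: the (int64_t) cast matches the plugin's
     * "(cast) needed for the evil gcc" comment, since the macro
     * expands from an unsigned literal */
    static inline int pts_is_valid(int64_t pts)
    {
        return pts != (int64_t) AV_NOPTS_VALUE;
    }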
@@ -1156,7 +1158,7 @@ static int64_t AlsaGetDelay(void)
         //Debug(3, "audio/alsa: %ld frames delay ok, but not running\n", delay);
 #endif
     }
-    //Debug(3, "audio/alsa: %ld frames hw delay\n", delay);
+    Debug(4, "audio/alsa: %ld frames hw delay\n", delay);

     // delay can be negative, when underrun occur
     if (delay < 0) {
@@ -1291,7 +1293,6 @@ static int AlsaSetup(int *freq, int *channels, int passthrough)
         Info(_("audio/alsa: start delay %ums\n"), (AudioStartThreshold * 1000)
             / (*freq * *channels * AudioBytesProSample));
     }
-
     return 0;
 }

@@ -1989,7 +1990,7 @@ static int AudioNextRing(void)

     // stop, if not enough in next buffer
     used = RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer);
-    if (AudioStartThreshold * 10 < used || (AudioVideoIsReady && AudioStartThreshold < used)) {
+    if (AudioStartThreshold * 4 < used || (AudioVideoIsReady && AudioStartThreshold < used)) {
         return 0;
     }
     return 1;
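Note. AudioStartThreshold and `used` are byte counts in the ring buffer, so lowering the factor from 10 to 4 lets playback resume with far less queued audio. A sketch of the bytes-to-milliseconds conversion the surrounding Debug messages use, assuming 48 kHz stereo S16 (2 bytes per sample):

    /* bytes -> ms with the same formula audio.c uses:
     * one second of audio occupies rate * channels * bytes_per_sample bytes */
    static int audio_bytes_to_ms(size_t bytes, int rate, int channels, int bytes_per_sample)
    {
        return (int)((bytes * 1000) / ((size_t)rate * (size_t)channels * (size_t)bytes_per_sample));
    }
    /* example: audio_bytes_to_ms(38400, 48000, 2, 2) == 200 ms */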
@@ -2004,6 +2005,7 @@ static void *AudioPlayHandlerThread(void *dummy)
 {
     Debug(3, "audio: play thread started\n");
+    prctl(PR_SET_NAME, "cuvid audio", 0, 0, 0);

     for (;;) {
         // check if we should stop the thread
         if (AudioThreadStop) {
@@ -2020,9 +2022,9 @@ static void *AudioPlayHandlerThread(void *dummy)
         } while (!AudioRunning);
         pthread_mutex_unlock(&AudioMutex);

-        Debug(3, "audio: ----> %dms start\n", (AudioUsedBytes() * 1000)
+        Debug(3, "audio: ----> %dms %d start\n", (AudioUsedBytes() * 1000)
             / (!AudioRing[AudioRingWrite].HwSampleRate + !AudioRing[AudioRingWrite].HwChannels +
-            AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample));
+            AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample), AudioUsedBytes());

         do {
             int filled;
@@ -2056,10 +2058,8 @@ static void *AudioPlayHandlerThread(void *dummy)
             AudioUsedModule->FlushBuffers();
             atomic_sub(flush, &AudioRingFilled);
             if (AudioNextRing()) {
-                Debug(3, "audio: HandlerThread break after flush\n");
                 break;
             }
-            Debug(3, "audio: continue after flush\n");
         }
         // try to play some samples
         err = 0;
@@ -2252,7 +2252,7 @@ void AudioEnqueue(const void *samples, int count)
             AudioNormalizer(buffer, count);
         }
     }
-
+
     n = RingBufferWrite(AudioRing[AudioRingWrite].RingBuffer, buffer, count);
     if (n != (size_t)count) {
         Error(_("audio: can't place %d samples in ring buffer\n"), count);
@@ -2284,18 +2284,18 @@ void AudioEnqueue(const void *samples, int count)
         }
         // forced start or enough video + audio buffered
        // for some exotic channels * 4 too small
-        if (AudioStartThreshold * 10 < n || (AudioVideoIsReady
+        if (AudioStartThreshold * 4 < n || (AudioVideoIsReady
        // if ((AudioVideoIsReady
             && AudioStartThreshold < n)) {
             // restart play-back
             // no lock needed, can wakeup next time
             AudioRunning = 1;
             pthread_cond_signal(&AudioStartCond);
-            Debug(3, "Start on AudioEnque\n");
+            Debug(3, "Start on AudioEnque Threshold %d n %d\n",AudioStartThreshold,n);
         }
     }
     // Update audio clock (stupid gcc developers thinks INT64_C is unsigned)
-    if (AudioRing[AudioRingWrite].PTS != (int64_t) INT64_C(0x8000000000000000)) {
+    if (AudioRing[AudioRingWrite].PTS != (int64_t) AV_NOPTS_VALUE) {
         AudioRing[AudioRingWrite].PTS += ((int64_t) count * 90 * 1000)
             / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample);
     }
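Note. The PTS advance above is the same bytes-to-time conversion expressed in 90 kHz PTS ticks. A worked example under the same assumptions as before (48 kHz stereo S16): count = 19200 bytes is 100 ms of audio, and 19200 * 90 * 1000 / (48000 * 2 * 2) = 9000 ticks, which is exactly 100 ms at 90 kHz.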
@@ -2311,13 +2311,13 @@ void AudioVideoReady(int64_t pts)
     int64_t audio_pts;
     size_t used;

-    if (pts == (int64_t) INT64_C(0x8000000000000000)) {
+    if (pts == (int64_t) AV_NOPTS_VALUE) {
         Debug(3, "audio: a/v start, no valid video\n");
         return;
     }
     // no valid audio known
     if (!AudioRing[AudioRingWrite].HwSampleRate || !AudioRing[AudioRingWrite].HwChannels
-        || AudioRing[AudioRingWrite].PTS == (int64_t) INT64_C(0x8000000000000000)) {
+        || AudioRing[AudioRingWrite].PTS == (int64_t) AV_NOPTS_VALUE) {
         Debug(3, "audio: a/v start, no valid audio\n");
         AudioVideoIsReady = 1;
         return;
@@ -2325,7 +2325,7 @@ void AudioVideoReady(int64_t pts)
     // Audio.PTS = next written sample time stamp

     used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
-    audio_pts =
+    audio_pts =
         AudioRing[AudioRingWrite].PTS -
         (used * 90 * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
         AudioBytesProSample);
@@ -2337,12 +2337,11 @@ void AudioVideoReady(int64_t pts)

     if (!AudioRunning) {
         int skip;

-        // buffer ~15 video frames
         // FIXME: HDTV can use smaller video buffer
-        skip = pts - 15 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay;
+        skip = pts - 0 * 20 * 90 - AudioBufferTime * 90 - audio_pts + VideoAudioDelay;
 #ifdef DEBUG
-        fprintf(stderr, "%dms %dms %dms\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90);
+        // fprintf(stderr, "a/v-diff %dms a/v-delay %dms skip %dms Audiobuffer %d\n", (int)(pts - audio_pts) / 90, VideoAudioDelay / 90, skip / 90, AudioBufferTime);
 #endif
         // guard against old PTS
         if (skip > 0 && skip < 4000 * 90) {
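Note. All terms of the skip computation are 90 kHz ticks: skip = (pts - audio_pts) - AudioBufferTime * 90 + VideoAudioDelay. The old formula additionally subtracted 15 * 20 * 90 = 27000 ticks (300 ms), holding back roughly 15 video frames before audio start; with the factor set to 0, audio starts about 300 ms sooner. For illustration only (assumed numbers, not from the commit): if video leads audio by 400 ms (36000 ticks), AudioBufferTime is 336 ms and VideoAudioDelay is 0, then skip = 36000 - 30240 = 5760 ticks, i.e. 64 ms of queued audio is dropped, where the old formula would have produced a negative skip and dropped nothing.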
@@ -2353,9 +2352,9 @@ void AudioVideoReady(int64_t pts)
             AudioSkip = skip - used;
             skip = used;
         }
-        Debug(3, "audio: sync advance %dms %d/%zd\n",
+        Debug(3, "audio: sync advance %dms %d/%zd Rest %d\n",
             (skip * 1000) / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
-            AudioBytesProSample), skip, used);
+            AudioBytesProSample), skip, used, AudioSkip);
         RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, skip);

         used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
@@ -2363,7 +2362,6 @@ void AudioVideoReady(int64_t pts)
         Debug(3, "No audio skip -> should skip %d\n", skip / 90);
     }
     // FIXME: skip<0 we need bigger audio buffer
-
     // enough video + audio buffered
     if (AudioStartThreshold < used) {
         AudioRunning = 1;
@@ -2373,38 +2371,7 @@ void AudioVideoReady(int64_t pts)
     }

     AudioVideoIsReady = 1;
-#if 0
-    if (AudioRing[AudioRingWrite].HwSampleRate && AudioRing[AudioRingWrite].HwChannels) {
-        if (pts != (int64_t) INT64_C(0x8000000000000000)
-            && AudioRing[AudioRingWrite].PTS != (int64_t) INT64_C(0x8000000000000000)) {
-            Debug(3, "audio: a/v %d %s\n", (int)(pts - AudioRing[AudioRingWrite].PTS) / 90,
-                AudioRunning ? "running" : "stopped");
-        }
-        Debug(3, "audio: start %4zdms %s|%s video ready\n",
-            (RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer) * 1000)
-            / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels * AudioBytesProSample),
-            Timestamp2String(pts), Timestamp2String(AudioRing[AudioRingWrite].PTS));
-
-        if (!AudioRunning) {
-            size_t used;
-
-            used = RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer);
-            // enough video + audio buffered
-            if (AudioStartThreshold < used) {
-                // too much audio buffered, skip it
-                if (AudioStartThreshold < used) {
-                    Debug(3, "audio: start %4zdms skip video ready\n", ((used - AudioStartThreshold) * 1000)
-                        / (AudioRing[AudioRingWrite].HwSampleRate * AudioRing[AudioRingWrite].HwChannels *
-                        AudioBytesProSample));
-                    RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer, used - AudioStartThreshold);
-                }
-                AudioRunning = 1;
-                pthread_cond_signal(&AudioStartCond);
-            }
-        }
-    }
-    AudioVideoIsReady = 1;
-#endif
 }

 /**
@@ -2439,7 +2406,7 @@ void AudioFlushBuffers(void)
     AudioRing[AudioRingWrite].HwChannels = AudioRing[old].HwChannels;
     AudioRing[AudioRingWrite].InSampleRate = AudioRing[old].InSampleRate;
     AudioRing[AudioRingWrite].InChannels = AudioRing[old].InChannels;
-    AudioRing[AudioRingWrite].PTS = INT64_C(0x8000000000000000);
+    AudioRing[AudioRingWrite].PTS = AV_NOPTS_VALUE;
     RingBufferReadAdvance(AudioRing[AudioRingWrite].RingBuffer,
         RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer));
     Debug(3, "audio: reset video ready\n");
@@ -2512,7 +2479,7 @@ int64_t AudioGetDelay(void)
     pts += ((int64_t) RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer)
         * 90 * 1000) / (AudioRing[AudioRingRead].HwSampleRate * AudioRing[AudioRingRead].HwChannels *
         AudioBytesProSample);
-    Debug(4, "audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer),
+    Debug(4,"audio: hw+sw delay %zd %" PRId64 "ms\n", RingBufferUsedBytes(AudioRing[AudioRingRead].RingBuffer),
         pts / 90);

     return pts;
@@ -2529,6 +2496,7 @@ void AudioSetClock(int64_t pts)
         Debug(4, "audio: set clock %s -> %s pts\n", Timestamp2String(AudioRing[AudioRingWrite].PTS),
             Timestamp2String(pts));
     }
+    // printf("Audiosetclock pts %#012" PRIx64 " %d\n",pts,RingBufferUsedBytes(AudioRing[AudioRingWrite].RingBuffer));
     AudioRing[AudioRingWrite].PTS = pts;
 }

@@ -2540,7 +2508,7 @@ void AudioSetClock(int64_t pts)
 int64_t AudioGetClock(void)
 {
     // (cast) needed for the evil gcc
-    if (AudioRing[AudioRingRead].PTS != (int64_t) INT64_C(0x8000000000000000)) {
+    if (AudioRing[AudioRingRead].PTS != (int64_t) AV_NOPTS_VALUE) {
         int64_t delay;

         // delay zero, if no valid time stamp
@@ -2551,7 +2519,7 @@ int64_t AudioGetClock(void)
         return AudioRing[AudioRingRead].PTS + 0 * 90 - delay;
     }
     }
-    return INT64_C(0x8000000000000000);
+    return AV_NOPTS_VALUE;
 }

 /**
codec.c (421 changed lines)
@@ -96,7 +96,6 @@ static pthread_mutex_t CodecLockMutex;
     /// Flag prefer fast channel switch
 char CodecUsePossibleDefectFrames;
-AVBufferRef *hw_device_ctx;

 //----------------------------------------------------------------------------
 //  Video
 //----------------------------------------------------------------------------
@@ -495,7 +494,7 @@ void DisplayPts(AVCodecContext * video_ctx, AVFrame * frame)
 */
 extern int CuvidTestSurfaces();

-#ifdef YADIF
+#if defined YADIF || defined (VAAPI)
 extern int init_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame);
 extern int push_filters(AVCodecContext * dec_ctx, void *decoder, AVFrame * frame);
 #endif
@@ -599,6 +598,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
     }
     // printf("got %s packet from decoder\n",got_frame?"1":"no");
     if (got_frame) {            // frame completed
+        // printf("video frame pts %#012" PRIx64 " %dms\n",frame->pts,(int)(apts - frame->pts) / 90);
 #ifdef YADIF
         if (decoder->filter) {
             if (decoder->filter == 1) {
@@ -647,7 +647,7 @@ void CodecVideoDecode(VideoDecoder * decoder, const AVPacket * avpkt)
 void CodecVideoFlushBuffers(VideoDecoder * decoder)
 {
     if (decoder->VideoCtx) {
-        avcodec_flush_buffers(decoder->VideoCtx);
+        avcodec_flush_buffers(decoder->VideoCtx);
     }
 }

@@ -679,9 +679,6 @@ struct _audio_decoder_

     AVFrame *Frame;             ///< decoded audio frame buffer

-#if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE)
-    ReSampleContext *ReSample;  ///< old resampling context
-#endif
 #ifdef USE_SWRESAMPLE
 #if LIBSWRESAMPLE_VERSION_INT < AV_VERSION_INT(0, 15, 100)
     struct SwrContext *Resample;    ///< ffmpeg software resample context
@@ -704,16 +701,6 @@ struct _audio_decoder_
     int Drift;                  ///< accumulated audio drift
     int DriftCorr;              ///< audio drift correction value
     int DriftFrac;              ///< audio drift fraction for ac3
-
-#if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE)
-    struct AVResampleContext *AvResample;   ///< second audio resample context
-#define MAX_CHANNELS 8          ///< max number of channels supported
-    int16_t *Buffer[MAX_CHANNELS];  ///< deinterleave sample buffers
-    int BufferSize;             ///< size of sample buffer
-    int16_t *Remain[MAX_CHANNELS];  ///< filter remaining samples
-    int RemainSize;             ///< size of remain buffer
-    int RemainCount;            ///< number of remaining samples
-#endif
 };

 ///
@@ -832,27 +819,7 @@ void CodecAudioOpen(AudioDecoder * audio_decoder, int codec_id)
 void CodecAudioClose(AudioDecoder * audio_decoder)
 {
     // FIXME: output any buffered data
-#if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE)
-    if (audio_decoder->AvResample) {
-        int ch;
-
-        av_resample_close(audio_decoder->AvResample);
-        audio_decoder->AvResample = NULL;
-        audio_decoder->RemainCount = 0;
-        audio_decoder->BufferSize = 0;
-        audio_decoder->RemainSize = 0;
-        for (ch = 0; ch < MAX_CHANNELS; ++ch) {
-            free(audio_decoder->Buffer[ch]);
-            audio_decoder->Buffer[ch] = NULL;
-            free(audio_decoder->Remain[ch]);
-            audio_decoder->Remain[ch] = NULL;
-        }
-    }
-    if (audio_decoder->ReSample) {
-        audio_resample_close(audio_decoder->ReSample);
-        audio_decoder->ReSample = NULL;
-    }
-#endif

 #ifdef USE_SWRESAMPLE
     if (audio_decoder->Resample) {
         swr_free(&audio_decoder->Resample);
@@ -1144,385 +1111,7 @@ static int CodecAudioPassthroughHelper(AudioDecoder * audio_decoder, const AVPac
     return 0;
 }

-#if !defined(USE_SWRESAMPLE) && !defined(USE_AVRESAMPLE)
-
-/**
-**  Set/update audio pts clock.
-**
-**  @param audio_decoder    audio decoder data
-**  @param pts              presentation timestamp
-*/
-static void CodecAudioSetClock(AudioDecoder * audio_decoder, int64_t pts)
-{
-    struct timespec nowtime;
-    int64_t delay;
-    int64_t tim_diff;
-    int64_t pts_diff;
-    int drift;
-    int corr;
-
-    AudioSetClock(pts);
-
-    delay = AudioGetDelay();
-    if (!delay) {
-        return;
-    }
-    clock_gettime(CLOCK_MONOTONIC, &nowtime);
-    if (!audio_decoder->LastDelay) {
-        audio_decoder->LastTime = nowtime;
-        audio_decoder->LastPTS = pts;
-        audio_decoder->LastDelay = delay;
-        audio_decoder->Drift = 0;
-        audio_decoder->DriftFrac = 0;
-        Debug(3, "codec/audio: inital drift delay %" PRId64 "ms\n", delay / 90);
-        return;
-    }
-    // collect over some time
-    pts_diff = pts - audio_decoder->LastPTS;
-    if (pts_diff < 10 * 1000 * 90) {
-        return;
-    }
-
-    tim_diff = (nowtime.tv_sec - audio_decoder->LastTime.tv_sec)
-        * 1000 * 1000 * 1000 + (nowtime.tv_nsec - audio_decoder->LastTime.tv_nsec);
-
-    drift = (tim_diff * 90) / (1000 * 1000) - pts_diff + delay - audio_decoder->LastDelay;
-
-    // adjust rounding error
-    nowtime.tv_nsec -= nowtime.tv_nsec % (1000 * 1000 / 90);
-    audio_decoder->LastTime = nowtime;
-    audio_decoder->LastPTS = pts;
-    audio_decoder->LastDelay = delay;
-
-    if (0) {
-        Debug(3, "codec/audio: interval P:%5" PRId64 "ms T:%5" PRId64 "ms D:%4" PRId64 "ms %f %d\n", pts_diff / 90,
-            tim_diff / (1000 * 1000), delay / 90, drift / 90.0, audio_decoder->DriftCorr);
-    }
-    // underruns and av_resample have the same time :(((
-    if (abs(drift) > 10 * 90) {
-        // drift too big, pts changed?
-        Debug(3, "codec/audio: drift(%6d) %3dms reset\n", audio_decoder->DriftCorr, drift / 90);
-        audio_decoder->LastDelay = 0;
-#ifdef DEBUG
-        corr = 0;                       // keep gcc happy
-#endif
-    } else {
-
-        drift += audio_decoder->Drift;
-        audio_decoder->Drift = drift;
-        corr = (10 * audio_decoder->HwSampleRate * drift) / (90 * 1000);
-        // SPDIF/HDMI passthrough
-        if ((CodecAudioDrift & CORRECT_AC3) && (!(CodecPassthrough & CodecAC3)
-                || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_AC3)
-            && (!(CodecPassthrough & CodecEAC3)
-                || audio_decoder->AudioCtx->codec_id != AV_CODEC_ID_EAC3)) {
-            audio_decoder->DriftCorr = -corr;
-        }
-
-        if (audio_decoder->DriftCorr < -20000) {    // limit correction
-            audio_decoder->DriftCorr = -20000;
-        } else if (audio_decoder->DriftCorr > 20000) {
-            audio_decoder->DriftCorr = 20000;
-        }
-    }
-    // FIXME: this works with libav 0.8, and only with >10ms with ffmpeg 0.10
-    if (audio_decoder->AvResample && audio_decoder->DriftCorr) {
-        int distance;
-
-        // try workaround for buggy ffmpeg 0.10
-        if (abs(audio_decoder->DriftCorr) < 2000) {
-            distance = (pts_diff * audio_decoder->HwSampleRate) / (900 * 1000);
-        } else {
-            distance = (pts_diff * audio_decoder->HwSampleRate) / (90 * 1000);
-        }
-        av_resample_compensate(audio_decoder->AvResample, audio_decoder->DriftCorr / 10, distance);
-    }
-    if (1) {
-        static int c;
-
-        if (!(c++ % 10)) {
-            Debug(3, "codec/audio: drift(%6d) %8dus %5d\n", audio_decoder->DriftCorr, drift * 1000 / 90, corr);
-        }
-    }
-}
-
-/**
-**  Handle audio format changes.
-**
-**  @param audio_decoder    audio decoder data
-**
-**  @note this is the old not good supported version
-*/
-static void CodecAudioUpdateFormat(AudioDecoder * audio_decoder)
-{
-    int passthrough;
-    const AVCodecContext *audio_ctx;
-    int err;
-
-    if (audio_decoder->ReSample) {
-        audio_resample_close(audio_decoder->ReSample);
-        audio_decoder->ReSample = NULL;
-    }
-    if (audio_decoder->AvResample) {
-        av_resample_close(audio_decoder->AvResample);
-        audio_decoder->AvResample = NULL;
-        audio_decoder->RemainCount = 0;
-    }
-
-    audio_ctx = audio_decoder->AudioCtx;
-    if ((err = CodecAudioUpdateHelper(audio_decoder, &passthrough))) {
-
-        Debug(3, "codec/audio: resample %dHz *%d -> %dHz *%d err %d\n", audio_ctx->sample_rate, audio_ctx->channels,
-            audio_decoder->HwSampleRate, audio_decoder->HwChannels, err);
-
-        if (err == 1) {
-            audio_decoder->ReSample =
-                av_audio_resample_init(audio_decoder->HwChannels, audio_ctx->channels, audio_decoder->HwSampleRate,
-                audio_ctx->sample_rate, audio_ctx->sample_fmt, audio_ctx->sample_fmt, 16, 10, 0, 0.8);
-            // libav-0.8_pre didn't support 6 -> 2 channels
-            if (!audio_decoder->ReSample) {
-                Error(_("codec/audio: resample setup error\n"));
-                audio_decoder->HwChannels = 0;
-                audio_decoder->HwSampleRate = 0;
-            }
-            return;
-        }
-        Debug(3, "codec/audio: audio setup error\n");
-        // FIXME: handle errors
-        audio_decoder->HwChannels = 0;
-        audio_decoder->HwSampleRate = 0;
-        return;
-    }
-    if (passthrough) {                  // pass-through no conversion allowed
-        return;
-    }
-    // prepare audio drift resample
-#ifdef USE_AUDIO_DRIFT_CORRECTION
-    if (CodecAudioDrift & CORRECT_PCM) {
-        if (audio_decoder->AvResample) {
-            Error(_("codec/audio: overwrite resample\n"));
-        }
-        audio_decoder->AvResample =
-            av_resample_init(audio_decoder->HwSampleRate, audio_decoder->HwSampleRate, 16, 10, 0, 0.8);
-        if (!audio_decoder->AvResample) {
-            Error(_("codec/audio: AvResample setup error\n"));
-        } else {
-            // reset drift to some default value
-            audio_decoder->DriftCorr /= 2;
-            audio_decoder->DriftFrac = 0;
-            av_resample_compensate(audio_decoder->AvResample, audio_decoder->DriftCorr / 10,
-                10 * audio_decoder->HwSampleRate);
-        }
-    }
-#endif
-}
-
-/**
-**  Codec enqueue audio samples.
-**
-**  @param audio_decoder    audio decoder data
-**  @param data             samples data
-**  @param count            number of bytes in sample data
-*/
-void CodecAudioEnqueue(AudioDecoder * audio_decoder, int16_t * data, int count)
-{
-#ifdef USE_AUDIO_DRIFT_CORRECTION
-    if ((CodecAudioDrift & CORRECT_PCM) && audio_decoder->AvResample) {
-        int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + AV_INPUT_BUFFER_PADDING_SIZE]
-            __attribute__((aligned(16)));
-        int16_t buftmp[MAX_CHANNELS][(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4];
-        int consumed;
-        int i;
-        int n;
-        int ch;
-        int bytes_n;
-
-        bytes_n = count / audio_decoder->HwChannels;
-        // resize sample buffer, if needed
-        if (audio_decoder->RemainCount + bytes_n > audio_decoder->BufferSize) {
-            audio_decoder->BufferSize = audio_decoder->RemainCount + bytes_n;
-            for (ch = 0; ch < MAX_CHANNELS; ++ch) {
-                audio_decoder->Buffer[ch] = realloc(audio_decoder->Buffer[ch], audio_decoder->BufferSize);
-            }
-        }
-        // copy remaining bytes into sample buffer
-        for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
-            memcpy(audio_decoder->Buffer[ch], audio_decoder->Remain[ch], audio_decoder->RemainCount);
-        }
-        // deinterleave samples into sample buffer
-        for (i = 0; i < bytes_n / 2; i++) {
-            for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
-                audio_decoder->Buffer[ch][audio_decoder->RemainCount / 2 + i]
-                    = data[i * audio_decoder->HwChannels + ch];
-            }
-        }
-
-        bytes_n += audio_decoder->RemainSize;
-        n = 0;                          // keep gcc lucky
-        // resample the sample buffer into tmp buffer
-        for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
-            n = av_resample(audio_decoder->AvResample, buftmp[ch], audio_decoder->Buffer[ch], &consumed, bytes_n / 2,
-                sizeof(buftmp[ch]) / 2, ch == audio_decoder->HwChannels - 1);
-            // fixme remaining channels
-            if (bytes_n - consumed * 2 > audio_decoder->RemainSize) {
-                audio_decoder->RemainSize = bytes_n - consumed * 2;
-            }
-            audio_decoder->Remain[ch] = realloc(audio_decoder->Remain[ch], audio_decoder->RemainSize);
-            memcpy(audio_decoder->Remain[ch], audio_decoder->Buffer[ch] + consumed, audio_decoder->RemainSize);
-            audio_decoder->RemainCount = audio_decoder->RemainSize;
-        }
-
-        // interleave samples from sample buffer
-        for (i = 0; i < n; i++) {
-            for (ch = 0; ch < audio_decoder->HwChannels; ++ch) {
-                buf[i * audio_decoder->HwChannels + ch] = buftmp[ch][i];
-            }
-        }
-        n *= 2;
-
-        n *= audio_decoder->HwChannels;
-        if (!(audio_decoder->Passthrough & CodecPCM)) {
-            CodecReorderAudioFrame(buf, n, audio_decoder->HwChannels);
-        }
-        AudioEnqueue(buf, n);
-        return;
-    }
-#endif
-    if (!(audio_decoder->Passthrough & CodecPCM)) {
-        CodecReorderAudioFrame(data, count, audio_decoder->HwChannels);
-    }
-    AudioEnqueue(data, count);
-}
-
-int myavcodec_decode_audio3(AVCodecContext * avctx, int16_t * samples, int *frame_size_ptr, AVPacket * avpkt)
-{
-    AVFrame *frame = av_frame_alloc();
-    int ret, got_frame = 0;
-
-    if (!frame)
-        return AVERROR(ENOMEM);
-#if 0
-    ret = avcodec_decode_audio4(avctx, frame, &got_frame, avpkt);
-#else
-    // SUGGESTION
-    // Now that avcodec_decode_audio4 is deprecated and replaced
-    // by 2 calls (receive frame and send packet), this could be optimized
-    // into separate routines or separate threads.
-    // Also now that it always consumes a whole buffer some code
-    // in the caller may be able to be optimized.
-    ret = avcodec_receive_frame(avctx, frame);
-    if (ret == 0)
-        got_frame = 1;
-    if (ret == AVERROR(EAGAIN))
-        ret = 0;
-    if (ret == 0)
-        ret = avcodec_send_packet(avctx, avpkt);
-    if (ret == AVERROR(EAGAIN))
-        ret = 0;
-    else if (ret < 0) {
-        // Debug(3, "codec/audio: audio decode error: %1 (%2)\n",av_make_error_string(error, sizeof(error), ret),got_frame);
-        return ret;
-    } else
-        ret = avpkt->size;
-#endif
-    if (ret >= 0 && got_frame) {
-        int i, ch;
-        int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
-        int data_size = av_get_bytes_per_sample(avctx->sample_fmt);
-
-        if (data_size < 0) {
-            /* This should not occur, checking just for paranoia */
-            fprintf(stderr, "Failed to calculate data size\n");
-            exit(1);
-        }
-        for (i = 0; i < frame->nb_samples; i++) {
-            for (ch = 0; ch < avctx->channels; ch++) {
-                memcpy(samples, frame->extended_data[ch] + data_size * i, data_size);
-                samples = (char *)samples + data_size;
-            }
-        }
-        // Debug(3,"data_size %d nb_samples %d sample_fmt %d channels %d planar %d\n",data_size,frame->nb_samples,avctx->sample_fmt,avctx->channels,planar);
-        *frame_size_ptr = data_size * avctx->channels * frame->nb_samples;
-    } else {
-        *frame_size_ptr = 0;
-    }
-    av_frame_free(&frame);
-    return ret;
-}
-
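Note. The removed wrapper above already points at FFmpeg's replacement for avcodec_decode_audio4(): the send/receive pair. A minimal, plugin-independent sketch of that loop (error handling trimmed; this is not the code the commit installs):

    #include <libavcodec/avcodec.h>

    /* one packet in, zero or more frames out; the caller owns frame */
    static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(ctx, pkt);    /* pkt == NULL flushes the decoder */
        if (ret < 0 && ret != AVERROR(EAGAIN))
            return ret;                             /* input error */
        for (;;) {
            ret = avcodec_receive_frame(ctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                           /* needs more input / drained */
            if (ret < 0)
                return ret;                         /* decode error */
            /* ... consume frame ... */
            av_frame_unref(frame);
        }
    }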
-/**
-**  Decode an audio packet.
-**
-**  PTS must be handled self.
-**
-**  @param audio_decoder    audio decoder data
-**  @param avpkt            audio packet
-*/
-void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
-{
-    int16_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + AV_INPUT_BUFFER_PADDING_SIZE] __attribute__((aligned(16)));
-    int buf_sz;
-    int l;
-    AVCodecContext *audio_ctx;
-
-    audio_ctx = audio_decoder->AudioCtx;
-
-    // FIXME: don't need to decode pass-through codecs
-    buf_sz = sizeof(buf);
-    l = myavcodec_decode_audio3(audio_ctx, buf, &buf_sz, (AVPacket *) avpkt);
-    if (avpkt->size != l) {
-        if (l == AVERROR(EAGAIN)) {
-            Error(_("codec: latm\n"));
-            return;
-        }
-        if (l < 0) {                    // no audio frame could be decompressed
-            Error(_("codec: error audio data\n"));
-            return;
-        }
-        Error(_("codec: error more than one frame data\n"));
-    }
-    // update audio clock
-    if (avpkt->pts != (int64_t) AV_NOPTS_VALUE) {
-        CodecAudioSetClock(audio_decoder, avpkt->pts);
-    }
-    // FIXME: must first play remainings bytes, than change and play new.
-    if (audio_decoder->Passthrough != CodecPassthrough || audio_decoder->SampleRate != audio_ctx->sample_rate
-        || audio_decoder->Channels != audio_ctx->channels) {
-        CodecAudioUpdateFormat(audio_decoder);
-    }
-
-    if (audio_decoder->HwSampleRate && audio_decoder->HwChannels) {
-        // need to resample audio
-        if (audio_decoder->ReSample) {
-            int16_t outbuf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 4 + AV_INPUT_BUFFER_PADDING_SIZE]
-                __attribute__((aligned(16)));
-            int outlen;
-
-            // FIXME: libav-0.7.2 crash here
-            outlen = audio_resample(audio_decoder->ReSample, outbuf, buf, buf_sz);
-#ifdef DEBUG
-            if (outlen != buf_sz) {
-                Debug(3, "codec/audio: possible fixed ffmpeg\n");
-            }
-#endif
-            if (outlen) {
-                // outlen seems to be wrong in ffmpeg-0.9
-                outlen /= audio_decoder->Channels * av_get_bytes_per_sample(audio_ctx->sample_fmt);
-                outlen *= audio_decoder->HwChannels * av_get_bytes_per_sample(audio_ctx->sample_fmt);
-                Debug(4, "codec/audio: %d -> %d\n", buf_sz, outlen);
-                CodecAudioEnqueue(audio_decoder, outbuf, outlen);
-            }
-        } else {
-            if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) {
-                return;
-            }
-
-            CodecAudioEnqueue(audio_decoder, buf, buf_sz);
-        }
-    }
-}
-
-#endif

 #if defined(USE_SWRESAMPLE) || defined(USE_AVRESAMPLE)

@@ -1757,7 +1346,6 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
     if (audio_decoder->Resample) {
         uint8_t outbuf[8192 * 2 * 8];
         uint8_t *out[1];
-
         out[0] = outbuf;
         ret =
             swr_convert(audio_decoder->Resample, out, sizeof(outbuf) / (2 * audio_decoder->HwChannels),
@@ -1784,6 +1372,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
 */
 void CodecAudioFlushBuffers(AudioDecoder * decoder)
 {
+
     avcodec_flush_buffers(decoder->AudioCtx);
 }

softhdcuvid.cpp

@@ -61,7 +61,7 @@ extern "C"
     /// vdr-plugin version number.
     /// Makefile extracts the version number for generating the file name
     /// for the distribution archive.
-static const char *const VERSION = "3.0.0"
+static const char *const VERSION = "3.1.0"
 #ifdef GIT_REV
     "-GIT" GIT_REV
 #endif
video.c (106 changed lines)
@@ -329,7 +329,7 @@ typedef struct
 #define CODEC_SURFACES_MAX 12   //

 #define VIDEO_SURFACES_MAX 6    ///< video output surfaces for queue
 // #define OUTPUT_SURFACES_MAX 4    ///< output surfaces for flip page

 #if defined VAAPI && !defined RASPI
 #define PIXEL_FORMAT AV_PIX_FMT_VAAPI
 #define SWAP_BUFFER_SIZE 3
|
||||
static GLuint OsdGlTextures[2]; ///< gl texture for OSD
|
||||
static int OsdIndex = 0; ///< index into OsdGlTextures
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Common Functions
|
||||
//----------------------------------------------------------------------------
|
||||
@@ -1605,7 +1604,7 @@ static void CuvidDestroySurfaces(CuvidDecoder * decoder)
     pl_renderer_destroy(&p->renderer);
     p->renderer = pl_renderer_create(p->ctx, p->gpu);
 #else
-    glDeleteTextures(CODEC_SURFACES_MAX * 2, (GLuint *) & decoder->gl_textures);
+    glDeleteTextures(CODEC_SURFACES_MAX * 2, (GLuint *) &decoder->gl_textures);
     GlxCheck();

     if (CuvidDecoderN == 1) {   // only wenn last decoder closes
@@ -2490,7 +2489,7 @@ void createTextureDst(CuvidDecoder * decoder, int anz, unsigned int size_x, unsi
             desc.layers[n].pitch[plane]); \
     } while (0)

-void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame, int image_width, int image_height)
+void generateVAAPIImage(CuvidDecoder * decoder, VASurfaceID index, const AVFrame * frame, int image_width, int image_height)
 {
     VAStatus status;

@@ -2499,14 +2498,14 @@ void generateVAAPIImage(CuvidDecoder * decoder, int index, const AVFrame * frame
     VADRMPRIMESurfaceDescriptor desc;

     status =
-        vaExportSurfaceHandle(decoder->VaDisplay, (VASurfaceID)frame->data[3], VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
+        vaExportSurfaceHandle(decoder->VaDisplay, (VASurfaceID)(uintptr_t)frame->data[3], VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
         VA_EXPORT_SURFACE_READ_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &desc);

     if (status != VA_STATUS_SUCCESS) {
         printf("Fehler beim export VAAPI Handle\n");
         return;
     }
-    vaSyncSurface(decoder->VaDisplay, (VASurfaceID)frame->data[3]);
+    vaSyncSurface(decoder->VaDisplay, (VASurfaceID)(uintptr_t)frame->data[3]);
 #endif
 #ifdef RASPI
     AVDRMFrameDescriptor desc;
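Note. The added (uintptr_t) casts follow the usual way to recover the surface handle: for AV_PIX_FMT_VAAPI frames, FFmpeg stores the integer VASurfaceID in the pointer-typed data[3] field, so a direct pointer-to-int cast can truncate or warn on 64-bit builds. A sketch of the idiom:

    #include <stdint.h>
    #include <va/va.h>
    #include <libavutil/frame.h>

    /* FFmpeg convention: for VAAPI hardware frames, data[3] carries the VASurfaceID */
    static VASurfaceID va_surface_from_frame(const AVFrame *frame)
    {
        return (VASurfaceID)(uintptr_t)frame->data[3];
    }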
@@ -2868,7 +2867,7 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex
         Fatal(_("video: no valid profile found\n"));
     }

-    decoder->newchannel = 1;
+    // decoder->newchannel = 1;
 #ifdef VAAPI
     init_generic_hwaccel(decoder, PIXEL_FORMAT,video_ctx);
 #endif
@@ -2877,7 +2876,7 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex
     }

     ist->GetFormatDone = 1;
-
+
     Debug(3, "video: create decoder 16bit?=%d %dx%d old %d %d\n", bitformat16, video_ctx->width, video_ctx->height,
         decoder->InputWidth, decoder->InputHeight);

@@ -2893,13 +2892,14 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex
         ist->hwaccel_output_format = AV_PIX_FMT_NV12;
     }

-    // if ((video_ctx->width != decoder->InputWidth
-    //     || video_ctx->height != decoder->InputHeight) && decoder->TrickSpeed == 0) {
+    if ((video_ctx->width != decoder->InputWidth
+        || video_ctx->height != decoder->InputHeight) && decoder->TrickSpeed == 0) {
+
-    if (decoder->TrickSpeed == 0) {
+        // if (decoder->TrickSpeed == 0) {
 #ifdef PLACEBO
         VideoThreadLock();
 #endif
         decoder->newchannel = 1;
         CuvidCleanup(decoder);
         decoder->InputAspect = video_ctx->sample_aspect_ratio;
         decoder->InputWidth = video_ctx->width;
|
||||
#ifdef PLACEBO
|
||||
VideoThreadUnlock();
|
||||
// dont show first frame
|
||||
decoder->newchannel = 1;
|
||||
#endif
|
||||
#ifdef YADIF
|
||||
if (VideoDeinterlace[decoder->Resolution] == VideoDeinterlaceYadif) {
|
||||
@@ -2925,6 +2924,14 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder, AVCodecContex
             Fatal(_("codec: can't set option deint to video codec!\n"));
         }
 #endif
+    } else {
+        decoder->SyncCounter = 0;
+        decoder->FrameCounter = 0;
+        decoder->FramesDisplayed = 0;
+        decoder->StartCounter = 0;
+        decoder->Closing = 0;
+        decoder->PTS = AV_NOPTS_VALUE;
+        VideoDeltaPTS = 0;
     }

     CuvidMessage(2, "GetFormat Init ok %dx%d\n", video_ctx->width, video_ctx->height);
@@ -3351,9 +3358,11 @@ static void CuvidRenderFrame(CuvidDecoder * decoder, const AVCodecContext * vide
         av_frame_free(&frame);
         return;
     }
-
+    if (!decoder->Closing) {
+        VideoSetPts(&decoder->PTS, decoder->Interlaced, video_ctx, frame);
+    }


     // update aspect ratio changes
     if (decoder->InputWidth && decoder->InputHeight && av_cmp_q(decoder->InputAspect, frame->sample_aspect_ratio)) {
         Debug(3, "video/cuvid: aspect ratio changed\n");
@@ -3406,11 +3415,6 @@ Debug(3,"fmt %02d:%02d width %d:%d hight %d:%d\n",decoder->ColorSpace,frame->co
         av_frame_free(&frame);
         return;
     }
-#if 0
-    if (!decoder->Closing) {
-        VideoSetPts(&decoder->PTS, decoder->Interlaced, video_ctx, frame);
-    }
-#endif

 #if defined (VAAPI) && defined (PLACEBO)
     if (p->has_dma_buf) {       // Vulkan supports DMA_BUF no copy required
@@ -3621,15 +3625,16 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused))
     ycropf = (float)decoder->CropY / (float)decoder->InputHeight;

     current = decoder->SurfacesRb[decoder->SurfaceRead];

-#ifdef USE_DRM
     if (!decoder->Closing) {
         frame = decoder->frames[current];
         VideoSetPts(&decoder->PTS, decoder->Interlaced, 0, frame);
+#ifdef USE_DRM
         AVFrameSideData *sd1 = av_frame_get_side_data (frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
         AVFrameSideData *sd2 = av_frame_get_side_data (frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
         set_hdr_metadata(frame->color_primaries,frame->color_trc,sd1,sd2);
 #endif
     }
-#endif

     // Render Progressive frame
 #ifndef PLACEBO
@@ -3798,12 +3803,13 @@ static void CuvidMixVideo(CuvidDecoder * decoder, __attribute__((unused))
         colors.contrast = 0.0f;
         if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) {
             Debug(3, "Failed rendering frame!\n");
         }
+        return;
     }

     decoder->newchannel = 0;


     if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) {
         Debug(3, "Failed rendering frame!\n");
     }
@@ -4221,7 +4227,7 @@ static int64_t CuvidGetClock(const CuvidDecoder * decoder)
         return decoder->PTS - 20 * 90 * (2 * atomic_read(&decoder->SurfacesFilled) - decoder->SurfaceField - 2 + 2);
     }
     // + 2 in driver queue
-    return decoder->PTS - 20 * 90 * (atomic_read(&decoder->SurfacesFilled) + SWAP_BUFFER_SIZE - 1);    // +2
+    return decoder->PTS - 20 * 90 * (atomic_read(&decoder->SurfacesFilled) + SWAP_BUFFER_SIZE + 1);    // +2
 }

 ///
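Note. A worked example of the clock estimate, for illustration only: PTS runs at 90 kHz and one 50 Hz frame is 20 ms, i.e. 20 * 90 = 1800 ticks. With 3 filled surfaces and SWAP_BUFFER_SIZE = 3, the old expression subtracted 1800 * (3 + 3 - 1) = 9000 ticks (100 ms); the new one subtracts 1800 * (3 + 3 + 1) = 12600 ticks (140 ms), so the reported video clock moves 40 ms earlier to account for two more frames still in flight.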
@@ -4301,15 +4307,15 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder)
     int64_t audio_clock;
     int64_t video_clock;
     int err = 0;
     static uint64_t last_time;
+    static int speedup = 3;

 #ifdef GAMMA
     Get_Gamma();
 #endif

-    // video_clock = CuvidGetClock(decoder);
-    video_clock = decoder->PTS - (90 * 20 * 1); // 1 Frame in Output
+    video_clock = CuvidGetClock(decoder);

     filled = atomic_read(&decoder->SurfacesFilled);

     if (!decoder->SyncOnAudio) {
@@ -4318,7 +4324,7 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder)
         goto skip_sync;
     }
     audio_clock = AudioGetClock();
-    // printf("Diff %d %ld %ld filled %d \n",(video_clock - audio_clock - VideoAudioDelay)/90,video_clock,audio_clock,filled);
+    // printf("Diff %d %#012" PRIx64 " %#012" PRIx64" filled %d \n",(video_clock - audio_clock - VideoAudioDelay)/90,video_clock,audio_clock,filled);
     // 60Hz: repeat every 5th field
     if (Video60HzMode && !(decoder->FramesDisplayed % 6)) {
         if (audio_clock == (int64_t) AV_NOPTS_VALUE || video_clock == (int64_t) AV_NOPTS_VALUE) {
@@ -4357,12 +4363,9 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder)
         int diff;

         diff = video_clock - audio_clock - VideoAudioDelay;
-        diff = (decoder->LastAVDiff + diff) / 2;
+        // diff = (decoder->LastAVDiff + diff) / 2;
         decoder->LastAVDiff = diff;

-        // if (CuvidDecoderN) {
-        //     CuvidDecoders[0]->Frameproc = (float)(diff / 90);
-        // }
 #if 0
         if (abs(diff / 90) > 0) {
             printf(" Diff %d filled %d \n", diff / 90, filled);
@@ -4372,29 +4375,37 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder)
             err = CuvidMessage(2, "video: audio/video difference too big %d\n", diff / 90);
             // decoder->SyncCounter = 1;
             // usleep(10);
             // goto out;
             goto skip_sync;
+
         } else if (diff > 100 * 90) {
             // FIXME: this quicker sync step, did not work with new code!
             err = CuvidMessage(4, "video: slow down video, duping frame %d\n", diff / 90);
             ++decoder->FramesDuped;
-            decoder->SyncCounter = 1;
+            if (speedup && --speedup)
+                decoder->SyncCounter = 1;
+            else
+                decoder->SyncCounter = 0;
             goto out;
-        } else if (diff > 55 * 90) {
+        } else if (diff > 25 * 90) {
             err = CuvidMessage(3, "video: slow down video, duping frame %d \n", diff / 90);
             ++decoder->FramesDuped;
             decoder->SyncCounter = 1;
             goto out;
-        } else if ((diff < -35 * 90)) {
+        } else if ((diff < -100 * 90)) {
             if (filled > 2) {
                 err = CuvidMessage(3, "video: speed up video, droping frame %d\n", diff / 90);
                 ++decoder->FramesDropped;
                 CuvidAdvanceDecoderFrame(decoder);
-            } else if ((diff < -65 * 90)) { // give it some time to get frames to drop
+            } else if ((diff < -100 * 90)) { // give it some time to get frames to drop
                 Debug(3, "Delay Audio %d ms\n", abs(diff / 90));
                 AudioDelayms(abs(diff / 90));
             }
             decoder->SyncCounter = 1;
         }
+        else {
+            speedup = 3;
+        }
 #if defined(DEBUG) || defined(AV_INFO)
         if (!decoder->SyncCounter && decoder->StartCounter < 1000) {
 #ifdef DEBUG
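Note. All diff thresholds are 90 kHz tick counts, so n * 90 ticks is n milliseconds: the dup threshold tightens from 55 ms to 25 ms of video running ahead, while the drop/delay thresholds widen from -35 ms and -65 ms to -100 ms behind. Together with the new speedup counter, which appears to stop forcing SyncCounter after three consecutive large-lead frames, this is the "faster a/v sync" behaviour the commit message refers to.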
@@ -4572,15 +4583,12 @@ static void CuvidDisplayHandlerThread(void)
         //
         filled = atomic_read(&decoder->SurfacesFilled);
         //if (filled <= 1 + 2 * decoder->Interlaced) {
-        if (filled < 4) {
+        if (filled < 5) {
            // FIXME: hot polling
            // fetch+decode or reopen
            allfull = 0;
            err = VideoDecodeInput(decoder->Stream);
        } else {
-
-           usleep(1000);
-
            err = VideoPollInput(decoder->Stream);
        }
        // decoder can be invalid here
@@ -4593,25 +4601,17 @@ static void CuvidDisplayHandlerThread(void)
                decoder->Closing = -1;
            }
        }
-
-       usleep(1000);
-
+       usleep(10 * 1000);
        continue;
    }
    decoded = 1;
    }

-
-
-
    if (!decoded) {                     // nothing decoded, sleep
        // FIXME: sleep on wakeup
        usleep(1 * 1000);
    }

-   usleep(1000);
-
    // all decoder buffers are full
    // and display is not preempted
    // speed up filling display queue, wait on display queue empty