Improve cutting of recordings

jojo61 2018-12-10 13:10:58 +01:00
parent b3d3caec41
commit 333b720c65
3 changed files with 317 additions and 367 deletions

101
codec.c

@@ -490,8 +490,9 @@ next_part:
if (ret >= 0) { // one is avail.
got_frame = 1;
}
else
else {
got_frame = 0;
}
if (got_frame) { // frame completed
#ifdef FFMPEG_WORKAROUND_ARTIFACTS
@@ -1357,33 +1358,10 @@ int myavcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
if (!frame)
return AVERROR(ENOMEM);
#if 0
if (avctx->get_buffer != avcodec_default_get_buffer) {
av_log(avctx, AV_LOG_ERROR, "Custom get_buffer() for use with"
"avcodec_decode_audio3() detected.ar *) samples Overriding with avcodec_default_get_buffer\n");
av_log(avctx, AV_LOG_ERROR, "Please port your application to "
"avcodec_decode_audio4()\n");
avctx->get_buffer = avcodec_default_get_buffer;
avctx->release_buffer = avcodec_default_release_buffer;
}
#endif
ret = avcodec_decode_audio4(avctx, frame, &got_frame, avpkt);
if (ret >= 0 && got_frame) {
#if 0
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
frame->nb_samples,
avctx->sample_fmt, 1);
if (*frame_size_ptr < data_size) {
Debug(3, "output buffer size is too small for "
"the current frame (%d < %d)\n", *frame_size_ptr, data_size);
av_frame_free(&frame);
return AVERROR(EINVAL);
}
#endif
#if 1
int i,ch;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_get_bytes_per_sample(avctx->sample_fmt);
@@ -1392,27 +1370,13 @@ int myavcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
fprintf(stderr, "Failed to calculate data size\n");
exit(1);
}
for (i=0; i<frame->nb_samples; i++)
for (i=0; i<frame->nb_samples; i++) {
for (ch=0; ch < avctx->channels; ch++) {
memcpy(samples,frame->extended_data[ch]+data_size*i,data_size);
samples = (char *) samples + data_size;
}
#endif
}
//Debug(3,"data_size %d nb_samples %d sample_fmt %d channels %d planar %d\n",data_size,frame->nb_samples,avctx->sample_fmt,avctx->channels,planar);
#if 0
memcpy(samples, frame->extended_data[0], plane_size);
// memcpy(samples, frame->data[0], plane_size);
if (planar && avctx->channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
for (ch = 1; ch < avctx->channels; ch++) {
memcpy(out, frame->extended_data[ch], plane_size);
// memcpy(out, frame->data[ch], plane_size);
out += plane_size;
}
}
#endif
*frame_size_ptr = data_size * avctx->channels * frame->nb_samples;
} else {
*frame_size_ptr = 0;
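
The surviving #if 1 branch above interleaves planar decoder output (one extended_data[] plane per channel) into the packed int16_t buffer that decode_audio3-style callers of this wrapper expect. A minimal hedged sketch of that interleaving, standalone and using the same pre-5.x channel fields as the code above (interleave_samples is an illustrative name, not part of the plugin):

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

// Sketch only: copy one decoded AVFrame into a packed buffer.
// `out` must hold at least nb_samples * channels * bytes-per-sample.
static void interleave_samples(const AVCodecContext *avctx,
                               const AVFrame *frame, uint8_t *out)
{
    const int bps = av_get_bytes_per_sample(avctx->sample_fmt);

    if (!av_sample_fmt_is_planar(avctx->sample_fmt)) {
        // Packed formats are already interleaved in plane 0.
        memcpy(out, frame->extended_data[0],
               (size_t)bps * avctx->channels * frame->nb_samples);
        return;
    }
    for (int i = 0; i < frame->nb_samples; ++i) {       // sample by sample
        for (int ch = 0; ch < avctx->channels; ++ch) {  // channel by channel
            memcpy(out, frame->extended_data[ch] + (size_t)bps * i, bps);
            out += bps;
        }
    }
}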
@@ -1474,8 +1438,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
int outlen;
// FIXME: libav-0.7.2 crash here
outlen =
audio_resample(audio_decoder->ReSample, outbuf, buf, buf_sz);
outlen = audio_resample(audio_decoder->ReSample, outbuf, buf, buf_sz);
#ifdef DEBUG
if (outlen != buf_sz) {
Debug(3, "codec/audio: possible fixed ffmpeg\n");
@@ -1495,57 +1458,7 @@ void CodecAudioDecode(AudioDecoder * audio_decoder, const AVPacket * avpkt)
if (CodecAudioPassthroughHelper(audio_decoder, avpkt)) {
return;
}
#if 0
//
// old experimental code
//
if (1) {
// FIXME: need to detect dts
// copy original data for output
// FIXME: buf is sint
buf[0] = 0x72;
buf[1] = 0xF8;
buf[2] = 0x1F;
buf[3] = 0x4E;
buf[4] = 0x00;
switch (avpkt->size) {
case 512:
buf[5] = 0x0B;
break;
case 1024:
buf[5] = 0x0C;
break;
case 2048:
buf[5] = 0x0D;
break;
default:
Debug(3,
"codec/audio: dts sample burst not supported\n");
buf[5] = 0x00;
break;
}
buf[6] = (avpkt->size * 8);
buf[7] = (avpkt->size * 8) >> 8;
//buf[8] = 0x0B;
//buf[9] = 0x77;
//printf("%x %x\n", avpkt->data[0],avpkt->data[1]);
// swab?
memcpy(buf + 8, avpkt->data, avpkt->size);
memset(buf + 8 + avpkt->size, 0, buf_sz - 8 - avpkt->size);
} else if (1) {
// FIXME: need to detect mp2
// FIXME: mp2 passthrough
// see softhddev.c version/layer
// 0x04 mpeg1 layer1
// 0x05 mpeg1 layer23
// 0x06 mpeg2 ext
// 0x07 mpeg2.5 layer 1
// 0x08 mpeg2.5 layer 2
// 0x09 mpeg2.5 layer 3
}
// DTS HD?
// True HD?
#endif
CodecAudioEnqueue(audio_decoder, buf, buf_sz);
}
}
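
The large #if 0 block removed above was old experimental passthrough code that hand-built an IEC 61937 (S/PDIF) burst: the sync preamble written byte-wise as 72 F8 1F 4E (the 16-bit words 0xF872 and 0x4E1F), a burst-info word picked from the DTS frame size (0x0B/0x0C/0x0D), the payload length in bits, then the raw packet and zero padding. A hedged sketch of that framing; spdif_dts_burst is a made-up helper name, and the byte order of the 16-bit words is left to the caller (the removed code itself asks "// swab?"):

#include <stdint.h>
#include <string.h>

// Sketch only: build one IEC 61937 DTS burst into `out`.
// Returns 0 on success, -1 on unsupported frame size or short buffer.
static int spdif_dts_burst(uint16_t *out, size_t out_words,
                           const uint8_t *payload, size_t payload_size,
                           int samples_per_frame)
{
    uint16_t pc;                                // Pc: burst-info / data type

    switch (samples_per_frame) {                // DTS types I..III
        case 512:  pc = 11; break;              // 0x0B
        case 1024: pc = 12; break;              // 0x0C
        case 2048: pc = 13; break;              // 0x0D
        default:   return -1;                   // burst size not supported
    }
    if (out_words * 2 < payload_size + 8)
        return -1;                              // preamble + payload must fit

    out[0] = 0xF872;                            // Pa: sync word 1
    out[1] = 0x4E1F;                            // Pb: sync word 2
    out[2] = pc;                                // Pc: data type
    out[3] = (uint16_t)(payload_size * 8);      // Pd: payload length in bits
    memcpy(out + 4, payload, payload_size);
    memset((uint8_t *)(out + 4) + payload_size, 0,
           out_words * 2 - 8 - payload_size);   // pad burst with zeros
    return 0;
}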

softhddev.c

@@ -2705,7 +2705,7 @@ void StillPicture(const uint8_t * data, int size)
#ifdef STILL_DEBUG
fprintf(stderr, "still-picture\n");
#endif
for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 8 : 12); ++i) {
for (i = 0; i < (MyVideoStream->CodecID == AV_CODEC_ID_HEVC ? 8 : 8); ++i) {
const uint8_t *split;
int n;
@@ -2761,11 +2761,10 @@ void StillPicture(const uint8_t * data, int size)
}
// wait for empty buffers
for (i = 0; VideoGetBuffers(MyVideoStream) && i < 30; ++i) {
for (i = 0; VideoGetBuffers(MyVideoStream) && i < 50; ++i) {
usleep(10 * 1000);
}
Debug(3, "[softhddev]%s: buffers %d %dms\n", __FUNCTION__,
VideoGetBuffers(MyVideoStream), i * 10);
Debug(3, "[softhddev]%s: buffers %d %dms\n", __FUNCTION__, VideoGetBuffers(MyVideoStream), i * 10);
#ifdef STILL_DEBUG
InStillPicture = 0;
#endif
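
The second hunk only lengthens the drain wait: 50 polls of 10 ms give the video stream up to 500 ms (previously 300 ms) to empty its buffers after a still picture. As a generic hedged sketch of that bounded-poll pattern, with a hypothetical buffers_pending() predicate standing in for VideoGetBuffers():

#include <unistd.h>

// Sketch only: poll every 10 ms until no buffers are pending or the
// budget (in milliseconds) is used up; returns the time actually waited.
static int wait_for_drain(int (*buffers_pending)(void), int max_ms)
{
    int waited;

    for (waited = 0; buffers_pending() && waited < max_ms; waited += 10) {
        usleep(10 * 1000);                      // 10 ms per poll, as above
    }
    return waited;
}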

64
video.c

@@ -429,6 +429,7 @@ static float VideoColorBlindnessFaktor = 1.0f;
static xcb_atom_t WmDeleteWindowAtom; ///< WM delete message atom
static xcb_atom_t NetWmState; ///< wm-state message atom
static xcb_atom_t NetWmStateFullscreen; ///< fullscreen wm-state message atom
static xcb_atom_t NetWmStateAbove;
#ifdef DEBUG
extern uint32_t VideoSwitch; ///< ticks for channel switch
@@ -2203,7 +2204,6 @@ createTextureDst(CuvidDecoder * decoder,int anz, unsigned int size_x, unsigned i
struct pl_tex *tex;
struct pl_image *img;
struct pl_plane *pl;
const float black[4] = { 0.0f,0.0f,0.0f,1.0f};
//printf("Create textures and planes %d %d\n",size_x,size_y);
Debug(3,"video/vulkan: create %d Textures Format %s w %d h %d \n",anz,PixFmt==AV_PIX_FMT_NV12?"NV12":"P010",size_x,size_y);
@@ -2584,7 +2584,7 @@ static enum AVPixelFormat Cuvid_get_format(CuvidDecoder * decoder,
decoder->SurfacesNeeded = VIDEO_SURFACES_MAX + 1;
CuvidSetupOutput(decoder);
#ifdef PLACEBO // dont show first frame
// decoder->newchannel = 1;
decoder->newchannel = 1;
#endif
}
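
Re-enabling decoder->newchannel = 1 here works together with the check this commit adds in CuvidMixVideo below: the very first surface after a channel or format switch is skipped so a stale frame is never rendered. A hedged sketch of that flag pattern with illustrative names (not the plugin's real types):

// Sketch only: skip the first frame after a format/channel change.
struct toy_decoder {
    int newchannel;                             // set when the format changes
};

static void on_format_change(struct toy_decoder *d)
{
    d->newchannel = 1;                          // next surface is suspect
}

static int should_render(struct toy_decoder *d, int current)
{
    if (d->newchannel && current == 0) {
        return 0;                               // drop the first surface
    }
    d->newchannel = 0;                          // back to normal rendering
    return 1;
}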
@@ -3146,7 +3146,7 @@ extern void P016ToBgra32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int n
extern void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstNv12UV);
extern void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstP016UV);
extern void cudaLaunchNV12toARGBDrv(uint32_t *d_srcNV12, size_t nSourcePitch,uint32_t *d_dstARGB, size_t nDestPitch,uint32_t width, uint32_t height,CUstream streamID);
void VideoSetAbove();
///
/// Render a ffmpeg frame.
///
@@ -3250,6 +3250,7 @@ Debug(3,"fmt %02d:%02d width %d:%d hight %d:%d\n",decoder->ColorSpace,frame->co
av_frame_free(&output);
}
#endif
// copy to texture
generateCUDAImage(decoder,surface,frame,w,h,decoder->PixFmt==AV_PIX_FMT_NV12?1:2);
@@ -3375,9 +3376,8 @@ static void CuvidMixVideo(CuvidDecoder * decoder, int level)
VkImage Image;
struct pl_image *img;
bool ok;
const float black[4] = { 0.0f,0.0f,0.0f,1.0f};
#endif
static int last;
int current;
VdpRect video_src_rect;
VdpRect dst_rect;
@@ -3489,7 +3489,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, int level)
}
// Source crop
if (VideoScalerTest) { // right side defnied scaler
pl_tex_clear(p->gpu,target->fbo,black); // clear frame
pl_tex_clear(p->gpu,target->fbo,(float[4]){0}); // clear frame
img->src_rect.x0 = video_src_rect.x1/2+1;
img->src_rect.y0 = video_src_rect.y0;
img->src_rect.x1 = video_src_rect.x1;
@@ -3550,7 +3550,7 @@ static void CuvidMixVideo(CuvidDecoder * decoder, int level)
colors.gamma = VideoGamma;
if (ovl) {
pl_tex_clear(p->gpu,target->fbo,black); // clear frame
pl_tex_clear(p->gpu,target->fbo,(float[4]){0}); // clear frame
target->overlays = ovl;
target->num_overlays = 1;
} else {
@@ -3558,6 +3558,11 @@ static void CuvidMixVideo(CuvidDecoder * decoder, int level)
target->num_overlays = 0;
}
if (decoder->newchannel && current == 0 )
return;
decoder->newchannel = 0;
if (!pl_render_image(p->renderer, &decoder->pl_images[current], target, &render_params)) {
Fatal(_("Failed rendering frame!\n"));
}
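
Throughout this function the named const float black[4] is replaced by the C99 compound literal (float[4]){0}, which creates an unnamed all-zero array right at the call site. Note the old array carried 1.0f in the alpha slot, while the literal clears alpha to 0 as well. A minimal standalone illustration of the idiom (plain C, no libplacebo needed):

#include <stdio.h>

// Sketch only: a function taking a 4-component clear color, called once
// with a named array and once with a compound literal.
static void clear_color(const float color[4])
{
    printf("clear to %.1f %.1f %.1f %.1f\n",
           color[0], color[1], color[2], color[3]);
}

int main(void)
{
    const float black[4] = { 0.0f, 0.0f, 0.0f, 1.0f }; // old style

    clear_color(black);                         // named array
    clear_color((float[4]){0});                 // compound literal, all zeros
    return 0;
}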
@@ -3606,7 +3611,7 @@ void make_osd_overlay(int x, int y, int width, int height) {
pl = &osdoverlay;
if (pl->plane.texture) {
// pl_tex_clear(p->gpu,pl->plane.texture,black);
pl_tex_clear(p->gpu,pl->plane.texture,(float[4]){0});
pl_tex_destroy(p->gpu,&pl->plane.texture);
}
@@ -3624,7 +3629,7 @@ void make_osd_overlay(int x, int y, int width, int height) {
});
// make overlay
pl_tex_clear(p->gpu,pl->plane.texture,black);
pl_tex_clear(p->gpu,pl->plane.texture,(float[4]){0});
pl->plane.components = 4;
pl->plane.shift_x = 0.0f;
pl->plane.shift_y = 0.0f;
@@ -3661,7 +3666,7 @@ static void CuvidDisplayFrame(void)
struct pl_swapchain_frame frame;
struct pl_render_target target;
bool ok;
const float black[4] = { 0.0f,0.0f,0.0f,1.0f};
#endif
@@ -3758,6 +3763,7 @@ static void CuvidDisplayFrame(void)
}
#ifdef PLACEBO
if (OsdShown == 1) { // New OSD opened
// VideoSetAbove();
pthread_mutex_lock(&OSDMutex);
make_osd_overlay(OSDx,OSDy,OSDxsize,OSDysize);
if (posd) {
@@ -3771,6 +3777,7 @@ static void CuvidDisplayFrame(void)
}
if (OsdShown == 2) {
// VideoSetAbove();
CuvidMixVideo(decoder, i, &target, &osdoverlay);
} else {
CuvidMixVideo(decoder, i, &target, NULL);
@@ -4007,7 +4014,8 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder)
diff = video_clock - audio_clock - VideoAudioDelay;
diff = (decoder->LastAVDiff + diff) / 2;
decoder->LastAVDiff = diff;
//if (abs(diff/90) > 30)
// printf("Diff %d\n",diff/90);
if (abs(diff) > 5000 * 90) { // more than 5s
err = CuvidMessage(2, "video: audio/video difference too big\n");
} else if (diff > 100 * 90) {
@@ -4027,7 +4035,7 @@ static void CuvidSyncDecoder(CuvidDecoder * decoder)
CuvidAdvanceDecoderFrame(decoder);
// filled = atomic_read(&decoder->SurfacesFilled);
// Debug(3,"hinter drop frame filled %d\n",atomic_read(&decoder->SurfacesFilled));
decoder->SyncCounter = 1;
decoder->SyncCounter = 1;;;
}
#if defined(DEBUG) || defined(AV_INFO)
if (!decoder->SyncCounter && decoder->StartCounter < 1000) {
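
The sync arithmetic above works in 90 kHz PTS ticks: the raw difference is smoothed against LastAVDiff, 5000 * 90 ticks is 5 seconds, 100 * 90 ticks is 100 ms, and the commented-out printf divides by 90 to get milliseconds. A hedged sketch of the same calculation as a standalone helper (names are illustrative):

#include <stdint.h>

// Sketch only: smoothed audio/video difference in milliseconds.
// Clocks and delay are in 90 kHz PTS ticks; INT32_MIN flags "too big".
static int av_diff_ms(int64_t video_clock, int64_t audio_clock,
                      int64_t delay_ticks, int64_t *last_diff)
{
    int64_t diff = video_clock - audio_clock - delay_ticks;

    diff = (*last_diff + diff) / 2;             // two-tap smoothing, as above
    *last_diff = diff;

    if (diff > 5000 * 90 || diff < -5000 * 90) {
        return INT32_MIN;                       // more than 5 s apart
    }
    return (int)(diff / 90);                    // signed difference in ms
}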
@@ -4273,7 +4281,7 @@ static void CuvidDisplayHandlerThread(void)
// all decoder buffers are full
// and display is not preempted
// speed up filling display queue, wait on display queue empty
if (!allfull) {
if (!allfull && !decoder->TrickSpeed) {
clock_gettime(CLOCK_MONOTONIC, &nowtime);
// time for one frame over?
if ((nowtime.tv_sec - CuvidFrameTime.tv_sec) * 1000 * 1000 * 1000 +
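
The added !decoder->TrickSpeed test keeps this fast-fill path out of trick-speed playback; the condition it guards measures, via two CLOCK_MONOTONIC timestamps, whether one frame duration has already elapsed. A hedged sketch of that elapsed-time check (the 20 ms value below is only an example):

#include <stdbool.h>
#include <time.h>

// Sketch only: has at least `frame_ns` nanoseconds passed since `then`?
static bool frame_time_over(const struct timespec *then, long long frame_ns)
{
    struct timespec now;
    long long elapsed;

    clock_gettime(CLOCK_MONOTONIC, &now);
    elapsed = (now.tv_sec - then->tv_sec) * 1000LL * 1000LL * 1000LL
        + (now.tv_nsec - then->tv_nsec);
    return elapsed > frame_ns;                  // e.g. frame_ns = 20 * 1000000
}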
@@ -5745,6 +5753,14 @@ static void VideoCreateWindow(xcb_window_t parent, xcb_visualid_t visual, uint8_
free(reply);
}
if ((reply =
xcb_intern_atom_reply(Connection, xcb_intern_atom(Connection, 0,
sizeof("_NET_WM_STATE_ABOVE") - 1,
"_NET_WM_STATE_ABOVE"), NULL))) {
NetWmStateAbove = reply->atom;
free(reply);
}
xcb_map_window(Connection, VideoWindow);
xcb_flush(Connection);
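
The new block interns _NET_WM_STATE_ABOVE exactly as the existing atoms are fetched: issue xcb_intern_atom, collect the cookie with xcb_intern_atom_reply, keep reply->atom, and free the reply. A hedged helper that wraps this round trip (the helper name is made up; error handling stays minimal):

#include <stdlib.h>
#include <string.h>
#include <xcb/xcb.h>

// Sketch only: intern one named atom, XCB_ATOM_NONE if the reply fails.
static xcb_atom_t intern_atom(xcb_connection_t *conn, const char *name)
{
    xcb_intern_atom_cookie_t cookie =
        xcb_intern_atom(conn, 0, (uint16_t)strlen(name), name);
    xcb_intern_atom_reply_t *reply =
        xcb_intern_atom_reply(conn, cookie, NULL);
    xcb_atom_t atom = XCB_ATOM_NONE;

    if (reply) {
        atom = reply->atom;
        free(reply);
    }
    return atom;
}

// e.g. NetWmStateAbove = intern_atom(Connection, "_NET_WM_STATE_ABOVE");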
@@ -6080,6 +6096,7 @@ void VideoSetFullscreen(int onoff)
event.data.data32[0] = XCB_EWMH_WM_STATE_REMOVE;
}
event.data.data32[1] = NetWmStateFullscreen;
event.data.data32[2] = NetWmStateAbove;
xcb_send_event(Connection, XCB_SEND_EVENT_DEST_POINTER_WINDOW,
DefaultRootWindow(XlibDisplay),
@@ -6090,6 +6107,27 @@ void VideoSetFullscreen(int onoff)
}
}
void VideoSetAbove()
{
if (XlibDisplay) { // needs running connection
xcb_client_message_event_t event;
memset(&event, 0, sizeof(event));
event.response_type = XCB_CLIENT_MESSAGE;
event.format = 32;
event.window = VideoWindow;
event.type = NetWmState;
event.data.data32[0] = XCB_EWMH_WM_STATE_ADD;
event.data.data32[1] = NetWmStateAbove;
xcb_send_event(Connection, XCB_SEND_EVENT_DEST_POINTER_WINDOW,
DefaultRootWindow(XlibDisplay),
XCB_EVENT_MASK_SUBSTRUCTURE_NOTIFY |
XCB_EVENT_MASK_SUBSTRUCTURE_REDIRECT, (void *)&event);
Debug(3, "video/x11: send fullscreen message %x %x\n",
event.data.data32[0], event.data.data32[1]);
}
}
///
/// Set deinterlace mode.
///