X-Git-Url: http://git.cinelerra-gg.org/git/?a=blobdiff_plain;f=cinelerra-5.0%2Fcinelerra%2Fffmpeg.C;h=aa7a9f1f396827759404a688d94bf815c1e23d7d;hb=77e208bf58ba7980f9d5aefb7ffb70ba8af5ee5a;hp=efb11be359a0bc4018000ed7784b3ad1f2bb5385;hpb=fa7f91658c01ba88aab006beff8b167a4bbb7085;p=goodguy%2Fhistory.git

diff --git a/cinelerra-5.0/cinelerra/ffmpeg.C b/cinelerra-5.0/cinelerra/ffmpeg.C
index efb11be3..aa7a9f1f 100644
--- a/cinelerra-5.0/cinelerra/ffmpeg.C
+++ b/cinelerra-5.0/cinelerra/ffmpeg.C
@@ -12,9 +12,11 @@
 #ifndef INT64_MAX
 #define INT64_MAX 9223372036854775807LL
 #endif
+#define MAX_RETRY 1000
 
 #include "asset.h"
 #include "bccmodels.h"
+#include "bchash.h"
 #include "fileffmpeg.h"
 #include "file.h"
 #include "ffmpeg.h"
@@ -349,7 +351,7 @@ int FFStream::read_packet()
 int FFStream::decode(AVFrame *frame)
 {
 	int ret = 0;
-	int retries = 1000;
+	int retries = MAX_RETRY;
 	int got_frame = 0;
 
 	while( ret >= 0 && !flushed && --retries >= 0 && !got_frame ) {
@@ -364,7 +366,7 @@ int FFStream::decode(AVFrame *frame)
 				ipkt->data += ret;
 				ipkt->size -= ret;
 			}
-			retries = 1000;
+			retries = MAX_RETRY;
 		}
 		if( !got_frame ) {
 			need_packet = 1;
@@ -459,33 +461,39 @@ int FFStream::seek(int64_t no, double rate)
 			if( n < 0 ) n = 0;
 			pos = n;
 			plmt = marks[i].pos;
-			npkts = 1000;
+			npkts = MAX_RETRY;
 		}
 	}
 	double secs = pos / rate;
-	int64_t tstmp = secs * st->time_base.den / st->time_base.num;
+	int64_t pkt_ts, tstmp = secs * st->time_base.den / st->time_base.num;
 	if( nudge != AV_NOPTS_VALUE ) tstmp += nudge;
-	if( avformat_seek_file(fmt_ctx, st->index,
-		-INT64_MAX, tstmp, INT64_MAX, AVSEEK_FLAG_ANY) < 0 ) return -1;
-	avcodec_flush_buffers(st->codec);
-	need_packet = 0; flushed = 0;
-	seeked = 1; st_eof(0);
-	int64_t pkt_ts = AV_NOPTS_VALUE;
-	int ret = 1, retry = 1000;
-
+	int ret = avformat_seek_file(fmt_ctx, st->index,
+		-INT64_MAX, tstmp, INT64_MAX, AVSEEK_FLAG_ANY);
+	if( ret >= 0 ) {
+		avcodec_flush_buffers(st->codec);
+		need_packet = 0; flushed = 0;
+		seeked = 1; st_eof(0);
 // read up to retry packets, limited to npkts in stream, and not past pkt.pos plmt
-	while( ret > 0 && npkts > 0 && --retry >= 0 ) {
-		if( (ret=read_packet()) <= 0 ) break;
-		if( ipkt->stream_index != st->index ) continue;
-		if( (pkt_ts=ipkt->dts) == AV_NOPTS_VALUE ) pkt_ts = ipkt->pts;
-		if( pkt_ts != AV_NOPTS_VALUE && pkt_ts >= tstmp ) break;
-		if( plmt >= 0 && ipkt->pos >= plmt ) break;
-		--npkts;
+		for( int retry=MAX_RETRY; ret>=0 && --retry>=0; ) {
+			if( read_packet() <= 0 || ( plmt >= 0 && ipkt->pos > plmt ) ) {
+				ret = -1; break;
+			}
+			if( ipkt->stream_index != st->index ) continue;
+			if( --npkts <= 0 ) break;
+			if( (pkt_ts=ipkt->dts) == AV_NOPTS_VALUE &&
+				(pkt_ts=ipkt->pts) == AV_NOPTS_VALUE ) continue;
+			if( pkt_ts >= tstmp ) break;
+		}
 	}
-
-	if( ret <= 0 || retry < 0 ) return -1;
+	if( ret < 0 ) {
+//printf("** seek fail %ld, %ld\n", pos, tstmp);
+		seeked = need_packet = 0;
+		st_eof(flushed=1);
+		return -1;
+	}
+//printf("seeked pos = %ld, %ld\n", pos, tstmp);
 	seek_pos = curr_pos = pos;
-	return npkts > 0 ? 1 : 0;
+	return 0;
 }
 
 FFAudioStream::FFAudioStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
@@ -627,7 +635,7 @@ int FFAudioStream::load(int64_t pos, int len)
 	if( mbsz < len ) mbsz = len;
 	int64_t end_pos = pos + len;
 	int ret = 0;
-	for( int i=0; ret>=0 && !flushed && curr_pos<end_pos && i<1000; ++i ) {
+	for( int i=0; ret>=0 && !flushed && curr_pos<end_pos && i<MAX_RETRY; ++i ) {
 		ret = read_frame(frame);
 		if( ret > 0 ) {
 			load_history(&frame->extended_data[0], frame->nb_samples);
@@ -710,12 +718,10 @@ FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
 	frame_rate = 0;
 	aspect_ratio = 0;
 	length = 0;
-	convert_ctx = 0;
 }
 
 FFVideoStream::~FFVideoStream()
 {
-	if( convert_ctx ) sws_freeContext(convert_ctx);
 }
 
 int FFVideoStream::decode_frame(AVPacket *pkt, AVFrame *frame, int &got_frame)
@@ -743,11 +749,11 @@ int FFVideoStream::load(VFrame *vframe, int64_t pos)
 		fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n");
 		return -1;
 	}
-	for( int i=0; ret>=0 && !flushed && curr_pos<=pos && i<1000; ++i ) {
+	for( int i=0; ret>=0 && !flushed && curr_pos<=pos && i<MAX_RETRY; ++i ) {
 		ret = read_frame(frame);
 		if( ret > 0 ) ++curr_pos;
 	}
-	if( ret > 0 ) {
+	if( ret >= 0 ) {
 		AVCodecContext *ctx = st->codec;
 		ret = convert_cmodel(vframe, (AVPicture *)frame,
 			ctx->pix_fmt, ctx->width, ctx->height);
@@ -816,7 +822,7 @@ int FFVideoStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
 	return ret;
 }
 
-PixelFormat FFVideoStream::color_model_to_pix_fmt(int color_model)
+PixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
 {
 	switch( color_model ) {
 	case BC_YUV422:	return AV_PIX_FMT_YUYV422;
@@ -837,7 +843,7 @@ PixelFormat FFVideoStream::color_model_to_pix_fmt(int color_model)
 	return AV_PIX_FMT_NB;
 }
 
-int FFVideoStream::pix_fmt_to_color_model(PixelFormat pix_fmt)
+int FFVideoConvert::pix_fmt_to_color_model(PixelFormat pix_fmt)
 {
 	switch (pix_fmt) {
 	case AV_PIX_FMT_YUYV422:	return BC_YUV422;
@@ -858,7 +864,7 @@ int FFVideoStream::pix_fmt_to_color_model(PixelFormat pix_fmt)
 	return BC_TRANSPARENCY;
 }
 
-int FFVideoStream::convert_picture_vframe(VFrame *frame,
+int FFVideoConvert::convert_picture_vframe(VFrame *frame,
 	AVPicture *ip, PixelFormat ifmt, int iw, int ih)
 {
 	AVPicture opic;
@@ -885,20 +891,20 @@ int FFVideoStream::convert_picture_vframe(VFrame *frame,
 	convert_ctx = sws_getCachedContext(convert_ctx, iw, ih, ifmt,
 		frame->get_w(), frame->get_h(), ofmt, SWS_BICUBIC, NULL, NULL, NULL);
 	if( !convert_ctx ) {
-		fprintf(stderr, "FFVideoStream::convert_picture_frame:"
+		fprintf(stderr, "FFVideoConvert::convert_picture_frame:"
 			" sws_getCachedContext() failed\n");
-		return 1;
+		return -1;
 	}
 	int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ih,
 		opic.data, opic.linesize);
 	if( ret < 0 ) {
-		ff_err(ret, "FFVideoStream::convert_picture_frame: sws_scale() failed\n");
-		return 1;
+		ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
+		return -1;
 	}
 	return 0;
 }
 
-int FFVideoStream::convert_cmodel(VFrame *frame,
+int FFVideoConvert::convert_cmodel(VFrame *frame,
 	AVPicture *ip, PixelFormat ifmt, int iw, int ih)
 {
 // try direct transfer
@@ -922,7 +928,22 @@ int FFVideoStream::convert_cmodel(VFrame *frame,
 	return 1;
 }
 
-int FFVideoStream::convert_vframe_picture(VFrame *frame,
+int FFVideoConvert::transfer_cmodel(VFrame *frame,
+	AVFrame *ifp, PixelFormat ifmt, int iw, int ih)
+{
+	int ret = convert_cmodel(frame, (AVPicture *)ifp, ifmt, iw, ih);
+	if( ret > 0 ) {
+		const AVDictionary *src = av_frame_get_metadata(ifp);
+		AVDictionaryEntry *t = NULL;
+		BC_Hash *hp = frame->get_params();
+		//hp->clear();
+		while( (t=av_dict_get(src, "", t, AV_DICT_IGNORE_SUFFIX)) )
+			hp->update(t->key, t->value);
+	}
+	return ret;
+}
+
+int FFVideoConvert::convert_vframe_picture(VFrame *frame,
 	AVPicture *op, PixelFormat ofmt, int ow, int oh)
 {
 	AVPicture opic;
@@ -949,24 +970,24 @@ int FFVideoStream::convert_vframe_picture(VFrame *frame,
 	convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(), ifmt,
 		ow, oh, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
 	if( !convert_ctx ) {
-		fprintf(stderr, "FFVideoStream::convert_frame_picture:"
+		fprintf(stderr, "FFVideoConvert::convert_frame_picture:"
 			" sws_getCachedContext() failed\n");
-		return 1;
+		return -1;
 	}
 	int ret = sws_scale(convert_ctx, opic.data, opic.linesize, 0, frame->get_h(),
 		op->data, op->linesize);
 	if( ret < 0 ) {
-		ff_err(ret, "FFVideoStream::convert_frame_picture: sws_scale() failed\n");
-		return 1;
+		ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n");
+		return -1;
 	}
 	return 0;
 }
 
-int FFVideoStream::convert_pixfmt(VFrame *frame,
+int FFVideoConvert::convert_pixfmt(VFrame *frame,
 	AVPicture *op, PixelFormat ofmt, int ow, int oh)
 {
 // try direct transfer
-	if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 0;
+	if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 1;
 // use indirect transfer
 	int colormodel = frame->get_color_model();
 	int bits = BC_CModels::calculate_pixelsize(colormodel) * 8;
@@ -976,8 +997,24 @@ int FFVideoStream::convert_pixfmt(VFrame *frame,
 		(bits > 8 ? BC_RGB161616: BC_RGB888) ;
 	VFrame vframe(frame->get_w(), frame->get_h(), icolor_model);
 	vframe.transfer_from(frame);
-	if( convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1;
-	return 0;
+	if( !convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1;
+	return -1;
+}
+
+int FFVideoConvert::transfer_pixfmt(VFrame *frame,
+	AVFrame *ofp, PixelFormat ofmt, int ow, int oh)
+{
+	int ret = convert_pixfmt(frame, (AVPicture *)ofp, ofmt, ow, oh);
+	if( ret > 0 ) {
+		BC_Hash *hp = frame->get_params();
+		AVDictionary **dict = avpriv_frame_get_metadatap(ofp);
+		//av_dict_free(dict);
+		for( int i=0; i<hp->size(); ++i ) {
+			char *key = hp->get_key(i), *val = hp->get_value(i);
+			av_dict_set(dict, key, val, 0);
+		}
+	}
+	return ret;
 }
 
 void FFVideoStream::load_markers()
@@ -1224,9 +1261,9 @@ int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
 		if( !ret ) {
 			if( !strcmp(key, "duration") )
 				opt_duration = strtod(val, 0);
-			if( !strcmp(key, "video_filter") )
+			else if( !strcmp(key, "video_filter") )
 				opt_video_filter = cstrdup(val);
-			if( !strcmp(key, "audio_filter") )
+			else if( !strcmp(key, "audio_filter") )
 				opt_audio_filter = cstrdup(val);
 			else if( !strcmp(key, "loglevel") )
 				set_loglevel(val);
@@ -1857,7 +1894,6 @@ int FFMPEG::audio_seek(int stream, int64_t pos)
 {
 	int aidx = astrm_index[stream].st_idx;
 	FFAudioStream *aud = ffaudio[aidx];
-	pos = pos * aud->sample_rate / file_base->asset->sample_rate + 0.5;
 	aud->audio_seek(pos);
 	return 0;
 }
@@ -1866,7 +1902,6 @@ int FFMPEG::video_seek(int stream, int64_t pos)
 {
 	int vidx = vstrm_index[stream].st_idx;
 	FFVideoStream *vid = ffvideo[vidx];
-	pos = pos * vid->frame_rate / file_base->asset->frame_rate + 0.5;
 	vid->video_seek(pos);
 	return 0;
 }
@@ -1877,7 +1912,6 @@ int FFMPEG::decode(int chn, int64_t pos, double *samples, int len)
 	if( !has_audio || chn >= astrm_index.size() ) return -1;
 	int aidx = astrm_index[chn].st_idx;
 	FFAudioStream *aud = ffaudio[aidx];
-	pos = pos * aud->sample_rate / file_base->asset->sample_rate + 0.5;
 	if( aud->load(pos, len) < len ) return -1;
 	int ch = astrm_index[chn].st_ch;
 	int ret = aud->read(samples,len,ch);
@@ -1889,7 +1923,6 @@ int FFMPEG::decode(int layer, int64_t pos, VFrame *vframe)
 	if( !has_video || layer >= vstrm_index.size() ) return -1;
 	int vidx = vstrm_index[layer].st_idx;
 	FFVideoStream *vid = ffvideo[vidx];
-	pos = pos * vid->frame_rate / file_base->asset->frame_rate + 0.5;
 	return vid->load(vframe, pos);
 }
 
@@ -2214,7 +2247,7 @@ int FFVideoStream::create_filter(const char *filter_spec,
 	snprintf(args, sizeof(args),
 		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
 		src_ctx->width, src_ctx->height, src_ctx->pix_fmt,
-		st->time_base.num, st->time_base.den,
+		src_ctx->time_base.num, src_ctx->time_base.den,
 		src_ctx->sample_aspect_ratio.num, src_ctx->sample_aspect_ratio.den);
 	if( ret >= 0 )
 		ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
@@ -2248,7 +2281,7 @@ int FFAudioStream::create_filter(const char *filter_spec,
 	int ret = 0;  char args[BCTEXTLEN];
 	snprintf(args, sizeof(args),
 		"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
-		st->time_base.num, st->time_base.den, src_ctx->sample_rate,
+		src_ctx->time_base.num, src_ctx->time_base.den, src_ctx->sample_rate,
 		av_get_sample_fmt_name(src_ctx->sample_fmt), src_ctx->channel_layout);
 	if( ret >= 0 )
 		ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",