X-Git-Url: https://git.cinelerra-gg.org/git/?p=goodguy%2Fcinelerra.git;a=blobdiff_plain;f=cinelerra-5.1%2Fcinelerra%2Fffmpeg.C;h=df39763c3e30033db7c0d640d96d2d22a0c745f2;hp=b3915cfc44de133644e71180239b519170199d2d;hb=97ed925c86313b57c13a2db0fb9aa48822fe76ba;hpb=6a85ddeaab7b4a87cffb57f105b7a5a96a6e2ff4

diff --git a/cinelerra-5.1/cinelerra/ffmpeg.C b/cinelerra-5.1/cinelerra/ffmpeg.C
index b3915cfc..df39763c 100644
--- a/cinelerra-5.1/cinelerra/ffmpeg.C
+++ b/cinelerra-5.1/cinelerra/ffmpeg.C
@@ -270,7 +270,8 @@ FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx)
 	frm_count = 0;
 	nudge = AV_NOPTS_VALUE;
 	seek_pos = curr_pos = 0;
-	seeked = 1; eof = 0;
+	seeking = 0; seeked = 1;
+	eof = 0;
 	reading = writing = 0;
 	hw_pixfmt = AV_PIX_FMT_NONE;
 	hw_device_ctx = 0;
@@ -714,6 +715,7 @@ int FFStream::seek(int64_t no, double rate)
 		}
 	}
 	if( pos == curr_pos ) return 0;
+	seeking = -1;
 	double secs = pos < 0 ? 0. : pos / rate;
 	AVRational time_base = st->time_base;
 	int64_t tstmp = time_base.num > 0 ? secs * time_base.den/time_base.num : 0;
@@ -1211,6 +1213,39 @@ int FFVideoStream::decode_frame(AVFrame *frame)
 	return 1;
 }
 
+int FFVideoStream::probe(int64_t pos)
+{
+	int ret = video_seek(pos);
+	if( ret < 0 ) return -1;
+	if( !frame && !(frame=av_frame_alloc()) ) {
+		fprintf(stderr, "FFVideoStream::probe: av_frame_alloc failed\n");
+		return -1;
+	}
+
+	if (ffmpeg->interlace_from_codec) return 1;
+
+	ret = read_frame(frame);
+	if( ret > 0 ) {
+		//printf("codec interlace: %i \n",frame->interlaced_frame);
+		//printf("codec tff: %i \n",frame->top_field_first);
+
+		if (!frame->interlaced_frame)
+			ffmpeg->interlace_from_codec = AV_FIELD_PROGRESSIVE;
+		if ((frame->interlaced_frame) && (frame->top_field_first))
+			ffmpeg->interlace_from_codec = AV_FIELD_TT;
+		if ((frame->interlaced_frame) && (!frame->top_field_first))
+			ffmpeg->interlace_from_codec = AV_FIELD_BB;
+		//printf("Interlace mode from codec: %i\n", ffmpeg->interlace_from_codec);
+
+	}
+
+	if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
+		ret = -1;
+
+	ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
+	return ret;
+}
+
 int FFVideoStream::load(VFrame *vframe, int64_t pos)
 {
 	int ret = video_seek(pos);
@@ -1219,11 +1254,39 @@ int FFVideoStream::load(VFrame *vframe, int64_t pos)
 		fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n");
 		return -1;
 	}
+
+
 	int i = MAX_RETRY + pos - curr_pos;
+	int64_t cache_start = 0;
 	while( ret>=0 && !flushed && curr_pos<=pos && --i>=0 ) {
 		ret = read_frame(frame);
-		if( ret > 0 ) ++curr_pos;
+		if( ret > 0 ) {
+			if( frame->key_frame && seeking < 0 ) {
+				int use_cache = ffmpeg->get_use_cache();
+				if( use_cache < 0 ) {
+// for reverse read, reload file frame_cache from keyframe to pos
+					ffmpeg->purge_cache();
+					int count = preferences->cache_size /
+						vframe->get_data_size() / 2; // try to burn only 1/2 of cache
+					cache_start = pos - count + 1;
+					seeking = 1;
+				}
+				else
+					seeking = 0;
+			}
+			if( seeking > 0 && curr_pos >= cache_start && curr_pos < pos ) {
+				int vw =vframe->get_w(), vh = vframe->get_h();
+				int vcolor_model = vframe->get_color_model();
+// do not use shm here, puts too much pressure on 32bit systems
+				VFrame *cache_frame = new VFrame(vw, vh, vcolor_model, 0);
+				ret = convert_cmodel(cache_frame, frame);
+				if( ret > 0 )
+					ffmpeg->put_cache_frame(cache_frame, curr_pos);
+			}
+			++curr_pos;
+		}
 	}
+	seeking = 0;
 	if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
 		ret = -1;
 	if( ret >= 0 ) {
@@ -1363,6 +1426,7 @@ int FFVideoStream::encode(VFrame *vframe)
 
 int FFVideoStream::drain()
 {
+	return 0;
 }
 
 
@@ -1748,6 +1812,7 @@ FFMPEG::FFMPEG(FileBase *file_base)
 	flow = 1;
 	decoding = encoding = 0;
 	has_audio = has_video = 0;
+	interlace_from_codec = 0;
 	opts = 0;
 	opt_duration = -1;
 	opt_video_filter = 0;
@@ -2217,6 +2282,21 @@ int FFMPEG::scan_options(const char *options, AVDictionary *&opts, AVStream *st)
 	return ret;
 }
 
+void FFMPEG::put_cache_frame(VFrame *frame, int64_t position)
+{
+	file_base->file->put_cache_frame(frame, position, 0);
+}
+
+int FFMPEG::get_use_cache()
+{
+	return file_base->file->get_use_cache();
+}
+
+void FFMPEG::purge_cache()
+{
+	file_base->file->purge_cache();
+}
+
 FFCodecRemap::FFCodecRemap()
 {
 	old_codec = 0;
@@ -2368,6 +2448,10 @@ int FFMPEG::info(char *text, int len)
 		AVPixelFormat pix_fmt = (AVPixelFormat)st->codecpar->format;
 		const char *pfn = av_get_pix_fmt_name(pix_fmt);
 		report(" pix %s\n", pfn ? pfn : unkn);
+		int interlace = st->codecpar->field_order;
+		report(" interlace (container level): %i\n", interlace ? interlace : -1);
+		int interlace_codec = interlace_from_codec;
+		report(" interlace (codec level): %i\n", interlace_codec ? interlace_codec : -1);
 		enum AVColorSpace space = st->codecpar->color_space;
 		const char *nm = av_color_space_name(space);
 		report(" color space:%s", nm ? nm : unkn);
@@ -2846,6 +2930,25 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
 		vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
 			asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ? 1 : 0;
 		vid->top_field_first = asset->interlace_mode == ILACE_MODE_TOP_FIRST ? 1 : 0;
+		switch (asset->interlace_mode) {
+			case ILACE_MODE_TOP_FIRST:
+				if (ctx->codec->id == AV_CODEC_ID_MJPEG)
+					av_dict_set(&sopts, "field_order", "tt", 0);
+				else
+					av_dict_set(&sopts, "field_order", "tb", 0);
+				if (ctx->codec_id != AV_CODEC_ID_MJPEG)
+					av_dict_set(&sopts, "flags", "+ilme+ildct", 0);
+				break;
+			case ILACE_MODE_BOTTOM_FIRST:
+				if (ctx->codec->id == AV_CODEC_ID_MJPEG)
+					av_dict_set(&sopts, "field_order", "bb", 0);
+				else
+					av_dict_set(&sopts, "field_order", "bt", 0);
+				if (ctx->codec_id != AV_CODEC_ID_MJPEG)
+					av_dict_set(&sopts, "flags", "+ilme+ildct", 0);
+				break;
+			case ILACE_MODE_NOTINTERLACED: av_dict_set(&sopts, "field_order", "progressive", 0); break;
+		}
 		break; }
 	default:
 		eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
@@ -3138,6 +3241,33 @@ int FFMPEG::audio_seek(int stream, int64_t pos)
 	return 0;
 }
 
+int FFMPEG::video_probe(int64_t pos)
+{
+	int vidx = vstrm_index[0].st_idx;
+	FFVideoStream *vid = ffvideo[vidx];
+	vid->probe(pos);
+
+	int interlace1 = interlace_from_codec;
+	//printf("interlace from codec: %i\n", interlace1);
+
+	switch (interlace1)
+	{
+		case AV_FIELD_TT:
+		case AV_FIELD_TB:
+			return ILACE_MODE_TOP_FIRST;
+		case AV_FIELD_BB:
+		case AV_FIELD_BT:
+			return ILACE_MODE_BOTTOM_FIRST;
+		case AV_FIELD_PROGRESSIVE:
+			return ILACE_MODE_NOTINTERLACED;
+		default:
+			return ILACE_MODE_UNDETECTED;
+	}
+
+}
+
+
+
 int FFMPEG::video_seek(int stream, int64_t pos)
 {
 	int vidx = vstrm_index[stream].st_idx;
@@ -3425,7 +3555,20 @@ int FFMPEG::ff_coded_height(int stream)
 
 float FFMPEG::ff_aspect_ratio(int stream)
 {
-	return ffvideo[stream]->aspect_ratio;
+	//return ffvideo[stream]->aspect_ratio;
+	AVFormatContext *fmt_ctx = ffvideo[stream]->fmt_ctx;
+	AVStream *strm = ffvideo[stream]->st;
+	AVCodecParameters *par = ffvideo[stream]->st->codecpar;
+	AVRational dar;
+	AVRational sar = av_guess_sample_aspect_ratio(fmt_ctx, strm, NULL);
+	if (sar.num) {
+		av_reduce(&dar.num, &dar.den,
+			par->width * sar.num,
+			par->height * sar.den,
+			1024*1024);
+		return av_q2d(dar);
+	}
+	return ffvideo[stream]->aspect_ratio;
 }
 
 const char* FFMPEG::ff_video_codec(int stream)
@@ -3466,6 +3609,30 @@ int FFMPEG::ff_video_mpeg_color_range(int stream)
 	return ffvideo[stream]->st->codecpar->color_range == AVCOL_RANGE_MPEG ? 1 : 0;
 }
 
+int FFMPEG::ff_interlace(int stream)
+{
+// https://ffmpeg.org/doxygen/trunk/structAVCodecParserContext.html
+/* reads from demuxer because codec frame not ready */
+	int interlace0 = ffvideo[stream]->st->codecpar->field_order;
+
+	switch (interlace0)
+	{
+		case AV_FIELD_TT:
+		case AV_FIELD_TB:
+			return ILACE_MODE_TOP_FIRST;
+		case AV_FIELD_BB:
+		case AV_FIELD_BT:
+			return ILACE_MODE_BOTTOM_FIRST;
+		case AV_FIELD_PROGRESSIVE:
+			return ILACE_MODE_NOTINTERLACED;
+		default:
+			return ILACE_MODE_UNDETECTED;
+	}
+
+}
+
+
+
 int FFMPEG::ff_cpus()
 {
 	return !file_base ? 1 : file_base->file->cpus;
@@ -3961,6 +4128,9 @@ double FFMPEG::get_initial_timecode(int data_type, int channel, double frame_rat
 		fidx = aud->fidx;
 		nudge = aud->nudge;
 		st = aud->st;
+		AVDictionaryEntry *tref = av_dict_get(fmt_ctx->metadata, "time_reference", 0, 0);
+		if( tref && aud && aud->sample_rate )
+			return strtod(tref->value, 0) / aud->sample_rate;
 		break; }
 	case TRACK_VIDEO: {
 		codec_type = AVMEDIA_TYPE_VIDEO;