add MatN appimage build and get rid of some compile warnings
[goodguy/cinelerra.git] / cinelerra-5.1 / cinelerra / ffmpeg.C
index 794added52ba6e9519ccd06f45f96dec2ad6de0f..df39763c3e30033db7c0d640d96d2d22a0c745f2 100644 (file)
@@ -263,18 +263,22 @@ FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx)
        fmt_ctx = 0;
        avctx = 0;
        filter_graph = 0;
+       filt_ctx = 0;
+       filt_id = 0;
        buffersrc_ctx = 0;
        buffersink_ctx = 0;
        frm_count = 0;
        nudge = AV_NOPTS_VALUE;
        seek_pos = curr_pos = 0;
-       seeked = 1;  eof = 0;
+       seeking = 0; seeked = 1;
+       eof = 0;
        reading = writing = 0;
        hw_pixfmt = AV_PIX_FMT_NONE;
        hw_device_ctx = 0;
        flushed = 0;
        need_packet = 1;
        frame = fframe = 0;
+       probe_frame = 0;
        bsfc = 0;
        stats_fp = 0;
        stats_filename = 0;
@@ -284,6 +288,7 @@ FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx)
 
 FFStream::~FFStream()
 {
+       frm_lock->lock("FFStream::~FFStream");
        if( reading > 0 || writing > 0 ) avcodec_close(avctx);
        if( avctx ) avcodec_free_context(&avctx);
        if( fmt_ctx ) avformat_close_input(&fmt_ctx);
@@ -293,6 +298,8 @@ FFStream::~FFStream()
        if( filter_graph ) avfilter_graph_free(&filter_graph);
        if( frame ) av_frame_free(&frame);
        if( fframe ) av_frame_free(&fframe);
+       if( probe_frame ) av_frame_free(&probe_frame);
+       frm_lock->unlock();
        delete frm_lock;
        if( stats_fp ) fclose(stats_fp);
        if( stats_in ) av_freep(&stats_in);
@@ -333,12 +340,35 @@ int FFStream::encode_activate()
        return writing;
 }
 
+// this is a global parameter that really should be in the context
 static AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE; // protected by ff_lock
+
+// goofy maneuver to attach a hw_format to an av_context
+#define GET_HW_PIXFMT(fn, fmt) \
+static AVPixelFormat get_hw_##fn(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) { \
+       return fmt; \
+}
+GET_HW_PIXFMT(vaapi, AV_PIX_FMT_VAAPI)
+GET_HW_PIXFMT(vdpau, AV_PIX_FMT_VDPAU)
+GET_HW_PIXFMT(cuda,  AV_PIX_FMT_CUDA)
+GET_HW_PIXFMT(nv12,  AV_PIX_FMT_NV12)
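+// each expansion defines a get_format callback (e.g. get_hw_vaapi) that
+// unconditionally reports its fixed pixel format back to libavcodec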
+
 static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                        const enum AVPixelFormat *pix_fmts)
 {
-       for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p )
-               if( *p == hw_pix_fmt ) return *p;
+       for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p ) {
+               if( *p != hw_pix_fmt ) continue;
+               switch( *p ) {
+               case AV_PIX_FMT_VAAPI: ctx->get_format = get_hw_vaapi; return *p;
+               case AV_PIX_FMT_VDPAU: ctx->get_format = get_hw_vdpau; return *p;
+               case AV_PIX_FMT_CUDA:  ctx->get_format = get_hw_cuda;  return *p;
+               case AV_PIX_FMT_NV12:  ctx->get_format = get_hw_nv12;  return *p;
+               default:
+                       fprintf(stderr, "Unknown HW surface format: %s\n",
+                               av_get_pix_fmt_name(*p));
+                       continue;
+               }
+       }
        fprintf(stderr, "Failed to get HW surface format.\n");
        return hw_pix_fmt = AV_PIX_FMT_NONE;
 }
@@ -396,7 +426,6 @@ int FFStream::decode_activate()
                        }
                        if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
                                ret = decode_hw_format(decoder, hw_type);
-                               if( !ret ) hw_type = AV_HWDEVICE_TYPE_NONE;
                        }
                        if( ret >= 0 ) {
                                avcodec_parameters_to_context(avctx, st->codecpar);
@@ -404,35 +433,35 @@ int FFStream::decode_activate()
                                        avctx->thread_count = ffmpeg->ff_cpus();
                                ret = avcodec_open2(avctx, decoder, &copts);
                        }
+                       AVFrame *hw_frame = 0;
                        if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
-                               if( need_packet ) {
-                                       need_packet = 0;
-                                       ret = read_packet();
-                               }
-                               if( ret >= 0 ) {
-                                       AVPacket *pkt = (AVPacket*)ipkt;
-                                       ret = avcodec_send_packet(avctx, pkt);
-                                       if( ret < 0 || hw_pix_fmt == AV_PIX_FMT_NONE ) {
-                                               ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
-                                                       ffmpeg->fmt_ctx->url);
-                                               avcodec_close(avctx);
-                                               avcodec_free_context(&avctx);
-                                               av_buffer_unref(&hw_device_ctx);
-                                               hw_device_ctx = 0;
-                                               hw_type = AV_HWDEVICE_TYPE_NONE;
-                                               int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
-                                               int idx = st->index;
-                                               av_seek_frame(fmt_ctx, idx, INT64_MIN, flags);
-                                               need_packet = 1;  flushed = 0;
-                                               seeked = 1;  st_eof(0);
-                                               ret = 0;
-                                               continue;
-                                       }
+                               if( !(hw_frame=av_frame_alloc()) ) {
+                                       fprintf(stderr, "FFStream::decode_activate: av_frame_alloc failed\n");
+                                       ret = AVERROR(ENOMEM);
                                }
+                               if( ret >= 0 )
+                                       ret = decode(hw_frame);
                        }
-                       if( ret >= 0 ) {
-                               reading = 1;
+                       if( ret < 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+                               ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
+                                       ffmpeg->fmt_ctx->url);
+                               avcodec_close(avctx);
+                               avcodec_free_context(&avctx);
+                               av_buffer_unref(&hw_device_ctx);
+                               hw_device_ctx = 0;
+                               av_frame_free(&hw_frame);
+                               hw_type = AV_HWDEVICE_TYPE_NONE;
+                               int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
+                               int idx = st->index;
+                               av_seek_frame(fmt_ctx, idx, 0, flags);
+                               need_packet = 1;  flushed = 0;
+                               seeked = 1;  st_eof(0);
+                               ret = 0;
+                               continue;
                        }
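+                       // keep the frame decoded during the hw probe; decode() hands it out on its next call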
+                       probe_frame = hw_frame;
+                       if( ret >= 0 )
+                               reading = 1;
                        else
                                eprintf(_("open decoder failed\n"));
                }
@@ -460,9 +489,14 @@ int FFStream::read_packet()
 
 int FFStream::decode(AVFrame *frame)
 {
+       if( probe_frame ) { // hw probe reads first frame
+               av_frame_ref(frame, probe_frame);
+               av_frame_free(&probe_frame);
+               return 1;
+       }
        int ret = 0;
        int retries = MAX_RETRY;
-
+       frm_lock->lock("FFStream::decode");
        while( ret >= 0 && !flushed && --retries >= 0 ) {
                if( need_packet ) {
                        if( (ret=read_packet()) < 0 ) break;
@@ -485,6 +519,7 @@ int FFStream::decode(AVFrame *frame)
                        flushed = st_eof();
                }
        }
+       frm_lock->unlock();
 
        if( retries < 0 ) {
                fprintf(stderr, "FFStream::decode: Retry limit\n");
@@ -680,6 +715,7 @@ int FFStream::seek(int64_t no, double rate)
                }
        }
        if( pos == curr_pos ) return 0;
+       seeking = -1;
        double secs = pos < 0 ? 0. : pos / rate;
        AVRational time_base = st->time_base;
        int64_t tstmp = time_base.num > 0 ? secs * time_base.den/time_base.num : 0;
@@ -697,7 +733,8 @@ int FFStream::seek(int64_t no, double rate)
        tstmp = av_rescale_q(tstmp, time_base, AV_TIME_BASE_Q);
        idx = -1;
 #endif
-
+       frm_lock->lock("FFStream::seek");
+       av_frame_free(&probe_frame);
        avcodec_flush_buffers(avctx);
        avformat_flush(fmt_ctx);
 #if 0
@@ -729,7 +766,9 @@ int FFStream::seek(int64_t no, double rate)
                        if( pkt_ts >= tstmp ) break;
                }
                if( retry < 0 ) {
-                       fprintf(stderr,"FFStream::seek: retry limit, pos=%jd tstmp=%jd\n",pos,tstmp);
+                       ff_err(AVERROR(EIO), "FFStream::seek: %s\n"
+                               " retry limit, pos=%jd tstmp=%jd, ",
+                               ffmpeg->fmt_ctx->url, pos, tstmp);
                        ret = -1;
                }
                if( ret < 0 ) break;
@@ -743,6 +782,7 @@ int FFStream::seek(int64_t no, double rate)
                        break;
                }
        }
+       frm_lock->unlock();
        if( ret < 0 ) {
 printf("** seek fail %jd, %jd\n", pos, tstmp);
                seeked = need_packet = 0;
@@ -1010,19 +1050,25 @@ IndexMarks *FFAudioStream::get_markers()
 }
 
 FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
- : FFStream(ffmpeg, strm, fidx)
+ : FFStream(ffmpeg, strm, fidx),
+   FFVideoConvert(ffmpeg->ff_prefs())
 {
        this->idx = idx;
        width = height = 0;
+       transpose = 0;
        frame_rate = 0;
        aspect_ratio = 0;
        length = 0;
        interlaced = 0;
        top_field_first = 0;
+       color_space = -1;
+       color_range = -1;
+       fconvert_ctx = 0;
 }
 
 FFVideoStream::~FFVideoStream()
 {
+       if( fconvert_ctx ) sws_freeContext(fconvert_ctx);
 }
 
 AVHWDeviceType FFVideoStream::decode_hw_activate()
@@ -1054,6 +1100,7 @@ int FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
                if( !config ) {
                        fprintf(stderr, "Decoder %s does not support device type %s.\n",
                                decoder->name, av_hwdevice_get_type_name(type));
+                       ret = -1;
                        break;
                }
                if( (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) != 0 &&
@@ -1070,9 +1117,11 @@ int FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
                        avctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
                        ret = 1;
                }
-               else
+               else {
                        ff_err(ret, "Failed HW device create.\ndev:%s\n",
                                av_hwdevice_get_type_name(type));
+                       ret = -1;
+               }
        }
        return ret;
 }
@@ -1164,6 +1213,39 @@ int FFVideoStream::decode_frame(AVFrame *frame)
        return 1;
 }
 
+int FFVideoStream::probe(int64_t pos)
+{
+       int ret = video_seek(pos);
+       if( ret < 0 ) return -1;
+       if( !frame && !(frame=av_frame_alloc()) ) {
+               fprintf(stderr, "FFVideoStream::probe: av_frame_alloc failed\n");
+               return -1;
+       }
+
+       if( ffmpeg->interlace_from_codec ) return 1;
+
+       ret = read_frame(frame);
+       if( ret > 0 ) {
+               //printf("codec interlace: %i \n",frame->interlaced_frame);
+               //printf("codec tff: %i \n",frame->top_field_first);
+               if( !frame->interlaced_frame )
+                       ffmpeg->interlace_from_codec = AV_FIELD_PROGRESSIVE;
+               if( frame->interlaced_frame && frame->top_field_first )
+                       ffmpeg->interlace_from_codec = AV_FIELD_TT;
+               if( frame->interlaced_frame && !frame->top_field_first )
+                       ffmpeg->interlace_from_codec = AV_FIELD_BB;
+               //printf("Interlace mode from codec: %i\n", ffmpeg->interlace_from_codec);
+       }
+
+       if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
+               ret = -1;
+
+       ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
+       return ret;
+}
+
 int FFVideoStream::load(VFrame *vframe, int64_t pos)
 {
        int ret = video_seek(pos);
@@ -1172,11 +1254,39 @@ int FFVideoStream::load(VFrame *vframe, int64_t pos)
                fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n");
                return -1;
        }
+
        int i = MAX_RETRY + pos - curr_pos;
+       int64_t cache_start = 0;
        while( ret>=0 && !flushed && curr_pos<=pos && --i>=0 ) {
                ret = read_frame(frame);
-               if( ret > 0 ) ++curr_pos;
+               if( ret > 0 ) {
+                       if( frame->key_frame && seeking < 0 ) {
+                               int use_cache = ffmpeg->get_use_cache();
+                               if( use_cache < 0 ) {
+// for reverse read, reload file frame_cache from keyframe to pos
+                                       ffmpeg->purge_cache();
+                                       int count = preferences->cache_size /
+                                               vframe->get_data_size() / 2;  // try to burn only 1/2 of cache
+                                       cache_start = pos - count + 1;
+                                       seeking = 1;
+                               }
+                               else
+                                       seeking = 0;
+                       }
+                       if( seeking > 0 && curr_pos >= cache_start && curr_pos < pos ) {
+                               int vw =vframe->get_w(), vh = vframe->get_h();
+                               int vcolor_model = vframe->get_color_model();
+// do not use shm here, puts too much pressure on 32bit systems
+                               VFrame *cache_frame = new VFrame(vw, vh, vcolor_model, 0);
+                               ret = convert_cmodel(cache_frame, frame);
+                               if( ret > 0 )
+                                       ffmpeg->put_cache_frame(cache_frame, curr_pos);
+                       }
+                       ++curr_pos;
+               }
        }
+       seeking = 0;
        if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
                ret = -1;
        if( ret >= 0 ) {
@@ -1218,6 +1328,78 @@ int FFVideoStream::init_frame(AVFrame *picture)
        return ret;
 }
 
+int FFVideoStream::convert_hw_frame(AVFrame *ifrm, AVFrame *ofrm)
+{
+       AVPixelFormat ifmt = (AVPixelFormat)ifrm->format;
+       AVPixelFormat ofmt = (AVPixelFormat)st->codecpar->format;
+       ofrm->width  = ifrm->width;
+       ofrm->height = ifrm->height;
+       ofrm->format = ofmt;
+       int ret = av_frame_get_buffer(ofrm, 32);
+       if( ret < 0 ) {
+               ff_err(ret, "FFVideoStream::convert_hw_frame:"
+                               " av_frame_get_buffer failed\n");
+               return -1;
+       }
+       fconvert_ctx = sws_getCachedContext(fconvert_ctx,
+               ifrm->width, ifrm->height, ifmt,
+               ofrm->width, ofrm->height, ofmt,
+               SWS_POINT, NULL, NULL, NULL);
+       if( !fconvert_ctx ) {
+               ff_err(AVERROR(EINVAL), "FFVideoStream::convert_hw_frame:"
+                               " sws_getCachedContext() failed\n");
+               return -1;
+       }
+       int codec_range = st->codecpar->color_range;
+       int codec_space = st->codecpar->color_space;
+       const int *codec_table = sws_getCoefficients(codec_space);
+       int *inv_table, *table, src_range, dst_range;
+       int brightness, contrast, saturation;
+       if( !sws_getColorspaceDetails(fconvert_ctx,
+                       &inv_table, &src_range, &table, &dst_range,
+                       &brightness, &contrast, &saturation) ) {
+               if( src_range != codec_range || dst_range != codec_range ||
+                   inv_table != codec_table || table != codec_table )
+                       sws_setColorspaceDetails(fconvert_ctx,
+                                       codec_table, codec_range, codec_table, codec_range,
+                                       brightness, contrast, saturation);
+       }
+       ret = sws_scale(fconvert_ctx,
+               ifrm->data, ifrm->linesize, 0, ifrm->height,
+               ofrm->data, ofrm->linesize);
+       if( ret < 0 ) {
+               ff_err(ret, "FFVideoStream::convert_hw_frame:"
+                               " sws_scale() failed\nfile: %s\n",
+                               ffmpeg->fmt_ctx->url);
+               return -1;
+       }
+       return 0;
+}
+
+int FFVideoStream::load_filter(AVFrame *frame)
+{
+       AVPixelFormat pix_fmt = (AVPixelFormat)frame->format;
+       if( pix_fmt == hw_pixfmt ) {
+               AVFrame *hw_frame = this->frame;
+               av_frame_unref(hw_frame);
+               int ret = av_hwframe_transfer_data(hw_frame, frame, 0);
+               if( ret < 0 ) {
+                       eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"),
+                               ffmpeg->fmt_ctx->url);
+                       return -1;
+               }
+               av_frame_unref(frame);
+               ret = convert_hw_frame(hw_frame, frame);
+               if( ret < 0 ) {
+                       eprintf(_("Error converting data from GPU to CPU\nfile: %s\n"),
+                               ffmpeg->fmt_ctx->url);
+                       return -1;
+               }
+               av_frame_unref(hw_frame);
+       }
+       return FFStream::load_filter(frame);
+}
+
 int FFVideoStream::encode(VFrame *vframe)
 {
        if( encode_activate() <= 0 ) return -1;
@@ -1244,6 +1426,7 @@ int FFVideoStream::encode(VFrame *vframe)
 
 int FFVideoStream::drain()
 {
+
        return 0;
 }
 
@@ -1344,7 +1527,7 @@ int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip)
 }
 
 int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
-{
+{ // vframe = picture
        int cmodel = frame->get_color_model();
        AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
        if( ofmt == AV_PIX_FMT_NB ) return -1;
@@ -1398,6 +1581,32 @@ int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *
                                " sws_getCachedContext() failed\n");
                return -1;
        }
+
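+       // swscale range flags: 1 = full (JPEG) range, 0 = limited (MPEG) range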
+       int color_range = 0;
+       switch( preferences->yuv_color_range ) {
+       case BC_COLORS_JPEG:  color_range = 1;  break;
+       case BC_COLORS_MPEG:  color_range = 0;  break;
+       }
+       int color_space = SWS_CS_ITU601;
+       switch( preferences->yuv_color_space ) {
+       case BC_COLORS_BT601:  color_space = SWS_CS_ITU601;  break;
+       case BC_COLORS_BT709:  color_space = SWS_CS_ITU709;  break;
+       case BC_COLORS_BT2020: color_space = SWS_CS_BT2020;  break;
+       }
+       const int *color_table = sws_getCoefficients(color_space);
+
+       int *inv_table, *table, src_range, dst_range;
+       int brightness, contrast, saturation;
+       if( !sws_getColorspaceDetails(convert_ctx,
+                       &inv_table, &src_range, &table, &dst_range,
+                       &brightness, &contrast, &saturation) ) {
+               if( src_range != color_range || dst_range != color_range ||
+                   inv_table != color_table || table != color_table )
+                       sws_setColorspaceDetails(convert_ctx,
+                                       color_table, color_range, color_table, color_range,
+                                       brightness, contrast, saturation);
+       }
+
        int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
            ipic->data, ipic->linesize);
        if( ret < 0 ) {
@@ -1462,7 +1671,7 @@ int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op)
 }
 
 int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
-{
+{ // picture = vframe
        int cmodel = frame->get_color_model();
        AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
        if( ifmt == AV_PIX_FMT_NB ) return -1;
@@ -1500,6 +1709,32 @@ int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *
                                " sws_getCachedContext() failed\n");
                return -1;
        }
+
+       int color_range = 0;
+       switch( preferences->yuv_color_range ) {
+       case BC_COLORS_JPEG:  color_range = 1;  break;
+       case BC_COLORS_MPEG:  color_range = 0;  break;
+       }
+       int color_space = SWS_CS_ITU601;
+       switch( preferences->yuv_color_space ) {
+       case BC_COLORS_BT601:  color_space = SWS_CS_ITU601;  break;
+       case BC_COLORS_BT709:  color_space = SWS_CS_ITU709;  break;
+       case BC_COLORS_BT2020: color_space = SWS_CS_BT2020;  break;
+       }
+       const int *color_table = sws_getCoefficients(color_space);
+
+       int *inv_table, *table, src_range, dst_range;
+       int brightness, contrast, saturation;
+       if( !sws_getColorspaceDetails(convert_ctx,
+                       &inv_table, &src_range, &table, &dst_range,
+                       &brightness, &contrast, &saturation) ) {
+               if( dst_range != color_range || table != color_table )
+                       sws_setColorspaceDetails(convert_ctx,
+                                       inv_table, src_range, color_table, color_range,
+                                       brightness, contrast, saturation);
+       }
+
        int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
                        op->data, op->linesize);
        if( ret < 0 ) {
@@ -1577,6 +1812,7 @@ FFMPEG::FFMPEG(FileBase *file_base)
        flow = 1;
        decoding = encoding = 0;
        has_audio = has_video = 0;
+       interlace_from_codec = 0;
        opts = 0;
        opt_duration = -1;
        opt_video_filter = 0;
@@ -1628,9 +1864,8 @@ static inline AVRational std_frame_rate(int i)
        return (AVRational) { freq, 1001*12 };
 }
 
-AVRational FFMPEG::check_frame_rate(AVCodec *codec, double frame_rate)
+AVRational FFMPEG::check_frame_rate(const AVRational *p, double frame_rate)
 {
-       const AVRational *p = codec->supported_framerates;
        AVRational rate, best_rate = (AVRational) { 0, 0 };
        double max_err = 1.;  int i = 0;
        while( ((p ? (rate=*p++) : (rate=std_frame_rate(i++))), rate.num) != 0 ) {
@@ -1928,6 +2163,20 @@ void FFMPEG::load_video_options(Asset *asset, EDL *edl)
                scan_video_options(asset, edl);
 }
 
+void FFMPEG::scan_format_options(Asset *asset, EDL *edl)
+{
+}
+
+void FFMPEG::load_format_options(Asset *asset, EDL *edl)
+{
+       char options_path[BCTEXTLEN];
+       set_option_path(options_path, "format/%s", asset->fformat);
+       if( !load_options(options_path,
+                       asset->ff_format_options,
+                       sizeof(asset->ff_format_options)) )
+               scan_format_options(asset, edl);
+}
+
 int FFMPEG::load_defaults(const char *path, const char *type,
                 char *codec, char *codec_options, int len)
 {
@@ -1953,6 +2202,8 @@ void FFMPEG::set_asset_format(Asset *asset, EDL *edl, const char *text)
        if( asset->format != FILE_FFMPEG ) return;
        if( text != asset->fformat )
                strcpy(asset->fformat, text);
+       if( !asset->ff_format_options[0] )
+               load_format_options(asset, edl);
        if( asset->audio_data && !asset->ff_audio_options[0] ) {
                if( !load_defaults("audio", text, asset->acodec,
                                asset->ff_audio_options, sizeof(asset->ff_audio_options)) )
@@ -2024,11 +2275,28 @@ int FFMPEG::scan_options(const char *options, AVDictionary *&opts, AVStream *st)
        if( !fp ) return 0;
        int ret = read_options(fp, options, opts);
        fclose(fp);
-       AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
-       if( tag ) st->id = strtol(tag->value,0,0);
+       if( !ret && st ) {
+               AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
+               if( tag ) st->id = strtol(tag->value,0,0);
+       }
        return ret;
 }
 
+void FFMPEG::put_cache_frame(VFrame *frame, int64_t position)
+{
+       file_base->file->put_cache_frame(frame, position, 0);
+}
+
+int FFMPEG::get_use_cache()
+{
+       return file_base->file->get_use_cache();
+}
+
+void FFMPEG::purge_cache()
+{
+       file_base->file->purge_cache();
+}
+
 FFCodecRemap::FFCodecRemap()
 {
        old_codec = 0;
@@ -2169,16 +2437,27 @@ int FFMPEG::info(char *text, int len)
        if( ffvideo.size() > 0 )
                report("\n%d video stream%s\n",ffvideo.size(), ffvideo.size()!=1 ? "s" : "");
        for( int vidx=0; vidx<ffvideo.size(); ++vidx ) {
+               const char *unkn = _("(unkn)");
                FFVideoStream *vid = ffvideo[vidx];
                AVStream *st = vid->st;
                AVCodecID codec_id = st->codecpar->codec_id;
                report(_("vid%d (%d),  id 0x%06x:\n"), vid->idx, vid->fidx, codec_id);
                const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
-               report("  video%d %s", vidx+1, desc ? desc->name : " (unkn)");
+               report("  video%d %s ", vidx+1, desc ? desc->name : unkn);
                report(" %dx%d %5.2f", vid->width, vid->height, vid->frame_rate);
                AVPixelFormat pix_fmt = (AVPixelFormat)st->codecpar->format;
                const char *pfn = av_get_pix_fmt_name(pix_fmt);
-               report(" pix %s\n", pfn ? pfn : "(unkn)");
+               report(" pix %s\n", pfn ? pfn : unkn);
+               int interlace = st->codecpar->field_order;
+               report("  interlace (container level): %i\n", interlace ? interlace : -1);
+               int interlace_codec = interlace_from_codec;
+               report("  interlace (codec level): %i\n", interlace_codec ? interlace_codec : -1);
+               enum AVColorSpace space = st->codecpar->color_space;
+               const char *nm = av_color_space_name(space);
+               report("    color space:%s", nm ? nm : unkn);
+               enum AVColorRange range = st->codecpar->color_range;
+               const char *rg = av_color_range_name(range);
+               report("/ range:%s\n", rg ? rg : unkn);
                double secs = to_secs(st->duration, st->time_base);
                int64_t length = secs * vid->frame_rate + 0.5;
                double ofs = to_secs((vid->nudge - st->start_time), st->time_base);
@@ -2188,6 +2467,9 @@ int FFMPEG::info(char *text, int len)
                int hrs = secs/3600;  secs -= hrs*3600;
                int mins = secs/60;  secs -= mins*60;
                report("  %d:%02d:%05.2f\n", hrs, mins, secs);
+               double theta = vid->get_rotation_angle();
+               if( fabs(theta) > 1 ) 
+                       report("    rotation angle: %0.1f\n", theta);
        }
        if( ffaudio.size() > 0 )
                report("\n%d audio stream%s\n",ffaudio.size(), ffaudio.size()!=1 ? "s" : "");
@@ -2339,13 +2621,41 @@ int FFMPEG::open_decoder()
                        vid->width = avpar->width;
                        vid->height = avpar->height;
                        vid->frame_rate = !framerate.den ? 0 : (double)framerate.num / framerate.den;
+                       switch( avpar->color_range ) {
+                       case AVCOL_RANGE_MPEG:
+                               vid->color_range = BC_COLORS_MPEG;
+                               break;
+                       case AVCOL_RANGE_JPEG:
+                               vid->color_range = BC_COLORS_JPEG;
+                               break;
+                       default:
+                               vid->color_range = !file_base ? BC_COLORS_JPEG :
+                                       file_base->file->preferences->yuv_color_range;
+                               break;
+                       }
+                       switch( avpar->color_space ) {
+                       case AVCOL_SPC_BT470BG:
+                       case AVCOL_SPC_SMPTE170M:
+                               vid->color_space = BC_COLORS_BT601;
+                               break;
+                       case AVCOL_SPC_BT709:
+                               vid->color_space = BC_COLORS_BT709;
+                               break;
+                       case AVCOL_SPC_BT2020_NCL:
+                       case AVCOL_SPC_BT2020_CL:
+                               vid->color_space = BC_COLORS_BT2020;
+                               break;
+                       default:
+                               vid->color_space = !file_base ? BC_COLORS_BT601 :
+                                       file_base->file->preferences->yuv_color_space;
+                               break;
+                       }
                        double secs = to_secs(st->duration, st->time_base);
                        vid->length = secs * vid->frame_rate;
                        vid->aspect_ratio = (double)st->sample_aspect_ratio.num / st->sample_aspect_ratio.den;
                        vid->nudge = st->start_time;
                        vid->reading = -1;
-                       if( opt_video_filter )
-                               ret = vid->create_filter(opt_video_filter, avpar);
+                       ret = vid->create_filter(opt_video_filter);
                        break; }
                case AVMEDIA_TYPE_AUDIO: {
                        if( avpar->channels < 1 ) continue;
@@ -2364,8 +2674,7 @@ int FFMPEG::open_decoder()
                        aud->init_swr(aud->channels, avpar->format, aud->sample_rate);
                        aud->nudge = st->start_time;
                        aud->reading = -1;
-                       if( opt_audio_filter )
-                               ret = aud->create_filter(opt_audio_filter, avpar);
+                       ret = aud->create_filter(opt_audio_filter);
                        break; }
                default: break;
                }
@@ -2571,7 +2880,19 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
                        vid->width = asset->width;
                        vid->height = asset->height;
                        vid->frame_rate = asset->frame_rate;
-
+                       if( (vid->color_range = asset->ff_color_range) < 0 )
+                               vid->color_range = file_base->file->preferences->yuv_color_range;
+                       switch( vid->color_range ) {
+                       case BC_COLORS_MPEG:  ctx->color_range = AVCOL_RANGE_MPEG;  break;
+                       case BC_COLORS_JPEG:  ctx->color_range = AVCOL_RANGE_JPEG;  break;
+                       }
+                       if( (vid->color_space = asset->ff_color_space) < 0 )
+                               vid->color_space = file_base->file->preferences->yuv_color_space;
+                       switch( vid->color_space ) {
+                       case BC_COLORS_BT601:  ctx->colorspace = AVCOL_SPC_SMPTE170M;  break;
+                       case BC_COLORS_BT709:  ctx->colorspace = AVCOL_SPC_BT709;      break;
+                       case BC_COLORS_BT2020: ctx->colorspace = AVCOL_SPC_BT2020_NCL; break;
+                       }
                        AVPixelFormat pix_fmt = av_get_pix_fmt(asset->ff_pixel_format);
                        if( opt_hw_dev != 0 ) {
                                AVHWDeviceType hw_type = vid->encode_hw_activate(opt_hw_dev);
@@ -2593,7 +2914,7 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
                        int mask_h = (1<<desc->log2_chroma_h)-1;
                        ctx->height = (vid->height+mask_h) & ~mask_h;
                        ctx->sample_aspect_ratio = to_sample_aspect_ratio(asset);
-                       AVRational frame_rate = check_frame_rate(codec, vid->frame_rate);
+                       AVRational frame_rate = check_frame_rate(codec->supported_framerates, vid->frame_rate);
                        if( !frame_rate.num || !frame_rate.den ) {
                                eprintf(_("check_frame_rate failed %s\n"), filename);
                                ret = 1;
@@ -2609,6 +2930,25 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
                        vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
                                asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ? 1 : 0;
                        vid->top_field_first = asset->interlace_mode == ILACE_MODE_TOP_FIRST ? 1 : 0;
+                       switch( asset->interlace_mode ) {
+                       case ILACE_MODE_TOP_FIRST:
+                               if( ctx->codec->id == AV_CODEC_ID_MJPEG )
+                                       av_dict_set(&sopts, "field_order", "tt", 0);
+                               else
+                                       av_dict_set(&sopts, "field_order", "tb", 0);
+                               if( ctx->codec_id != AV_CODEC_ID_MJPEG )
+                                       av_dict_set(&sopts, "flags", "+ilme+ildct", 0);
+                               break;
+                       case ILACE_MODE_BOTTOM_FIRST:
+                               if( ctx->codec->id == AV_CODEC_ID_MJPEG )
+                                       av_dict_set(&sopts, "field_order", "bb", 0);
+                               else
+                                       av_dict_set(&sopts, "field_order", "bt", 0);
+                               if( ctx->codec_id != AV_CODEC_ID_MJPEG )
+                                       av_dict_set(&sopts, "flags", "+ilme+ildct", 0);
+                               break;
+                       case ILACE_MODE_NOTINTERLACED:
+                               av_dict_set(&sopts, "field_order", "progressive", 0);
+                               break;
+                       }
                        break; }
                default:
                        eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
@@ -2821,7 +3161,20 @@ int FFMPEG::encode_activate()
                                fmt_ctx->url);
                        return -1;
                }
-
+               if( !strcmp(file_format, "image2") ) {
+                       Asset *asset = file_base->asset;
+                       const char *filename = asset->path;
+                       FILE *fp = fopen(filename,"w");
+                       if( !fp ) {
+                               eprintf(_("Cant write image2 header file: %s\n  %m"), filename);
+                               return 1;
+                       }
+                       fprintf(fp, "IMAGE2\n");
+                       fprintf(fp, "# Frame rate: %f\n", asset->frame_rate);
+                       fprintf(fp, "# Width: %d\n", asset->width);
+                       fprintf(fp, "# Height: %d\n", asset->height);
+                       fclose(fp);
+               }
                int prog_id = 1;
                AVProgram *prog = av_new_program(fmt_ctx, prog_id);
                for( int i=0; i< ffvideo.size(); ++i )
@@ -2861,7 +3214,13 @@ int FFMPEG::encode_activate()
                char option_path[BCTEXTLEN];
                set_option_path(option_path, "format/%s", file_format);
                read_options(option_path, fopts, 1);
-               ret = avformat_write_header(fmt_ctx, &fopts);
+               av_dict_copy(&fopts, opts, 0);
+               if( scan_options(file_base->asset->ff_format_options, fopts, 0) ) {
+                       eprintf(_("bad format options %s\n"), file_base->asset->path);
+                       ret = -1;
+               }
+               if( ret >= 0 )
+                       ret = avformat_write_header(fmt_ctx, &fopts);
                if( ret < 0 ) {
                        ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
                                fmt_ctx->url);
@@ -2882,6 +3241,33 @@ int FFMPEG::audio_seek(int stream, int64_t pos)
        return 0;
 }
 
+int FFMPEG::video_probe(int64_t pos)
+{
+       int vidx = vstrm_index[0].st_idx;
+       FFVideoStream *vid = ffvideo[vidx];
+       vid->probe(pos);
+
+       int interlace1 = interlace_from_codec;
+       //printf("interlace from codec: %i\n", interlace1);
+
+       switch( interlace1 ) {
+       case AV_FIELD_TT:
+       case AV_FIELD_TB:
+               return ILACE_MODE_TOP_FIRST;
+       case AV_FIELD_BB:
+       case AV_FIELD_BT:
+               return ILACE_MODE_BOTTOM_FIRST;
+       case AV_FIELD_PROGRESSIVE:
+               return ILACE_MODE_NOTINTERLACED;
+       default:
+               return ILACE_MODE_UNDETECTED;
+       }
+}
+
 int FFMPEG::video_seek(int stream, int64_t pos)
 {
        int vidx = vstrm_index[stream].st_idx;
@@ -3131,25 +3517,29 @@ int FFMPEG::ff_total_vstreams()
 
 int FFMPEG::ff_video_width(int stream)
 {
-       return ffvideo[stream]->width;
+       FFVideoStream *vst = ffvideo[stream];
+       return !vst->transpose ? vst->width : vst->height;
 }
 
 int FFMPEG::ff_video_height(int stream)
 {
-       return ffvideo[stream]->height;
+       FFVideoStream *vst = ffvideo[stream];
+       return !vst->transpose ? vst->height : vst->width;
 }
 
 int FFMPEG::ff_set_video_width(int stream, int width)
 {
-       int w = ffvideo[stream]->width;
-       ffvideo[stream]->width = width;
+       FFVideoStream *vst = ffvideo[stream];
+       int *vw = !vst->transpose ? &vst->width : &vst->height, w = *vw;
+       *vw = width;
        return w;
 }
 
 int FFMPEG::ff_set_video_height(int stream, int height)
 {
-       int h = ffvideo[stream]->height;
-       ffvideo[stream]->height = height;
+       FFVideoStream *vst = ffvideo[stream];
+       int *vh = !vst->transpose ? &vst->height : &vst->width, h = *vh;
+       *vh = height;
        return h;
 }
 
@@ -3165,10 +3555,23 @@ int FFMPEG::ff_coded_height(int stream)
 
 float FFMPEG::ff_aspect_ratio(int stream)
 {
-       return ffvideo[stream]->aspect_ratio;
+       //return ffvideo[stream]->aspect_ratio;
+       AVFormatContext *fmt_ctx = ffvideo[stream]->fmt_ctx;
+       AVStream *strm = ffvideo[stream]->st;
+       AVCodecParameters *par = ffvideo[stream]->st->codecpar;
+       AVRational dar;
+       AVRational sar = av_guess_sample_aspect_ratio(fmt_ctx, strm, NULL);
+       if( sar.num ) {
+               av_reduce(&dar.num, &dar.den,
+                       par->width  * sar.num,
+                       par->height * sar.den,
+                       1024*1024);
+               return av_q2d(dar);
+       }
+       return ffvideo[stream]->aspect_ratio;
 }
 
-const char* FFMPEG::ff_video_format(int stream)
+const char* FFMPEG::ff_video_codec(int stream)
 {
        AVStream *st = ffvideo[stream]->st;
        AVCodecID id = st->codecpar->codec_id;
@@ -3176,6 +3579,16 @@ const char* FFMPEG::ff_video_format(int stream)
        return desc ? desc->name : _("Unknown");
 }
 
+int FFMPEG::ff_color_range(int stream)
+{
+       return ffvideo[stream]->color_range;
+}
+
+int FFMPEG::ff_color_space(int stream)
+{
+       return ffvideo[stream]->color_space;
+}
+
 double FFMPEG::ff_frame_rate(int stream)
 {
        return ffvideo[stream]->frame_rate;
@@ -3196,9 +3609,33 @@ int FFMPEG::ff_video_mpeg_color_range(int stream)
        return ffvideo[stream]->st->codecpar->color_range == AVCOL_RANGE_MPEG ? 1 : 0;
 }
 
+int FFMPEG::ff_interlace(int stream)
+{
+// https://ffmpeg.org/doxygen/trunk/structAVCodecParserContext.html
+/* reads from demuxer because codec frame not ready */
+       int interlace0 = ffvideo[stream]->st->codecpar->field_order;
+
+       switch( interlace0 ) {
+       case AV_FIELD_TT:
+       case AV_FIELD_TB:
+               return ILACE_MODE_TOP_FIRST;
+       case AV_FIELD_BB:
+       case AV_FIELD_BT:
+               return ILACE_MODE_BOTTOM_FIRST;
+       case AV_FIELD_PROGRESSIVE:
+               return ILACE_MODE_NOTINTERLACED;
+       default:
+               return ILACE_MODE_UNDETECTED;
+       }
+}
+
 int FFMPEG::ff_cpus()
 {
-       return file_base->file->cpus;
+       return !file_base ? 1 : file_base->file->cpus;
 }
 
 const char *FFMPEG::ff_hw_dev()
@@ -3206,75 +3643,148 @@ const char *FFMPEG::ff_hw_dev()
        return &file_base->file->preferences->use_hw_dev[0];
 }
 
-int FFVideoStream::create_filter(const char *filter_spec, AVCodecParameters *avpar)
+Preferences *FFMPEG::ff_prefs()
 {
+       return !file_base ? 0 : file_base->file->preferences;
+}
+
+double FFVideoStream::get_rotation_angle()
+{
+       int size = 0;
+       int *matrix = (int*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
+       int len = size/sizeof(*matrix);
+       if( !matrix || len < 5 ) return 0;
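+       // display matrix coefficients are 16.16 fixed point, hence the 1/65536 scale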
+       const double s = 1/65536.;
+       double theta = (!matrix[0] && !matrix[3]) || (!matrix[1] && !matrix[4]) ? 0 :
+                atan2( s*matrix[1] / hypot(s*matrix[1], s*matrix[4]),
+                       s*matrix[0] / hypot(s*matrix[0], s*matrix[3])) * 180/M_PI;
+       return theta;
+}
+
+int FFVideoStream::flip(double theta)
+{
+       int ret = 0;
+       transpose = 0;
+       Preferences *preferences = ffmpeg->ff_prefs();
+       if( !preferences || !preferences->auto_rotate ) return ret;
+       double tolerance = 1;
+       if( fabs(theta-0) < tolerance ) return ret;
+       if( (theta=fmod(theta, 360)) < 0 ) theta += 360;
+       if( fabs(theta-90) < tolerance ) {
+               if( (ret=insert_filter("transpose", "clock")) < 0 )
+                       return ret;
+               transpose = 1;
+       }
+       else if( fabs(theta-180) < tolerance ) {
+               if( (ret=insert_filter("hflip", 0)) < 0 )
+                       return ret;
+               if( (ret=insert_filter("vflip", 0)) < 0 )
+                       return ret;
+       }
+       else if( fabs(theta-270) < tolerance ) {
+               if( (ret=insert_filter("transpose", "cclock")) < 0 )
+                       return ret;
+               transpose = 1;
+       }
+       else {
+               char angle[BCSTRLEN];
+               sprintf(angle, "%f", theta*M_PI/180.);
+               if( (ret=insert_filter("rotate", angle)) < 0 )
+                       return ret;
+       }
+       return 1;
+}
+
+int FFVideoStream::create_filter(const char *filter_spec)
+{
+       double theta = get_rotation_angle();
+       if( !theta && !filter_spec )
+               return 0;
        avfilter_register_all();
-       const char *sp = filter_spec;
-       char filter_name[BCSTRLEN], *np = filter_name;
-       int i = sizeof(filter_name);
-       while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
-       *np = 0;
-       const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
-       if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
-               ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
-               return -1;
+       if( filter_spec ) {
+               const char *sp = filter_spec;
+               char filter_name[BCSTRLEN], *np = filter_name;
+               int i = sizeof(filter_name);
+               while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+               *np = 0;
+               const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
+               if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
+                       ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
+                       return -1;
+               }
        }
-       filter_graph = avfilter_graph_alloc();
-       const AVFilter *buffersrc = avfilter_get_by_name("buffer");
-       const AVFilter *buffersink = avfilter_get_by_name("buffersink");
+       AVCodecParameters *avpar = st->codecpar;
+       int sa_num = avpar->sample_aspect_ratio.num;
+       if( !sa_num ) sa_num = 1;
+       int sa_den = avpar->sample_aspect_ratio.den;
+       if( !sa_den ) sa_den = 1;
 
        int ret = 0;  char args[BCTEXTLEN];
        AVPixelFormat pix_fmt = (AVPixelFormat)avpar->format;
        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                avpar->width, avpar->height, (int)pix_fmt,
-               st->time_base.num, st->time_base.den,
-               avpar->sample_aspect_ratio.num, avpar->sample_aspect_ratio.den);
-       if( ret >= 0 )
-               ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
-                       args, NULL, filter_graph);
-       if( ret >= 0 )
-               ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
-                       NULL, NULL, filter_graph);
+               st->time_base.num, st->time_base.den, sa_num, sa_den);
+       if( ret >= 0 ) {
+               filt_ctx = 0;
+               ret = insert_filter("buffer", args, "in");
+               buffersrc_ctx = filt_ctx;
+       }
        if( ret >= 0 )
+               ret = flip(theta);
+       AVFilterContext *fsrc = filt_ctx;
+       if( ret >= 0 ) {
+               filt_ctx = 0;
+               ret = insert_filter("buffersink", 0, "out");
+               buffersink_ctx = filt_ctx;
+       }
+       if( ret >= 0 ) {
                ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                        (uint8_t*)&pix_fmt, sizeof(pix_fmt),
                        AV_OPT_SEARCH_CHILDREN);
-       if( ret < 0 )
-               ff_err(ret, "FFVideoStream::create_filter");
+       }
+       if( ret >= 0 )
+               ret = config_filters(filter_spec, fsrc);
        else
-               ret = FFStream::create_filter(filter_spec);
+               ff_err(ret, "FFVideoStream::create_filter");
        return ret >= 0 ? 0 : -1;
 }
 
-int FFAudioStream::create_filter(const char *filter_spec, AVCodecParameters *avpar)
+int FFAudioStream::create_filter(const char *filter_spec)
 {
+       if( !filter_spec )
+               return 0;
        avfilter_register_all();
-       const char *sp = filter_spec;
-       char filter_name[BCSTRLEN], *np = filter_name;
-       int i = sizeof(filter_name);
-       while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
-       *np = 0;
-       const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
-       if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
-               ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
-               return -1;
+       if( filter_spec ) {
+               const char *sp = filter_spec;
+               char filter_name[BCSTRLEN], *np = filter_name;
+               int i = sizeof(filter_name);
+               while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+               *np = 0;
+               const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
+               if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
+                       ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
+                       return -1;
+               }
        }
-       filter_graph = avfilter_graph_alloc();
-       const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
-       const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
        int ret = 0;  char args[BCTEXTLEN];
+       AVCodecParameters *avpar = st->codecpar;
        AVSampleFormat sample_fmt = (AVSampleFormat)avpar->format;
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
                st->time_base.num, st->time_base.den, avpar->sample_rate,
                av_get_sample_fmt_name(sample_fmt), avpar->channel_layout);
-       if( ret >= 0 )
-               ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
-                       args, NULL, filter_graph);
-       if( ret >= 0 )
-               ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
-                       NULL, NULL, filter_graph);
+       if( ret >= 0 ) {
+               filt_ctx = 0;
+               ret = insert_filter("abuffer", args, "in");
+               buffersrc_ctx = filt_ctx;
+       }
+       AVFilterContext *fsrc = filt_ctx;
+       if( ret >= 0 ) {
+               filt_ctx = 0;
+               ret = insert_filter("abuffersink", 0, "out");
+               buffersink_ctx = filt_ctx;
+       }
        if( ret >= 0 )
                ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                        (uint8_t*)&sample_fmt, sizeof(sample_fmt),
@@ -3287,45 +3797,122 @@ int FFAudioStream::create_filter(const char *filter_spec, AVCodecParameters *avp
                ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                        (uint8_t*)&sample_rate, sizeof(sample_rate),
                        AV_OPT_SEARCH_CHILDREN);
-       if( ret < 0 )
-               ff_err(ret, "FFAudioStream::create_filter");
+       if( ret >= 0 )
+               ret = config_filters(filter_spec, fsrc);
        else
-               ret = FFStream::create_filter(filter_spec);
+               ff_err(ret, "FFAudioStream::create_filter");
        return ret >= 0 ? 0 : -1;
 }
 
-int FFStream::create_filter(const char *filter_spec)
+int FFStream::insert_filter(const char *name, const char *arg, const char *inst_name)
 {
-       /* Endpoints for the filter graph. */
-       AVFilterInOut *outputs = avfilter_inout_alloc();
-       outputs->name = av_strdup("in");
-       outputs->filter_ctx = buffersrc_ctx;
-       outputs->pad_idx = 0;
-       outputs->next = 0;
-
-       AVFilterInOut *inputs  = avfilter_inout_alloc();
-       inputs->name = av_strdup("out");
-       inputs->filter_ctx = buffersink_ctx;
-       inputs->pad_idx = 0;
-       inputs->next = 0;
-
-       int ret = !outputs->name || !inputs->name ? -1 : 0;
+       const AVFilter *filter = avfilter_get_by_name(name);
+       if( !filter ) return -1;
+       char filt_inst[BCSTRLEN];
+       if( !inst_name ) {
+               snprintf(filt_inst, sizeof(filt_inst), "%s_%d", name, ++filt_id);
+               inst_name = filt_inst;
+       }
+       if( !filter_graph )
+               filter_graph = avfilter_graph_alloc();
+       AVFilterContext *fctx = 0;
+       int ret = avfilter_graph_create_filter(&fctx,
+               filter, inst_name, arg, NULL, filter_graph);
+       if( ret >= 0 && filt_ctx )
+               ret = avfilter_link(filt_ctx, 0, fctx, 0);
        if( ret >= 0 )
-               ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
-                       &inputs, &outputs, NULL);
+               filt_ctx = fctx;
+       else
+               avfilter_free(fctx);
+       return ret;
+}
+
+int FFStream::config_filters(const char *filter_spec, AVFilterContext *fsrc)
+{
+       int ret = 0;
+       AVFilterContext *fsink = buffersink_ctx;
+       if( filter_spec ) {
+               /* Endpoints for the filter graph. */
+               AVFilterInOut *outputs = avfilter_inout_alloc();
+               AVFilterInOut *inputs = avfilter_inout_alloc();
+               if( !inputs || !outputs ) ret = -1;
+               if( ret >= 0 ) {
+                       outputs->filter_ctx = fsrc;
+                       outputs->pad_idx = 0;
+                       outputs->next = 0;
+                       if( !(outputs->name = av_strdup(fsrc->name)) ) ret = -1;
+               }
+               if( ret >= 0 ) {
+                       inputs->filter_ctx = fsink;
+                       inputs->pad_idx = 0;
+                       inputs->next = 0;
+                       if( !(inputs->name = av_strdup(fsink->name)) ) ret = -1;
+               }
+               if( ret >= 0 ) {
+                       int len = strlen(fsrc->name)+2 + strlen(filter_spec) + 1;
+                       char spec[len];  sprintf(spec, "[%s]%s", fsrc->name, filter_spec);
+                       ret = avfilter_graph_parse_ptr(filter_graph, spec,
+                               &inputs, &outputs, NULL);
+               }
+               avfilter_inout_free(&inputs);
+               avfilter_inout_free(&outputs);
+       }
+       else
+               ret = avfilter_link(fsrc, 0, fsink, 0);
        if( ret >= 0 )
                ret = avfilter_graph_config(filter_graph, NULL);
-
        if( ret < 0 ) {
                ff_err(ret, "FFStream::create_filter");
                avfilter_graph_free(&filter_graph);
                filter_graph = 0;
        }
-       avfilter_inout_free(&inputs);
-       avfilter_inout_free(&outputs);
        return ret;
 }
 
+
+AVCodecContext *FFMPEG::activate_decoder(AVStream *st)
+{
+       AVDictionary *copts = 0;
+       av_dict_copy(&copts, opts, 0);
+       AVCodecID codec_id = st->codecpar->codec_id;
+       AVCodec *decoder = 0;
+       switch( st->codecpar->codec_type ) {
+       case AVMEDIA_TYPE_VIDEO:
+               if( opt_video_decoder )
+                       decoder = avcodec_find_decoder_by_name(opt_video_decoder);
+               else
+                       video_codec_remaps.update(codec_id, decoder);
+               break;
+       case AVMEDIA_TYPE_AUDIO:
+               if( opt_audio_decoder )
+                       decoder = avcodec_find_decoder_by_name(opt_audio_decoder);
+               else
+                       audio_codec_remaps.update(codec_id, decoder);
+               break;
+       default:
+               return 0;
+       }
+       if( !decoder && !(decoder = avcodec_find_decoder(codec_id)) ) {
+               eprintf(_("cant find decoder codec %d\n"), (int)codec_id);
+               return 0;
+       }
+       AVCodecContext *avctx = avcodec_alloc_context3(decoder);
+       if( !avctx ) {
+               eprintf(_("cant allocate codec context\n"));
+               return 0;
+       }
+       avcodec_parameters_to_context(avctx, st->codecpar);
+       if( !av_dict_get(copts, "threads", NULL, 0) )
+               avctx->thread_count = ff_cpus();
+       int ret = avcodec_open2(avctx, decoder, &copts);
+       av_dict_free(&copts);
+       if( ret < 0 ) {
+               avcodec_free_context(&avctx);
+               avctx = 0;
+       }
+       return avctx;
+}
+
 int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
 {
        AVPacket pkt;
@@ -3342,25 +3929,9 @@ int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
        index_state->add_audio_markers(ffaudio.size());
 
        for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
-               int ret = 0;
-               AVDictionary *copts = 0;
-               av_dict_copy(&copts, opts, 0);
                AVStream *st = fmt_ctx->streams[i];
-               AVCodecID codec_id = st->codecpar->codec_id;
-               AVCodec *decoder = avcodec_find_decoder(codec_id);
-               AVCodecContext *avctx = avcodec_alloc_context3(decoder);
-               if( !avctx ) {
-                       eprintf(_("cant allocate codec context\n"));
-                       ret = AVERROR(ENOMEM);
-               }
-               if( ret >= 0 ) {
-                       avcodec_parameters_to_context(avctx, st->codecpar);
-                       if( !av_dict_get(copts, "threads", NULL, 0) )
-                               avctx->thread_count = ff_cpus();
-                       ret = avcodec_open2(avctx, decoder, &copts);
-               }
-               av_dict_free(&copts);
-               if( ret >= 0 ) {
+               AVCodecContext *avctx = activate_decoder(st);
+               if( avctx ) {
                        AVCodecParameters *avpar = st->codecpar;
                        switch( avpar->codec_type ) {
                        case AVMEDIA_TYPE_VIDEO: {
@@ -3428,8 +3999,8 @@ int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
                        if( vidx < 0 ) break;
                        FFVideoStream *vid = ffvideo[vidx];
                        if( !vid->avctx ) break;
-                       int64_t tstmp = pkt.dts;
-                       if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.pts;
+                       int64_t tstmp = pkt.pts;
+                       if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.dts;
                        if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
                                if( vid->nudge != AV_NOPTS_VALUE ) tstmp -= vid->nudge;
                                double secs = to_secs(tstmp, st->time_base);
@@ -3519,3 +4090,249 @@ void FFStream::load_markers(IndexMarks &marks, double rate)
        }
 }
 
+
+/*
+ * 1) if the format context has a timecode
+ *   return fmt_ctx->timecode - 0
+ * 2) if the layer/channel (stream) has a timecode
+ *   return st->timecode - (start_time-nudge)
+ * 3) find the 1st program containing the stream, then that program's 1st
+ *   video stream; if it has a timecode, return st->timecode - (start_time-nudge)
+ * 4) find a timecode in any stream, return st->timecode
+ * 5) read up to 100 packets, saving ofs=(pkt.pts - st->nudge)*st->time_base:
+ *   decode frames for the video stream of the 1st program
+ *   if the frame metadata has a timecode, return frame->timecode - ofs
+ *   if the side data has a gop timecode, return gop->timecode - ofs
+ *   if the side data has an smpte timecode, return smpte->timecode - ofs
+ * 6) if the filename/url matches *date_time.ext, return date_time
+ * 7) if stat works on the filename/url, return mtime
+ * 8) return -1 on failure
+*/
+double FFMPEG::get_initial_timecode(int data_type, int channel, double frame_rate)
+{
+       AVRational rate = check_frame_rate(0, frame_rate);
+       if( !rate.num ) return -1;
+// format context timecode
+       AVDictionaryEntry *tc = av_dict_get(fmt_ctx->metadata, "timecode", 0, 0);
+       if( tc ) return ff_get_timecode(tc->value, rate, 0);
+// stream timecode
+       if( open_decoder() ) return -1;
+       AVStream *st = 0;
+       int64_t nudge = 0;
+       int codec_type = -1, fidx = -1;
+       switch( data_type ) {
+       case TRACK_AUDIO: {
+               codec_type = AVMEDIA_TYPE_AUDIO;
+               int aidx = astrm_index[channel].st_idx;
+               FFAudioStream *aud = ffaudio[aidx];
+               fidx = aud->fidx;
+               nudge = aud->nudge;
+               st = aud->st;
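+		// a BWF-style time_reference (sample count since midnight) converts directly to seconds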
+               AVDictionaryEntry *tref = av_dict_get(fmt_ctx->metadata, "time_reference", 0, 0);
+               if( tref && aud && aud->sample_rate )
+                       return strtod(tref->value, 0) / aud->sample_rate;
+               break; }
+       case TRACK_VIDEO: {
+               codec_type = AVMEDIA_TYPE_VIDEO;
+               int vidx = vstrm_index[channel].st_idx;
+               FFVideoStream *vid = ffvideo[vidx];
+               fidx = vid->fidx;
+               nudge = vid->nudge;
+               st = vid->st;
+               break; }
+       }
+       if( codec_type < 0 ) return -1;
+       if( st )
+               tc = av_dict_get(st->metadata, "timecode", 0, 0);
+       if( !tc ) {
+               st = 0;
+// find first program which references this stream
+               int pidx = -1;
+               for( int i=0, m=fmt_ctx->nb_programs; pidx<0 && i<m; ++i ) {
+                       AVProgram *pgrm = fmt_ctx->programs[i];
+                       for( int j=0, n=pgrm->nb_stream_indexes; j<n; ++j ) {
+                               int st_idx = pgrm->stream_index[j];
+                               if( st_idx == fidx ) { pidx = i;  break; }
+                       }
+               }
+               fidx = -1;
+               if( pidx >= 0 ) {
+                       AVProgram *pgrm = fmt_ctx->programs[pidx];
+                       for( int j=0, n=pgrm->nb_stream_indexes; j<n; ++j ) {
+                               int st_idx = pgrm->stream_index[j];
+                               AVStream *tst = fmt_ctx->streams[st_idx];
+                               if( !tst ) continue;
+                               if( tst->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
+                                       st = tst;  fidx = st_idx;
+                                       break;
+                               }
+                       }
+               }
+               else {
+                       for( int i=0, n=fmt_ctx->nb_streams; i<n; ++i ) {
+                               AVStream *tst = fmt_ctx->streams[i];
+                               if( !tst ) continue;
+                               if( tst->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
+                                       st = tst;  fidx = i;
+                                       break;
+                               }
+                       }
+               }
+               if( st )
+                       tc = av_dict_get(st->metadata, "timecode", 0, 0);
+       }
+
+       if( !tc ) {
+		// fall back to any stream that carries a timecode, including -data- streams
+               for( int i=0, n=fmt_ctx->nb_streams; i<n; ++i ) {
+                       AVStream *tst = fmt_ctx->streams[i];
+                       if( !tst ) continue;
+                       if( (tc = av_dict_get(tst->metadata, "timecode", 0, 0)) ) {
+                               st = tst;  fidx = i;
+                               break;
+                       }
+               }
+       }
+
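+	// for a video stream, prefer its real frame rate and use its nudge (or start_time) as the offset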
+       if( st && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
+               if( st->r_frame_rate.num && st->r_frame_rate.den )
+                       rate = st->r_frame_rate;
+               nudge = st->start_time;
+               for( int i=0; i<ffvideo.size(); ++i ) {
+                       if( ffvideo[i]->st == st ) {
+                               nudge = ffvideo[i]->nudge;
+                               break;
+                       }
+               }
+       }
+
+       if( tc ) { // return timecode
+               double secs = st->start_time == AV_NOPTS_VALUE ? 0 :
+                       to_secs(st->start_time - nudge, st->time_base);
+               return ff_get_timecode(tc->value, rate, secs);
+       }
+
+       if( !st || fidx < 0 ) return -1;
+
+       decode_activate();
+       AVCodecContext *av_ctx = activate_decoder(st);
+       if( !av_ctx ) {
+               fprintf(stderr,"activate_decoder failed\n");
+               return -1;
+       }
+       avCodecContext avctx(av_ctx); // auto deletes
+       if( avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
+           avctx->framerate.num && avctx->framerate.den )
+               rate = avctx->framerate;
+
+       avPacket pkt;   // auto deletes
+       avFrame frame;  // auto deletes
+       if( !frame ) {
+               fprintf(stderr,"av_frame_alloc failed\n");
+               return -1;
+       }
+       int errs = 0;
+       int64_t max_packets = 100;
+       char tcbuf[AV_TIMECODE_STR_SIZE];
+
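+	// step 5: probe up to max_packets packets on the chosen stream for a frame, GOP or SMPTE timecode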
+       for( int64_t count=0; count<max_packets; ++count ) {
+               av_packet_unref(pkt);
+               pkt->data = 0; pkt->size = 0;
+
+               int ret = av_read_frame(fmt_ctx, pkt);
+               if( ret < 0 ) {
+                       if( ret == AVERROR_EOF ) break;
+                       if( ++errs > 100 ) {
+                               fprintf(stderr,"over 100 read_frame errs\n");
+                               break;
+                       }
+                       continue;
+               }
+               if( !pkt->data ) continue;
+               int i = pkt->stream_index;
+               if( i != fidx ) continue;
+               int64_t tstmp = pkt->pts;
+               if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt->dts;
+               double secs = to_secs(tstmp - nudge, st->time_base);
+               ret = avcodec_send_packet(avctx, pkt);
+               if( ret < 0 ) return -1;
+
+               while( (ret = avcodec_receive_frame(avctx, frame)) >= 0 ) {
+                       if( (tc = av_dict_get(frame->metadata, "timecode", 0, 0)) )
+                               return ff_get_timecode(tc->value, rate, secs);
+                       int k = frame->nb_side_data;
+                       AVFrameSideData *side_data = 0;
+                       while( --k >= 0 ) {
+                               side_data = frame->side_data[k];
+                               switch( side_data->type ) {
+                               case AV_FRAME_DATA_GOP_TIMECODE: {
+                                       int64_t data = *(int64_t *)side_data->data;
+                                       int sz = sizeof(data);
+                                       if( side_data->size >= sz ) {
+                                               av_timecode_make_mpeg_tc_string(tcbuf, data);
+                                               return ff_get_timecode(tcbuf, rate, secs);
+                                       }
+                                       break; }
+                               case AV_FRAME_DATA_S12M_TIMECODE: {
+                                       uint32_t *data = (uint32_t *)side_data->data;
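+					// data[0] holds how many packed timecodes follow; use the last one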
+                                       int n = data[0], sz = (n+1)*sizeof(*data);
+                                       if( side_data->size >= sz ) {
+                                               av_timecode_make_smpte_tc_string(tcbuf, data[n], 0);
+                                               return ff_get_timecode(tcbuf, rate, secs);
+                                       }
+                                       break; }
+                               default:
+                                       break;
+                               }
+                       }
+               }
+       }
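+	// step 6: look for a *date_time.ext name, i.e. YYYYMMDD[_-]HHMMSS just before the extension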
+       char *path = fmt_ctx->url;
+       char *bp = strrchr(path, '/');
+       if( !bp ) bp = path; else ++bp;
+       char *cp = strrchr(bp, '.');
+       if( cp && (cp-=(8+1+6)) >= bp ) {
+               char sep[BCSTRLEN];
+               int year,mon,day, hour,min,sec, frm=0;
+               if( sscanf(cp,"%4d%2d%2d%[_-]%2d%2d%2d",
+                               &year,&mon,&day, sep, &hour,&min,&sec) == 7 ) {
+                       int ch = sep[0];
+                       // year>=1970,mon=1..12,day=1..31, hour=0..23,min=0..59,sec=0..60
+                       if( (ch=='_' || ch=='-' ) &&
+                           year >= 1970 && mon>=1 && mon<=12 && day>=1 && day<=31 &&
+                           hour>=0 && hour<24 && min>=0 && min<60 && sec>=0 && sec<=60 ) {
+                               sprintf(tcbuf,"%d:%02d:%02d:%02d", hour,min,sec, frm);
+                               return ff_get_timecode(tcbuf, rate, 0);
+                       }
+               }
+       }
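+	// step 7: last resort, build a wall-clock timecode from the file's mtime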
+       struct stat tst;
+       if( stat(path, &tst) >= 0 ) {
+               time_t t = (time_t)tst.st_mtim.tv_sec;
+               struct tm tm;
+               localtime_r(&t, &tm);
+               int64_t us = tst.st_mtim.tv_nsec / 1000;
+               int frm = us/1000000. * frame_rate;
+               sprintf(tcbuf,"%d:%02d:%02d:%02d", tm.tm_hour, tm.tm_min, tm.tm_sec, frm);
+               return ff_get_timecode(tcbuf, rate, 0);
+       }
+       return -1;
+}
+
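+// parse a timecode string with av_timecode_init_from_string and return its start in seconds, less pos (never negative)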
+double FFMPEG::ff_get_timecode(char *str, AVRational rate, double pos)
+{
+       AVTimecode tc;
+       if( av_timecode_init_from_string(&tc, rate, str, fmt_ctx) )
+               return -1;
+       double secs = (double)tc.start / tc.fps - pos;
+       if( secs < 0 ) secs = 0;
+       return secs;
+}
+
+double FFMPEG::get_timecode(const char *path, int data_type, int channel, double rate)
+{
+       FFMPEG ffmpeg(0);
+       if( ffmpeg.init_decoder(path) ) return -1;
+       return ffmpeg.get_initial_timecode(data_type, channel, rate);
+}
+
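+
+// Usage sketch (editor's illustration, not part of this change): assuming
+// get_timecode is a static helper as its body suggests, a caller could seed a
+// session start time from the first video channel of a media file, e.g.
+//	double tc = FFMPEG::get_timecode(asset_path, TRACK_VIDEO, 0, frame_rate);
+//	if( tc >= 0 ) session_start = tc;  // -1 means no timecode was found
+// where asset_path and session_start are hypothetical caller-side names.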