batchrender asset path fix, ru xlat, fixup hevc/h265 opts, expand new bg pngs
[goodguy/history.git] / cinelerra-5.1 / cinelerra / ffmpeg.C
index 7849148e58938afa42e9a07859fc0ba17693257c..f3f9dcc4cff4a4cd5cb86a8d78d0206475788c7a 100644
@@ -21,6 +21,7 @@
 #include "file.h"
 #include "ffmpeg.h"
 #include "indexfile.h"
+#include "interlacemodes.h"
 #include "libdv.h"
 #include "libmjpeg.h"
 #include "mainerror.h"
@@ -427,6 +428,8 @@ int FFStream::write_packet(FFPacket &pkt)
 
 int FFStream::flush()
 {
+       if( writing < 0 )
+               return -1;
        int ret = 0;
        while( ret >= 0 ) {
                FFPacket pkt;
@@ -526,7 +529,7 @@ int FFAudioStream::get_samples(float *&samples, uint8_t **data, int len)
 {
        samples = *(float **)data;
        if( resample_context ) {
-               if( len > aud_bfr_sz ) {        
+               if( len > aud_bfr_sz ) {
                        delete [] aud_bfr;
                        aud_bfr = 0;
                }
@@ -723,6 +726,8 @@ FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
        frame_rate = 0;
        aspect_ratio = 0;
        length = 0;
+       interlaced = 0;
+       top_field_first = 0;
 }
 
 FFVideoStream::~FFVideoStream()
@@ -761,10 +766,10 @@ int FFVideoStream::load(VFrame *vframe, int64_t pos)
                ret = read_frame(frame);
                if( ret > 0 ) ++curr_pos;
        }
+       if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
+               ret = -1;
        if( ret >= 0 ) {
-               AVCodecContext *ctx = st->codec;
-               ret = convert_cmodel(vframe, frame,
-                       ctx->pix_fmt, ctx->width, ctx->height);
+               ret = convert_cmodel(vframe, frame);
        }
        ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
        return ret;
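
The added guard keeps frames the decoder never actually produced out of the converter. A minimal standalone sketch of the same check (hypothetical helper name, assuming only libavutil):

#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

// An AVFrame the decoder never wrote into still carries AV_PIX_FMT_NONE and
// zero dimensions; passing it on would feed garbage geometry to sws_scale().
static int frame_is_filled(const AVFrame *f)
{
	return f->format != AV_PIX_FMT_NONE && f->width > 0 && f->height > 0;
}
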
@@ -805,9 +810,7 @@ int FFVideoStream::encode(VFrame *vframe)
        if( ret >= 0 ) {
                AVFrame *frame = *picture;
                frame->pts = curr_pos;
-               AVCodecContext *ctx = st->codec;
-               ret = convert_pixfmt(vframe, frame,
-                       ctx->pix_fmt, ctx->width, ctx->height);
+               ret = convert_pixfmt(vframe, frame);
        }
        if( ret >= 0 ) {
                picture->queue(curr_pos);
@@ -822,6 +825,10 @@ int FFVideoStream::encode(VFrame *vframe)
 
 int FFVideoStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
 {
+       if( frame ) {
+               frame->interlaced_frame = interlaced;
+               frame->top_field_first = top_field_first;
+       }
        int ret = avcodec_encode_video2(st->codec, pkt, frame, &got_packet);
        if( ret < 0 ) {
                ff_err(ret, "FFVideoStream::encode_frame: encode video failed\n");
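
interlaced_frame and top_field_first are plain AVFrame fields in the avcodec_encode_video2()-era API, so the field order chosen when the encoder is opened travels with every frame handed to it. A minimal sketch (hypothetical helper):

#include <libavutil/frame.h>

// Tag a frame as interlaced, bottom field first, before it is encoded.
static void tag_interlaced_bff(AVFrame *frame)
{
	frame->interlaced_frame = 1;   // 0 would mean progressive
	frame->top_field_first  = 0;   // 1 = top field first, 0 = bottom field first
}
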
@@ -832,7 +839,7 @@ int FFVideoStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
 
 AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
 {
-       switch( color_model ) { 
+       switch( color_model ) {
        case BC_YUV422:         return AV_PIX_FMT_YUYV422;
        case BC_RGB888:         return AV_PIX_FMT_RGB24;
        case BC_RGBA8888:       return AV_PIX_FMT_RGBA;
@@ -848,6 +855,7 @@ AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
        case BC_RGB565:         return AV_PIX_FMT_RGB565;
        case BC_RGB161616:      return AV_PIX_FMT_RGB48LE;
        case BC_RGBA16161616:   return AV_PIX_FMT_RGBA64LE;
+       case BC_AYUV16161616:   return AV_PIX_FMT_AYUV64LE;
        default: break;
        }
 
@@ -856,7 +864,7 @@ AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
 
 int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
 {
-       switch (pix_fmt) { 
+       switch (pix_fmt) {
        case AV_PIX_FMT_YUYV422:        return BC_YUV422;
        case AV_PIX_FMT_RGB24:          return BC_RGB888;
        case AV_PIX_FMT_RGBA:           return BC_RGBA8888;
@@ -872,59 +880,61 @@ int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
        case AV_PIX_FMT_RGB565:         return BC_RGB565;
        case AV_PIX_FMT_RGB48LE:        return BC_RGB161616;
        case AV_PIX_FMT_RGBA64LE:       return BC_RGBA16161616;
+       case AV_PIX_FMT_AYUV64LE:       return BC_AYUV16161616;
        default: break;
        }
 
        return -1;
 }
 
-int FFVideoConvert::convert_picture_vframe(VFrame *frame,
-               AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip)
+{
+       AVFrame *ipic = av_frame_alloc();
+       int ret = convert_picture_vframe(frame, ip, ipic);
+       av_frame_free(&ipic);
+       return ret;
+}
+
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
 {
-       // try bc_xfer methods
-       int imodel = pix_fmt_to_color_model(ifmt);
-       if( imodel >= 0 ) {
-               long y_ofs = 0, u_ofs = 0, v_ofs = 0;
-               uint8_t *data = ip->data[0];
-               if( BC_CModels::is_yuv(imodel) ) {
-                       u_ofs = ip->data[1] - data;
-                       v_ofs = ip->data[2] - data;
-               }
-               VFrame iframe(data, -1, y_ofs, u_ofs, v_ofs, iw, ih, imodel, ip->linesize[0]);
-               frame->transfer_from(&iframe);
-               return 0;
-       }
-       // try sws methods
-       AVFrame opic;
        int cmodel = frame->get_color_model();
        AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
        if( ofmt == AV_PIX_FMT_NB ) return -1;
-       int size = av_image_fill_arrays(opic.data, opic.linesize,
+       int size = av_image_fill_arrays(ipic->data, ipic->linesize,
                frame->get_data(), ofmt, frame->get_w(), frame->get_h(), 1);
        if( size < 0 ) return -1;
 
-       // transfer line sizes must match also
-       int planar = BC_CModels::is_planar(cmodel);
-       int packed_width = !planar ? frame->get_bytes_per_line() :
-                BC_CModels::calculate_pixelsize(cmodel) * frame->get_w();
-       if( packed_width != opic.linesize[0] )  return -1;
-
-       if( planar ) {
+       int bpp = BC_CModels::calculate_pixelsize(cmodel);
+       int ysz = bpp * frame->get_w(), usz = ysz;
+       switch( cmodel ) {
+       case BC_YUV410P:
+       case BC_YUV411P:
+               usz /= 2;
+       case BC_YUV420P:
+       case BC_YUV422P:
+               usz /= 2;
+       case BC_YUV444P:
                // override av_image_fill_arrays() for planar types
-               opic.data[0] = frame->get_y();
-               opic.data[1] = frame->get_u();
-               opic.data[2] = frame->get_v();
-       }
-
-       convert_ctx = sws_getCachedContext(convert_ctx, iw, ih, ifmt,
-               frame->get_w(), frame->get_h(), ofmt, SWS_BICUBIC, NULL, NULL, NULL);
+               ipic->data[0] = frame->get_y();  ipic->linesize[0] = ysz;
+               ipic->data[1] = frame->get_u();  ipic->linesize[1] = usz;
+               ipic->data[2] = frame->get_v();  ipic->linesize[2] = usz;
+               break;
+       default:
+               ipic->data[0] = frame->get_data();
+               ipic->linesize[0] = frame->get_bytes_per_line();
+               break;
+       }
+
+       AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+       convert_ctx = sws_getCachedContext(convert_ctx, ip->width, ip->height, pix_fmt,
+               frame->get_w(), frame->get_h(), ofmt, SWS_POINT, NULL, NULL, NULL);
        if( !convert_ctx ) {
                fprintf(stderr, "FFVideoConvert::convert_picture_frame:"
                                " sws_getCachedContext() failed\n");
                return -1;
        }
-       int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ih,
-           opic.data, opic.linesize);
+       int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
+           ipic->data, ipic->linesize);
        if( ret < 0 ) {
                ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
                return -1;
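
The fall-through switch replaces the old "line sizes must match" bailout by filling in the planar line sizes directly: each fall-through step halves the chroma width once, so 420P/422P planes get half and 410P/411P a quarter of the luma line size. A sketch of that rule in isolation, assuming the BC_* color model constants from guicast (header name assumed):

#include "bccmodels.h"   // BC_YUV* constants (assumed include)

// Chroma (U/V) line size of the planar YUV models, relative to the luma line size.
static int chroma_linesize(int cmodel, int luma_linesize)
{
	switch( cmodel ) {
	case BC_YUV410P:
	case BC_YUV411P: return luma_linesize / 4;  // two halvings in the fall-through above
	case BC_YUV420P:
	case BC_YUV422P: return luma_linesize / 2;  // one halving
	default:         return luma_linesize;      // BC_YUV444P keeps full width
	}
}
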
@@ -932,34 +942,40 @@ int FFVideoConvert::convert_picture_vframe(VFrame *frame,
        return 0;
 }
 
-int FFVideoConvert::convert_cmodel(VFrame *frame,
-                AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::convert_cmodel(VFrame *frame, AVFrame *ip)
 {
        // try direct transfer
-       if( !convert_picture_vframe(frame, ip, ifmt, iw, ih) ) return 1;
+       if( !convert_picture_vframe(frame, ip) ) return 1;
        // use indirect transfer
+       AVPixelFormat ifmt = (AVPixelFormat)ip->format;
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(ifmt);
        int max_bits = 0;
        for( int i = 0; i <desc->nb_components; ++i ) {
                int bits = desc->comp[i].depth;
                if( bits > max_bits ) max_bits = bits;
        }
-// from libavcodec/pixdesc.c
-#define pixdesc_has_alpha(pixdesc) ((pixdesc)->nb_components == 2 || \
- (pixdesc)->nb_components == 4 || (pixdesc)->flags & AV_PIX_FMT_FLAG_PAL)
-       int icolor_model = pixdesc_has_alpha(desc) ?
-               (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
-               (max_bits > 8 ? BC_RGB161616 : BC_RGB888) ;
-       VFrame vframe(iw, ih, icolor_model);
-       if( convert_picture_vframe(&vframe, ip, ifmt, iw, ih) ) return -1;
+       int imodel = pix_fmt_to_color_model(ifmt);
+       int imodel_is_yuv = BC_CModels::is_yuv(imodel);
+       int cmodel = frame->get_color_model();
+       int cmodel_is_yuv = BC_CModels::is_yuv(cmodel);
+       if( imodel < 0 || imodel_is_yuv != cmodel_is_yuv ) {
+               imodel = cmodel_is_yuv ?
+                   (BC_CModels::has_alpha(cmodel) ?
+                       BC_AYUV16161616 :
+                       (max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P)) :
+                   (BC_CModels::has_alpha(cmodel) ?
+                       (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
+                       (max_bits > 8 ? BC_RGB161616 : BC_RGB888)) ;
+       }
+       VFrame vframe(ip->width, ip->height, imodel);
+       if( convert_picture_vframe(&vframe, ip) ) return -1;
        frame->transfer_from(&vframe);
        return 1;
 }
 
-int FFVideoConvert::transfer_cmodel(VFrame *frame,
-                AVFrame *ifp, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::transfer_cmodel(VFrame *frame, AVFrame *ifp)
 {
-       int ret = convert_cmodel(frame, ifp, ifmt, iw, ih);
+       int ret = convert_cmodel(frame, ifp);
        if( ret > 0 ) {
                const AVDictionary *src = av_frame_get_metadata(ifp);
                AVDictionaryEntry *t = NULL;
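
When the decoder's pixel format has no direct BC_* equivalent, or sits on the other side of the YUV/RGB divide from the target, convert_cmodel() now routes through an intermediate VFrame that keeps the target's color family and alpha and widens to 16 bits when the source has more than 8 bits per component (convert_pixfmt() further down mirrors the same choice). A sketch of the selection, assuming BC_CModels from guicast:

#include "bccmodels.h"   // BC_CModels and BC_* constants (assumed include)

// Pick the intermediate color model for the indirect transfer path.
static int intermediate_model(int cmodel, int max_bits)
{
	int has_alpha = BC_CModels::has_alpha(cmodel);
	if( BC_CModels::is_yuv(cmodel) )
		return has_alpha || max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P;
	return has_alpha ?
		(max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
		(max_bits > 8 ? BC_RGB161616 : BC_RGB888);
}
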
@@ -971,38 +987,53 @@ int FFVideoConvert::transfer_cmodel(VFrame *frame,
        return ret;
 }
 
-int FFVideoConvert::convert_vframe_picture(VFrame *frame,
-               AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op)
+{
+       AVFrame *opic = av_frame_alloc();
+       int ret = convert_vframe_picture(frame, op, opic);
+       av_frame_free(&opic);
+       return ret;
+}
+
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
 {
-       AVFrame opic;
        int cmodel = frame->get_color_model();
        AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
        if( ifmt == AV_PIX_FMT_NB ) return -1;
-       int size = av_image_fill_arrays(opic.data, opic.linesize,
+       int size = av_image_fill_arrays(opic->data, opic->linesize,
                 frame->get_data(), ifmt, frame->get_w(), frame->get_h(), 1);
        if( size < 0 ) return -1;
 
-       // transfer line sizes must match also
-       int planar = BC_CModels::is_planar(cmodel);
-       int packed_width = !planar ? frame->get_bytes_per_line() :
-                BC_CModels::calculate_pixelsize(cmodel) * frame->get_w();
-       if( packed_width != opic.linesize[0] )  return -1;
-
-       if( planar ) {
+       int bpp = BC_CModels::calculate_pixelsize(cmodel);
+       int ysz = bpp * frame->get_w(), usz = ysz;
+       switch( cmodel ) {
+       case BC_YUV410P:
+       case BC_YUV411P:
+               usz /= 2;
+       case BC_YUV420P:
+       case BC_YUV422P:
+               usz /= 2;
+       case BC_YUV444P:
                // override av_image_fill_arrays() for planar types
-               opic.data[0] = frame->get_y();
-               opic.data[1] = frame->get_u();
-               opic.data[2] = frame->get_v();
-       }
-
-       convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(), ifmt,
-               ow, oh, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
+               opic->data[0] = frame->get_y();  opic->linesize[0] = ysz;
+               opic->data[1] = frame->get_u();  opic->linesize[1] = usz;
+               opic->data[2] = frame->get_v();  opic->linesize[2] = usz;
+               break;
+       default:
+               opic->data[0] = frame->get_data();
+               opic->linesize[0] = frame->get_bytes_per_line();
+               break;
+       }
+
+       AVPixelFormat ofmt = (AVPixelFormat)op->format;
+       convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(),
+               ifmt, op->width, op->height, ofmt, SWS_POINT, NULL, NULL, NULL);
        if( !convert_ctx ) {
                fprintf(stderr, "FFVideoConvert::convert_frame_picture:"
                                " sws_getCachedContext() failed\n");
                return -1;
        }
-       int ret = sws_scale(convert_ctx, opic.data, opic.linesize, 0, frame->get_h(),
+       int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
                        op->data, op->linesize);
        if( ret < 0 ) {
                ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n");
@@ -1011,28 +1042,36 @@ int FFVideoConvert::convert_vframe_picture(VFrame *frame,
        return 0;
 }
 
-int FFVideoConvert::convert_pixfmt(VFrame *frame,
-                AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::convert_pixfmt(VFrame *frame, AVFrame *op)
 {
        // try direct transfer
-       if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 1;
+       if( !convert_vframe_picture(frame, op) ) return 1;
        // use indirect transfer
-       int colormodel = frame->get_color_model();
-       int bits = BC_CModels::calculate_pixelsize(colormodel) * 8;
-       bits /= BC_CModels::components(colormodel);
-       int icolor_model =  BC_CModels::has_alpha(colormodel) ?
-               (bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
-               (bits > 8 ? BC_RGB161616: BC_RGB888) ;
-       VFrame vframe(frame->get_w(), frame->get_h(), icolor_model);
+       int cmodel = frame->get_color_model();
+       int max_bits = BC_CModels::calculate_pixelsize(cmodel) * 8;
+       max_bits /= BC_CModels::components(cmodel);
+       AVPixelFormat ofmt = (AVPixelFormat)op->format;
+       int imodel = pix_fmt_to_color_model(ofmt);
+       int imodel_is_yuv = BC_CModels::is_yuv(imodel);
+       int cmodel_is_yuv = BC_CModels::is_yuv(cmodel);
+       if( imodel < 0 || imodel_is_yuv != cmodel_is_yuv ) {
+               imodel = cmodel_is_yuv ?
+                   (BC_CModels::has_alpha(cmodel) ?
+                       BC_AYUV16161616 :
+                       (max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P)) :
+                   (BC_CModels::has_alpha(cmodel) ?
+                       (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
+                       (max_bits > 8 ? BC_RGB161616 : BC_RGB888)) ;
+       }
+       VFrame vframe(frame->get_w(), frame->get_h(), imodel);
        vframe.transfer_from(frame);
-       if( !convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1;
+       if( !convert_vframe_picture(&vframe, op) ) return 1;
        return -1;
 }
 
-int FFVideoConvert::transfer_pixfmt(VFrame *frame,
-                AVFrame *ofp, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::transfer_pixfmt(VFrame *frame, AVFrame *ofp)
 {
-       int ret = convert_pixfmt(frame, ofp, ofmt, ow, oh);
+       int ret = convert_pixfmt(frame, ofp);
        if( ret > 0 ) {
                BC_Hash *hp = frame->get_params();
                AVDictionary **dict = avpriv_frame_get_metadatap(ofp);
@@ -1140,7 +1179,7 @@ AVRational FFMPEG::to_sample_aspect_ratio(Asset *asset)
        int width = 1000000, height = width * sample_aspect + 0.5;
        float w, h;
        MWindow::create_aspect_ratio(w, h, width, height);
-       return (AVRational){(int)h, (int)w};
+       return (AVRational){(int)w, (int)h};
 #else
 // square pixels
        return (AVRational){1, 1};
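
AVRational is {num, den}, so the sample aspect ratio has to carry the horizontal term in the numerator; the swap above restores that. A worked sketch of where such a ratio comes from (hypothetical helper, using libavutil's av_d2q()):

#include <libavutil/rational.h>

// SAR = DAR * frame_height / frame_width; e.g. a 4:3 display aspect at
// 720x480 gives 8:9 -- pixels slightly narrower than they are tall.
static AVRational sar_from_dar(double dar, int frame_w, int frame_h)
{
	return av_d2q(dar * frame_h / frame_w, 255);
}
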
@@ -1213,20 +1252,38 @@ int FFMPEG::get_codec(char *codec, const char *path, const char *spec)
 
 int FFMPEG::get_file_format()
 {
-       int ret = 0;
+       char audio_muxer[BCSTRLEN], video_muxer[BCSTRLEN];
        char audio_format[BCSTRLEN], video_format[BCSTRLEN];
-       file_format[0] = audio_format[0] = video_format[0] = 0;
+       audio_muxer[0] = audio_format[0] = 0;
+       video_muxer[0] = video_format[0] = 0;
        Asset *asset = file_base->asset;
-       if( !ret && asset->audio_data )
-               ret = get_format(audio_format, "audio", asset->acodec);
-       if( !ret && asset->video_data )
-               ret = get_format(video_format, "video", asset->vcodec);
-       if( !ret && !audio_format[0] && !video_format[0] )
+       int ret = asset ? 0 : 1;
+       if( !ret && asset->audio_data ) {
+               if( !(ret=get_format(audio_format, "audio", asset->acodec)) ) {
+                       if( get_format(audio_muxer, "format", audio_format) ) {
+                               strcpy(audio_muxer, audio_format);
+                               audio_format[0] = 0;
+                       }
+               }
+       }
+       if( !ret && asset->video_data ) {
+               if( !(ret=get_format(video_format, "video", asset->vcodec)) ) {
+                       if( get_format(video_muxer, "format", video_format) ) {
+                               strcpy(video_muxer, video_format);
+                               video_format[0] = 0;
+                       }
+               }
+       }
+       if( !ret && !audio_muxer[0] && !video_muxer[0] )
                ret = 1;
+       if( !ret && audio_muxer[0] && video_muxer[0] &&
+           strcmp(audio_muxer, video_muxer) ) ret = -1;
        if( !ret && audio_format[0] && video_format[0] &&
            strcmp(audio_format, video_format) ) ret = -1;
        if( !ret )
-               strcpy(file_format, audio_format[0] ? audio_format : video_format);
+               strcpy(file_format, !audio_format[0] && !video_format[0] ?
+                       (audio_muxer[0] ? audio_muxer : video_muxer) :
+                       (audio_format[0] ? audio_format : video_format));
        return ret;
 }
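
With the muxer lookup added, a codec-level format name still wins when one is present and audio takes precedence over video; only when neither codec supplied a format does the resolved muxer name become file_format. The same precedence as the nested ternary above, written out long-hand in a small sketch:

#include <string.h>

static void pick_file_format(char *file_format,
	const char *audio_format, const char *video_format,
	const char *audio_muxer, const char *video_muxer)
{
	if( audio_format[0] )      strcpy(file_format, audio_format);
	else if( video_format[0] ) strcpy(file_format, video_format);
	else if( audio_muxer[0] )  strcpy(file_format, audio_muxer);
	else                       strcpy(file_format, video_muxer);
}
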
 
@@ -1234,7 +1291,7 @@ int FFMPEG::scan_option_line(char *cp, char *tag, char *val)
 {
        while( *cp == ' ' || *cp == '\t' ) ++cp;
        char *bp = cp;
-       while( *cp && *cp != ' ' && *cp != '\t' && *cp != '=' ) ++cp;
+       while( *cp && *cp != ' ' && *cp != '\t' && *cp != '=' && *cp != '\n' ) ++cp;
        int len = cp - bp;
        if( !len || len > BCSTRLEN-1 ) return 1;
        while( bp < cp ) *tag++ = *bp++;
@@ -1255,7 +1312,7 @@ int FFMPEG::load_defaults(const char *path, const char *type,
                 char *codec, char *codec_options, int len)
 {
        char default_file[BCTEXTLEN];
-       FFMPEG::set_option_path(default_file, "%s/%s.dfl", path, type);
+       set_option_path(default_file, "%s/%s.dfl", path, type);
        FILE *fp = fopen(default_file,"r");
        if( !fp ) return 1;
        fgets(codec, BCSTRLEN, fp);
@@ -1267,14 +1324,15 @@ int FFMPEG::load_defaults(const char *path, const char *type,
                codec_options += n;  len -= n;
        }
        fclose(fp);
-       FFMPEG::set_option_path(default_file, "%s/%s", path, codec);
-       return FFMPEG::load_options(default_file, codec_options, len);
+       set_option_path(default_file, "%s/%s", path, codec);
+       return load_options(default_file, codec_options, len);
 }
 
 void FFMPEG::set_asset_format(Asset *asset, const char *text)
 {
        if( asset->format != FILE_FFMPEG ) return;
-       strcpy(asset->fformat, text);
+       if( text != asset->fformat )
+               strcpy(asset->fformat, text);
        if( !asset->ff_audio_options[0] ) {
                asset->audio_data = !load_defaults("audio", text, asset->acodec,
                        asset->ff_audio_options, sizeof(asset->ff_audio_options));
@@ -1315,11 +1373,18 @@ int FFMPEG::get_encoder(FILE *fp,
        return 0;
 }
 
-int FFMPEG::read_options(const char *options, AVDictionary *&opts)
+int FFMPEG::read_options(const char *options, AVDictionary *&opts, int skip)
 {
        FILE *fp = fopen(options,"r");
        if( !fp ) return 1;
-       int ret = read_options(fp, options, opts);
+       int ret = 0;
+       while( !ret && --skip >= 0 ) {
+               int ch = getc(fp);
+               while( ch >= 0 && ch != '\n' ) ch = getc(fp);
+               if( ch < 0 ) ret = 1;
+       }
+       if( !ret )
+               ret = read_options(fp, options, opts);
        fclose(fp);
        return ret;
 }
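
The new skip argument throws away whole lines before the key=value parsing begins; encode_activate() further down now calls read_options(option_path, fopts, 1) so the first line of a format/<name> options file is not handed to av_dict_set(). A standalone sketch of the skip loop:

#include <stdio.h>

// Consume n whole lines; returns 1 if EOF arrives before all n are skipped.
static int skip_lines(FILE *fp, int n)
{
	while( --n >= 0 ) {
		int ch = getc(fp);
		while( ch >= 0 && ch != '\n' ) ch = getc(fp);
		if( ch < 0 ) return 1;
	}
	return 0;
}
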
@@ -1341,7 +1406,6 @@ int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
        char line[BCTEXTLEN];
        while( !ret && fgets(line, sizeof(line), fp) ) {
                line[sizeof(line)-1] = 0;
-               ++no;
                if( line[0] == '#' ) continue;
                if( line[0] == '\n' ) continue;
                char key[BCSTRLEN], val[BCTEXTLEN];
@@ -1414,7 +1478,7 @@ double FFMPEG::to_secs(int64_t time, AVRational time_base)
 {
        double base_time = time == AV_NOPTS_VALUE ? 0 :
                av_rescale_q(time, time_base, AV_TIME_BASE_Q);
-       return base_time / AV_TIME_BASE; 
+       return base_time / AV_TIME_BASE;
 }
 
 int FFMPEG::info(char *text, int len)
@@ -1562,8 +1626,8 @@ int FFMPEG::open_decoder()
                printf("FFMPEG::open_decoder: some stream times estimated\n");
 
        ff_lock("FFMPEG::open_decoder");
-       int bad_time = 0;
-       for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
+       int ret = 0, bad_time = 0;
+       for( int i=0; !ret && i<(int)fmt_ctx->nb_streams; ++i ) {
                AVStream *st = fmt_ctx->streams[i];
                if( st->duration == AV_NOPTS_VALUE ) bad_time = 1;
                AVCodecContext *avctx = st->codec;
@@ -1588,7 +1652,7 @@ int FFMPEG::open_decoder()
                        vid->nudge = st->start_time;
                        vid->reading = -1;
                        if( opt_video_filter )
-                               vid->create_filter(opt_video_filter, avctx,avctx);
+                               ret = vid->create_filter(opt_video_filter, avctx,avctx);
                }
                else if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) {
                        if( avctx->channels < 1 ) continue;
@@ -1616,13 +1680,13 @@ int FFMPEG::open_decoder()
                        aud->nudge = st->start_time;
                        aud->reading = -1;
                        if( opt_audio_filter )
-                               aud->create_filter(opt_audio_filter, avctx,avctx);
+                               ret = aud->create_filter(opt_audio_filter, avctx,avctx);
                }
        }
        if( bad_time )
                printf("FFMPEG::open_decoder: some stream have bad times\n");
        ff_unlock();
-       return 0;
+       return ret < 0 ? -1 : 0;
 }
 
 
@@ -1646,7 +1710,10 @@ int FFMPEG::init_encoder(const char *filename)
        }
        ff_lock("FFMPEG::init_encoder");
        av_register_all();
-       avformat_alloc_output_context2(&fmt_ctx, 0, file_format, filename);
+       char format[BCSTRLEN];
+       if( get_format(format, "format", file_format) )
+               strcpy(format, file_format);
+       avformat_alloc_output_context2(&fmt_ctx, 0, format, filename);
        if( !fmt_ctx ) {
                eprintf(_("failed: %s\n"), filename);
                ret = 1;
@@ -1705,7 +1772,7 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
                        eprintf(_("cant create stream %s:%s\n"), codec_name, filename);
                        ret = 1;
                }
-       } 
+       }
        if( !ret ) {
                AVCodecContext *ctx = st->codec;
                switch( codec_desc->type ) {
@@ -1804,6 +1871,9 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
                        ctx->time_base = (AVRational) { frame_rate.den, frame_rate.num };
                        st->time_base = ctx->time_base;
                        vid->writing = -1;
+                       vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
+                               asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ? 1 : 0;
+                       vid->top_field_first = asset->interlace_mode == ILACE_MODE_TOP_FIRST ? 1 : 0;
                        break; }
                default:
                        eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
@@ -1828,9 +1898,10 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
                        fst->add_bsfilter(bsfilter, !bsargs[0] ? 0 : bsargs);
        }
 
-       ff_unlock();
        if( !ret )
                start_muxer();
+
+       ff_unlock();
        av_dict_free(&sopts);
        return ret;
 }
@@ -1949,20 +2020,55 @@ int FFMPEG::encode_activate()
                    (ret=avio_open(&fmt_ctx->pb, fmt_ctx->filename, AVIO_FLAG_WRITE)) < 0 ) {
                        ff_err(ret, "FFMPEG::encode_activate: err opening : %s\n",
                                fmt_ctx->filename);
-                       return 1;
+                       return -1;
+               }
+
+               int prog_id = 1;
+               AVProgram *prog = av_new_program(fmt_ctx, prog_id);
+               for( int i=0; i< ffvideo.size(); ++i )
+                       av_program_add_stream_index(fmt_ctx, prog_id, ffvideo[i]->fidx);
+               for( int i=0; i< ffaudio.size(); ++i )
+                       av_program_add_stream_index(fmt_ctx, prog_id, ffaudio[i]->fidx);
+               int pi = fmt_ctx->nb_programs;
+               while(  --pi >= 0 && fmt_ctx->programs[pi]->id != prog_id );
+               AVDictionary **meta = &prog->metadata;
+               av_dict_set(meta, "service_provider", "cin5", 0);
+               const char *path = fmt_ctx->filename, *bp = strrchr(path,'/');
+               if( bp ) path = bp + 1;
+               av_dict_set(meta, "title", path, 0);
+
+               if( ffaudio.size() ) {
+                       const char *ep = getenv("CIN_AUDIO_LANG"), *lp = 0;
+                       if( !ep && (lp=getenv("LANG")) ) { // some are guesses
+                               static struct { const char lc[3], lng[4]; } lcode[] = {
+                                       { "en", "eng" }, { "de", "ger" }, { "es", "spa" },
+                                       { "eu", "baq" }, { "fr", "fre" }, { "el", "gre" },
+                                       { "hi", "hin" }, { "it", "ita" }, { "ja", "jpn" },
+                                       { "ko", "kor" }, { "nl", "dut" }, { "pl", "pol" },
+                                       { "pt", "por" }, { "ru", "rus" }, { "sl", "slv" },
+                                       { "uk", "ukr" }, { "vi", "vie" }, { "zh", "chi" },
+                               };
+                               for( int i=sizeof(lcode)/sizeof(lcode[0]); --i>=0 && !ep; )
+                                       if( !strncmp(lcode[i].lc,lp,2) ) ep = lcode[i].lng;
+                       }
+                       if( !ep ) ep = "und";
+                       char lang[5];
+                       strncpy(lang,ep,3);  lang[3] = 0;
+                       AVStream *st = ffaudio[0]->st;
+                       av_dict_set(&st->metadata,"language",lang,0);
                }
 
                AVDictionary *fopts = 0;
                char option_path[BCTEXTLEN];
                set_option_path(option_path, "format/%s", file_format);
-               read_options(option_path, fopts);
+               read_options(option_path, fopts, 1);
                ret = avformat_write_header(fmt_ctx, &fopts);
-               av_dict_free(&fopts);
                if( ret < 0 ) {
                        ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
                                fmt_ctx->filename);
-                       return 1;
+                       return -1;
                }
+               av_dict_free(&fopts);
                encoding = 1;
        }
        return encoding;
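
The audio track now gets a language tag: CIN_AUDIO_LANG overrides everything, otherwise the first two letters of $LANG are mapped through the table above, falling back to "und". The value is clamped to three characters before it lands in the stream metadata; a minimal sketch of that last step (hypothetical helper):

#include <string.h>
#include <libavutil/dict.h>

// Clamp the code to its 3-letter form and attach it to a stream's metadata,
// mirroring the av_dict_set() call above.
static void set_stream_language(AVDictionary **metadata, const char *code)
{
	char lang[4];
	strncpy(lang, code, 3);  lang[3] = 0;
	av_dict_set(metadata, "language", lang, 0);
}
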
@@ -2302,7 +2408,12 @@ int FFVideoStream::create_filter(const char *filter_spec,
                AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
 {
        avfilter_register_all();
-       AVFilter *filter = avfilter_get_by_name(filter_spec);
+       const char *sp = filter_spec;
+       char filter_name[BCSTRLEN], *np = filter_name;
+       int i = sizeof(filter_name)-1;
+       while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+       *np = 0;
+       AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
        if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
                ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
                return -1;
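
Previously the whole spec string went to avfilter_get_by_name(), so a spec carrying arguments ("scale=1280:720", "yadif=0:-1:0") could never resolve; the new code splits off the leading name token first, and the audio path below gets the same treatment. A sketch of the tokenizer (hypothetical helper):

#include <string.h>

// Copy the leading filter name out of a spec such as "scale=1280:720,format":
// the name ends at the first space, tab, ':', '=' or ','.
static void filter_name_of(const char *spec, char *name, int name_len)
{
	char *np = name;
	while( --name_len > 0 && *spec && !strchr(" \t:=,", *spec) )
		*np++ = *spec++;
	*np = 0;   // "scale=1280:720" -> "scale", "yadif=0:-1:0" -> "yadif"
}
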
@@ -2331,14 +2442,19 @@ int FFVideoStream::create_filter(const char *filter_spec,
                ff_err(ret, "FFVideoStream::create_filter");
        else
                ret = FFStream::create_filter(filter_spec);
-       return ret >= 0 ? 0 : 1;
+       return ret >= 0 ? 0 : -1;
 }
 
 int FFAudioStream::create_filter(const char *filter_spec,
                AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
 {
        avfilter_register_all();
-       AVFilter *filter = avfilter_get_by_name(filter_spec);
+       const char *sp = filter_spec;
+       char filter_name[BCSTRLEN], *np = filter_name;
+       int i = sizeof(filter_name)-1;
+       while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+       *np = 0;
+       AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
        if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
                ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
                return -1;
@@ -2373,7 +2489,7 @@ int FFAudioStream::create_filter(const char *filter_spec,
                ff_err(ret, "FFAudioStream::create_filter");
        else
                ret = FFStream::create_filter(filter_spec);
-       return ret >= 0 ? 0 : 1;
+       return ret >= 0 ? 0 : -1;
 }
 
 int FFStream::create_filter(const char *filter_spec)
@@ -2398,8 +2514,11 @@ int FFStream::create_filter(const char *filter_spec)
        if( ret >= 0 )
                ret = avfilter_graph_config(filter_graph, NULL);
 
-       if( ret < 0 )
+       if( ret < 0 ) {
                ff_err(ret, "FFStream::create_filter");
+               avfilter_graph_free(&filter_graph);
+               filter_graph = 0;
+       }
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;