change ffmpeg buffer strategy, reactivate 'new' dialog
[goodguy/history.git] cinelerra-5.1/cinelerra/ffmpeg.C
index 9b468bf4f8ea599718bb195e0e94c908b9ada180..31a0d22963afe40587d2871b162267cb9ac00293 100644
@@ -761,10 +761,10 @@ int FFVideoStream::load(VFrame *vframe, int64_t pos)
                ret = read_frame(frame);
                if( ret > 0 ) ++curr_pos;
        }
+       if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
+               ret = -1;
        if( ret >= 0 ) {
-               AVCodecContext *ctx = st->codec;
-               ret = convert_cmodel(vframe, frame,
-                       ctx->pix_fmt, ctx->width, ctx->height);
+               ret = convert_cmodel(vframe, frame);
        }
        ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
        return ret;
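The new guard keeps load() from converting when read_frame() left nothing behind: av_frame_alloc()/av_frame_unref() leave frame->format at AV_PIX_FMT_NONE and the dimensions at 0, so an unset format or non-positive size means no picture was decoded. With the geometry read from the AVFrame itself, convert_cmodel() no longer has to consult st->codec for format and size. A minimal sketch of that call shape, with decode_one() as a hypothetical stand-in for the stream's read_frame():

// Sketch only: guard the conversion on the decoded frame's own geometry.
// decode_one() is a hypothetical helper returning >0 when a frame landed.
static int load_one(FFVideoConvert *cvt, VFrame *vframe, AVFrame *frame)
{
	int ret = decode_one(frame);
	if( frame->format == AV_PIX_FMT_NONE ||
	    frame->width <= 0 || frame->height <= 0 )
		ret = -1;                                 // nothing usable decoded
	if( ret >= 0 )
		ret = cvt->convert_cmodel(vframe, frame); // fmt/w/h come from frame
	return ret > 0 ? 1 : ret < 0 ? -1 : 0;
}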
@@ -805,9 +805,7 @@ int FFVideoStream::encode(VFrame *vframe)
        if( ret >= 0 ) {
                AVFrame *frame = *picture;
                frame->pts = curr_pos;
-               AVCodecContext *ctx = st->codec;
-               ret = convert_pixfmt(vframe, frame,
-                       ctx->pix_fmt, ctx->width, ctx->height);
+               ret = convert_pixfmt(vframe, frame);
        }
        if( ret >= 0 ) {
                picture->queue(curr_pos);
@@ -878,53 +876,54 @@ int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
        return -1;
 }
 
-int FFVideoConvert::convert_picture_vframe(VFrame *frame,
-               AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip)
+{
+       AVFrame *ipic = av_frame_alloc();
+       int ret = convert_picture_vframe(frame, ip, ipic);
+       av_frame_free(&ipic);
+       return ret;
+}
+
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
 {
-       // try bc_xfer methods
-       int imodel = pix_fmt_to_color_model(ifmt);
-       if( imodel >= 0 ) {
-               long y_ofs = 0, u_ofs = 0, v_ofs = 0;
-               uint8_t *data = ip->data[0];
-               if( BC_CModels::is_yuv(imodel) ) {
-                       u_ofs = ip->data[1] - data;
-                       v_ofs = ip->data[2] - data;
-               }
-               VFrame iframe(data, -1, y_ofs, u_ofs, v_ofs, iw, ih, imodel, ip->linesize[0]);
-               frame->transfer_from(&iframe);
-               return 0;
-       }
-       // try sws methods
-       AVFrame opic;
        int cmodel = frame->get_color_model();
        AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
        if( ofmt == AV_PIX_FMT_NB ) return -1;
-       int size = av_image_fill_arrays(opic.data, opic.linesize,
+       int size = av_image_fill_arrays(ipic->data, ipic->linesize,
                frame->get_data(), ofmt, frame->get_w(), frame->get_h(), 1);
        if( size < 0 ) return -1;
 
-       // transfer line sizes must match also
-       int planar = BC_CModels::is_planar(cmodel);
-       int packed_width = !planar ? frame->get_bytes_per_line() :
-                BC_CModels::calculate_pixelsize(cmodel) * frame->get_w();
-       if( packed_width != opic.linesize[0] )  return -1;
-
-       if( planar ) {
+       int bpp = BC_CModels::calculate_pixelsize(cmodel);
+       int ysz = bpp * frame->get_w(), usz = ysz;
+       switch( cmodel ) {
+       case BC_YUV410P:
+       case BC_YUV411P:
+               usz /= 2;
+       case BC_YUV420P:
+       case BC_YUV422P:
+               usz /= 2;
+       case BC_YUV444P:
                // override av_image_fill_arrays() for planar types
-               opic.data[0] = frame->get_y();
-               opic.data[1] = frame->get_u();
-               opic.data[2] = frame->get_v();
-       }
-
-       convert_ctx = sws_getCachedContext(convert_ctx, iw, ih, ifmt,
+               ipic->data[0] = frame->get_y();  ipic->linesize[0] = ysz;
+               ipic->data[1] = frame->get_u();  ipic->linesize[1] = usz;
+               ipic->data[2] = frame->get_v();  ipic->linesize[2] = usz;
+               break;
+       default:
+               ipic->data[0] = frame->get_data();
+               ipic->linesize[0] = frame->get_bytes_per_line();
+               break;
+       }
+
+       AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+       convert_ctx = sws_getCachedContext(convert_ctx, ip->width, ip->height, pix_fmt,
                frame->get_w(), frame->get_h(), ofmt, SWS_BICUBIC, NULL, NULL, NULL);
        if( !convert_ctx ) {
                fprintf(stderr, "FFVideoConvert::convert_picture_frame:"
                                " sws_getCachedContext() failed\n");
                return -1;
        }
-       int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ih,
-           opic.data, opic.linesize);
+       int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
+           ipic->data, ipic->linesize);
        if( ret < 0 ) {
                ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
                return -1;
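The removed bc_xfer fast path and the strict packed_width check are replaced by writing the VFrame's plane pointers and per-plane line sizes straight into the scratch AVFrame, so sws_scale() reads the planes wherever they actually live. The case fall-through is deliberate: BC_YUV420P/BC_YUV422P halve the chroma line size once and BC_YUV410P/BC_YUV411P halve it twice, so a 1920-wide 8-bit frame gets a luma stride of 1920 and chroma strides of 960 or 480. A stand-alone sketch of the same wiring for a caller-owned 4:2:0 buffer (scale_yuv420() and its y/u/v/w/h arguments are illustrative, not part of this file):

#include <libavutil/frame.h>
#include <libswscale/swscale.h>

// Wrap caller-owned 4:2:0 planes in a scratch AVFrame and rescale into dst.
// Sketch only: dst's buffers are assumed already allocated.
static int scale_yuv420(struct SwsContext **ctx,
		uint8_t *y, uint8_t *u, uint8_t *v, int w, int h, AVFrame *dst)
{
	AVFrame *src = av_frame_alloc();
	if( !src ) return -1;
	src->data[0] = y;  src->linesize[0] = w;      // luma: full width
	src->data[1] = u;  src->linesize[1] = w / 2;  // chroma: half width
	src->data[2] = v;  src->linesize[2] = w / 2;
	*ctx = sws_getCachedContext(*ctx, w, h, AV_PIX_FMT_YUV420P,
		dst->width, dst->height, (AVPixelFormat)dst->format,
		SWS_BICUBIC, NULL, NULL, NULL);
	int ret = !*ctx ? -1 :
		sws_scale(*ctx, src->data, src->linesize, 0, h,
			dst->data, dst->linesize);
	av_frame_free(&src);
	return ret < 0 ? -1 : 0;
}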
@@ -932,13 +931,13 @@ int FFVideoConvert::convert_picture_vframe(VFrame *frame,
        return 0;
 }
 
-int FFVideoConvert::convert_cmodel(VFrame *frame,
-                AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::convert_cmodel(VFrame *frame, AVFrame *ip)
 {
        // try direct transfer
-       if( !convert_picture_vframe(frame, ip, ifmt, iw, ih) ) return 1;
+       if( !convert_picture_vframe(frame, ip) ) return 1;
        // use indirect transfer
-       const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(ifmt);
+       AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+       const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
        int max_bits = 0;
        for( int i = 0; i <desc->nb_components; ++i ) {
                int bits = desc->comp[i].depth;
@@ -950,16 +949,15 @@ int FFVideoConvert::convert_cmodel(VFrame *frame,
        int icolor_model = pixdesc_has_alpha(desc) ?
                (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
                (max_bits > 8 ? BC_RGB161616 : BC_RGB888) ;
-       VFrame vframe(iw, ih, icolor_model);
-       if( convert_picture_vframe(&vframe, ip, ifmt, iw, ih) ) return -1;
+       VFrame vframe(ip->width, ip->height, icolor_model);
+       if( convert_picture_vframe(&vframe, ip) ) return -1;
        frame->transfer_from(&vframe);
        return 1;
 }
 
-int FFVideoConvert::transfer_cmodel(VFrame *frame,
-                AVFrame *ifp, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::transfer_cmodel(VFrame *frame, AVFrame *ifp)
 {
-       int ret = convert_cmodel(frame, ifp, ifmt, iw, ih);
+       int ret = convert_cmodel(frame, ifp);
        if( ret > 0 ) {
                const AVDictionary *src = av_frame_get_metadata(ifp);
                AVDictionaryEntry *t = NULL;
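When the direct conversion fails, convert_cmodel() bounces through a temporary VFrame whose color model is chosen from the source pixel format descriptor: alpha selects an RGBA model, and any component deeper than 8 bits selects the 16-bit-per-channel variant. A stand-alone sketch of that choice, using AV_PIX_FMT_FLAG_ALPHA in place of the pixdesc_has_alpha() helper used above:

#include <libavutil/pixdesc.h>
// BC_* color model constants come from this tree's bccmodels.h

// Pick the intermediate color model for the indirect transfer path:
// widest component depth and presence of alpha decide the temporary model.
static int intermediate_cmodel(AVPixelFormat fmt)
{
	const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
	if( !desc ) return -1;
	int max_bits = 0;
	for( int i = 0; i < desc->nb_components; ++i )
		if( desc->comp[i].depth > max_bits ) max_bits = desc->comp[i].depth;
	return (desc->flags & AV_PIX_FMT_FLAG_ALPHA) ?
		(max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
		(max_bits > 8 ? BC_RGB161616 : BC_RGB888);
}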
@@ -971,38 +969,53 @@ int FFVideoConvert::transfer_cmodel(VFrame *frame,
        return ret;
 }
 
-int FFVideoConvert::convert_vframe_picture(VFrame *frame,
-               AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op)
+{
+       AVFrame *opic = av_frame_alloc();
+       int ret = convert_vframe_picture(frame, op, opic);
+       av_frame_free(&opic);
+       return ret;
+}
+
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
 {
-       AVFrame opic;
        int cmodel = frame->get_color_model();
        AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
        if( ifmt == AV_PIX_FMT_NB ) return -1;
-       int size = av_image_fill_arrays(opic.data, opic.linesize,
+       int size = av_image_fill_arrays(opic->data, opic->linesize,
                 frame->get_data(), ifmt, frame->get_w(), frame->get_h(), 1);
        if( size < 0 ) return -1;
 
-       // transfer line sizes must match also
-       int planar = BC_CModels::is_planar(cmodel);
-       int packed_width = !planar ? frame->get_bytes_per_line() :
-                BC_CModels::calculate_pixelsize(cmodel) * frame->get_w();
-       if( packed_width != opic.linesize[0] )  return -1;
-
-       if( planar ) {
+       int bpp = BC_CModels::calculate_pixelsize(cmodel);
+       int ysz = bpp * frame->get_w(), usz = ysz;
+       switch( cmodel ) {
+       case BC_YUV410P:
+       case BC_YUV411P:
+               usz /= 2;
+       case BC_YUV420P:
+       case BC_YUV422P:
+               usz /= 2;
+       case BC_YUV444P:
                // override av_image_fill_arrays() for planar types
-               opic.data[0] = frame->get_y();
-               opic.data[1] = frame->get_u();
-               opic.data[2] = frame->get_v();
-       }
-
-       convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(), ifmt,
-               ow, oh, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
+               opic->data[0] = frame->get_y();  opic->linesize[0] = ysz;
+               opic->data[1] = frame->get_u();  opic->linesize[1] = usz;
+               opic->data[2] = frame->get_v();  opic->linesize[2] = usz;
+               break;
+       default:
+               opic->data[0] = frame->get_data();
+               opic->linesize[0] = frame->get_bytes_per_line();
+               break;
+       }
+
+       AVPixelFormat ofmt = (AVPixelFormat)op->format;
+       convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(),
+               ifmt, op->width, op->height, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
        if( !convert_ctx ) {
                fprintf(stderr, "FFVideoConvert::convert_frame_picture:"
                                " sws_getCachedContext() failed\n");
                return -1;
        }
-       int ret = sws_scale(convert_ctx, opic.data, opic.linesize, 0, frame->get_h(),
+       int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
                        op->data, op->linesize);
        if( ret < 0 ) {
                ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n");
@@ -1011,11 +1024,10 @@ int FFVideoConvert::convert_vframe_picture(VFrame *frame,
        return 0;
 }
 
-int FFVideoConvert::convert_pixfmt(VFrame *frame,
-                AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::convert_pixfmt(VFrame *frame, AVFrame *op)
 {
        // try direct transfer
-       if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 1;
+       if( !convert_vframe_picture(frame, op) ) return 1;
        // use indirect transfer
        int colormodel = frame->get_color_model();
        int bits = BC_CModels::calculate_pixelsize(colormodel) * 8;
@@ -1025,14 +1037,13 @@ int FFVideoConvert::convert_pixfmt(VFrame *frame,
                (bits > 8 ? BC_RGB161616: BC_RGB888) ;
        VFrame vframe(frame->get_w(), frame->get_h(), icolor_model);
        vframe.transfer_from(frame);
-       if( !convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1;
+       if( !convert_vframe_picture(&vframe, op) ) return 1;
        return -1;
 }
 
-int FFVideoConvert::transfer_pixfmt(VFrame *frame,
-                AVFrame *ofp, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::transfer_pixfmt(VFrame *frame, AVFrame *ofp)
 {
-       int ret = convert_pixfmt(frame, ofp, ofmt, ow, oh);
+       int ret = convert_pixfmt(frame, ofp);
        if( ret > 0 ) {
                BC_Hash *hp = frame->get_params();
                AVDictionary **dict = avpriv_frame_get_metadatap(ofp);
@@ -1562,8 +1573,8 @@ int FFMPEG::open_decoder()
                printf("FFMPEG::open_decoder: some stream times estimated\n");
 
        ff_lock("FFMPEG::open_decoder");
-       int bad_time = 0;
-       for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
+       int ret = 0, bad_time = 0;
+       for( int i=0; !ret && i<(int)fmt_ctx->nb_streams; ++i ) {
                AVStream *st = fmt_ctx->streams[i];
                if( st->duration == AV_NOPTS_VALUE ) bad_time = 1;
                AVCodecContext *avctx = st->codec;
@@ -1588,7 +1599,7 @@ int FFMPEG::open_decoder()
                        vid->nudge = st->start_time;
                        vid->reading = -1;
                        if( opt_video_filter )
-                               vid->create_filter(opt_video_filter, avctx,avctx);
+                               ret = vid->create_filter(opt_video_filter, avctx,avctx);
                }
                else if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) {
                        if( avctx->channels < 1 ) continue;
@@ -1616,13 +1627,13 @@ int FFMPEG::open_decoder()
                        aud->nudge = st->start_time;
                        aud->reading = -1;
                        if( opt_audio_filter )
-                               aud->create_filter(opt_audio_filter, avctx,avctx);
+                               ret = aud->create_filter(opt_audio_filter, avctx,avctx);
                }
        }
        if( bad_time )
                printf("FFMPEG::open_decoder: some stream have bad times\n");
        ff_unlock();
-       return 0;
+       return ret < 0 ? -1 : 0;
 }
 
 
@@ -2302,7 +2313,12 @@ int FFVideoStream::create_filter(const char *filter_spec,
                AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
 {
        avfilter_register_all();
-       AVFilter *filter = avfilter_get_by_name(filter_spec);
+       const char *sp = filter_spec;
+       char filter_name[BCSTRLEN], *np = filter_name;
+       int i = sizeof(filter_name)-1;
+       while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+       *np = 0;
+       AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
        if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
                ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
                return -1;
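Previously the whole spec string (for example "scale=640:360") went to avfilter_get_by_name(), which only knows bare filter names, so any spec carrying options failed the lookup; together with the open_decoder() changes above, a bad spec now also fails the open instead of being ignored. The added loop copies just the leading name token up to the first space, tab, ':', '=' or ','. The same tokenization in isolation, written with strcspn() for brevity (the 64-byte buffer is illustrative; the code above uses BCSTRLEN):

#include <string.h>
#include <libavfilter/avfilter.h>

// Resolve a filter from a spec like "scale=640:360": only the leading token
// up to a space, tab, ':', '=' or ',' is the filter's registered name.
static const AVFilter *filter_from_spec(const char *spec)
{
	char name[64];
	size_t n = strcspn(spec, " \t:=,");
	if( n == 0 || n >= sizeof(name) ) return 0;
	memcpy(name, spec, n);  name[n] = 0;
	return avfilter_get_by_name(name);
}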
@@ -2331,14 +2347,19 @@ int FFVideoStream::create_filter(const char *filter_spec,
                ff_err(ret, "FFVideoStream::create_filter");
        else
                ret = FFStream::create_filter(filter_spec);
-       return ret >= 0 ? 0 : 1;
+       return ret >= 0 ? 0 : -1;
 }
 
 int FFAudioStream::create_filter(const char *filter_spec,
                AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
 {
        avfilter_register_all();
-       AVFilter *filter = avfilter_get_by_name(filter_spec);
+       const char *sp = filter_spec;
+       char filter_name[BCSTRLEN], *np = filter_name;
+       int i = sizeof(filter_name)-1;
+       while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+       *np = 0;
+       AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
        if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
                ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
                return -1;
@@ -2373,7 +2394,7 @@ int FFAudioStream::create_filter(const char *filter_spec,
                ff_err(ret, "FFAudioStream::create_filter");
        else
                ret = FFStream::create_filter(filter_spec);
-       return ret >= 0 ? 0 : 1;
+       return ret >= 0 ? 0 : -1;
 }
 
 int FFStream::create_filter(const char *filter_spec)
@@ -2398,8 +2419,11 @@ int FFStream::create_filter(const char *filter_spec)
        if( ret >= 0 )
                ret = avfilter_graph_config(filter_graph, NULL);
 
-       if( ret < 0 )
+       if( ret < 0 ) {
                ff_err(ret, "FFStream::create_filter");
+               avfilter_graph_free(&filter_graph);
+               filter_graph = 0;
+       }
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
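Freeing the graph on failure means a spec that parses but will not configure no longer leaves FFStream holding a half-built filter_graph; callers that later test the pointer see NULL instead. A reduced sketch of the same build-or-tear-down shape (wiring of the endpoints to buffer/buffersink filters omitted):

#include <libavfilter/avfilter.h>

// Parse and configure a graph from its textual description; on any failure
// free it so the caller never keeps a pointer to a partially built graph.
static int build_graph(AVFilterGraph **graph, const char *desc,
		AVFilterInOut *ins, AVFilterInOut *outs)
{
	*graph = avfilter_graph_alloc();
	int ret = !*graph ? AVERROR(ENOMEM) :
		avfilter_graph_parse_ptr(*graph, desc, &ins, &outs, NULL);
	if( ret >= 0 )
		ret = avfilter_graph_config(*graph, NULL);
	if( ret < 0 )
		avfilter_graph_free(graph);   // also resets *graph to NULL
	avfilter_inout_free(&ins);
	avfilter_inout_free(&outs);
	return ret;
}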