X-Git-Url: http://git.cinelerra-gg.org/git/?a=blobdiff_plain;f=cinelerra-5.1%2Fcinelerra%2Fffmpeg.C;h=31a0d22963afe40587d2871b162267cb9ac00293;hb=28327674e68a9641ffc542190a95fad05e1023ac;hp=37a999298b0487c9ce726015f61dfd616e97bba6;hpb=9df45f6232e108a5f36ba415ff7da8a51ca9dbdc;p=goodguy%2Fhistory.git

diff --git a/cinelerra-5.1/cinelerra/ffmpeg.C b/cinelerra-5.1/cinelerra/ffmpeg.C
index 37a99929..31a0d229 100644
--- a/cinelerra-5.1/cinelerra/ffmpeg.C
+++ b/cinelerra-5.1/cinelerra/ffmpeg.C
@@ -240,7 +240,6 @@ FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx)
 	nudge = AV_NOPTS_VALUE;
 	seek_pos = curr_pos = 0;
 	seeked = 1;  eof = 0;
-	index_markers = 0;
 	reading = writing = 0;
 	flushed = 0;
 	need_packet = 1;
@@ -315,10 +314,10 @@ int FFStream::decode_activate()
 			if( ret >= 0 )
 				reading = 1;
 			else
-				eprintf("FFStream::decode_activate: open decoder failed\n");
+				eprintf(_("open decoder failed\n"));
 		}
 		else
-			eprintf("FFStream::decode_activate: can't clone input file\n");
+			eprintf(_("can't clone input file\n"));
 		av_dict_free(&copts);
 		ff_unlock();
 	}
@@ -385,7 +384,7 @@ int FFStream::load_filter(AVFrame *frame)
 		frame, AV_BUFFERSRC_FLAG_KEEP_REF);
 	if( ret < 0 ) {
 		av_frame_unref(frame);
-		eprintf("FFStream::load_filter: av_buffersrc_add_frame_flags failed\n");
+		eprintf(_("av_buffersrc_add_frame_flags failed\n"));
 	}
 	return ret;
 }
@@ -447,6 +446,7 @@ int FFStream::seek(int64_t no, double rate)
 // default ffmpeg native seek
 	int npkts = 1;
 	int64_t pos = no, plmt = -1;
+	IndexMarks *index_markers = get_markers();
 	if( index_markers && index_markers->size() > 1 ) {
 		IndexMarks &marks = *index_markers;
 		int i = marks.find(pos);
@@ -526,7 +526,7 @@ int FFAudioStream::get_samples(float *&samples, uint8_t **data, int len)
 {
 	samples = *(float **)data;
 	if( resample_context ) {
-		if( len > aud_bfr_sz ) { 
+		if( len > aud_bfr_sz ) {
 			delete [] aud_bfr;
 			aud_bfr = 0;
 		}
@@ -703,11 +703,18 @@ int FFAudioStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
 void FFAudioStream::load_markers()
 {
 	IndexState *index_state = ffmpeg->file_base->asset->index_state;
-	if( index_state->marker_status == MARKERS_NOTTESTED ) return;
 	if( !index_state || idx >= index_state->audio_markers.size() ) return;
+	if( index_state->marker_status == MARKERS_NOTTESTED ) return;
 	FFStream::load_markers(*index_state->audio_markers[idx], sample_rate);
 }
 
+IndexMarks *FFAudioStream::get_markers()
+{
+	IndexState *index_state = ffmpeg->file_base->asset->index_state;
+	if( !index_state || idx >= index_state->audio_markers.size() ) return 0;
+	return index_state->audio_markers[idx];
+}
+
 FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
  : FFStream(ffmpeg, strm, fidx)
 {
@@ -754,10 +761,10 @@ int FFVideoStream::load(VFrame *vframe, int64_t pos)
 		ret = read_frame(frame);
 		if( ret > 0 ) ++curr_pos;
 	}
+	if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
+		ret = -1;
 	if( ret >= 0 ) {
-		AVCodecContext *ctx = st->codec;
-		ret = convert_cmodel(vframe, frame,
-			ctx->pix_fmt, ctx->width, ctx->height);
+		ret = convert_cmodel(vframe, frame);
 	}
 	ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
 	return ret;
@@ -798,9 +805,7 @@ int FFVideoStream::encode(VFrame *vframe)
 	if( ret >= 0 ) {
 		AVFrame *frame = *picture;
 		frame->pts = curr_pos;
-		AVCodecContext *ctx = st->codec;
-		ret = convert_pixfmt(vframe, frame,
-			ctx->pix_fmt, ctx->width, ctx->height);
+		ret = convert_pixfmt(vframe, frame);
 	}
 	if( ret >= 0 ) {
 		picture->queue(curr_pos);
@@ -825,12 +830,15 @@ int FFVideoStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
 
 AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
 {
-	switch( color_model ) { 
+	switch( color_model ) {
 	case BC_YUV422:		return AV_PIX_FMT_YUYV422;
 	case BC_RGB888:		return AV_PIX_FMT_RGB24;
 	case BC_RGBA8888:	return AV_PIX_FMT_RGBA;
 	case BC_BGR8888:	return AV_PIX_FMT_BGR0;
 	case BC_BGR888:		return AV_PIX_FMT_BGR24;
+	case BC_ARGB8888:	return AV_PIX_FMT_ARGB;
+	case BC_ABGR8888:	return AV_PIX_FMT_ABGR;
+	case BC_RGB8:		return AV_PIX_FMT_RGB8;
 	case BC_YUV420P:	return AV_PIX_FMT_YUV420P;
 	case BC_YUV422P:	return AV_PIX_FMT_YUV422P;
 	case BC_YUV444P:	return AV_PIX_FMT_YUV444P;
@@ -846,12 +854,15 @@ AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
 
 int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
 {
-	switch (pix_fmt) { 
+	switch (pix_fmt) {
 	case AV_PIX_FMT_YUYV422:	return BC_YUV422;
 	case AV_PIX_FMT_RGB24:		return BC_RGB888;
 	case AV_PIX_FMT_RGBA:		return BC_RGBA8888;
 	case AV_PIX_FMT_BGR0:		return BC_BGR8888;
 	case AV_PIX_FMT_BGR24:		return BC_BGR888;
+	case AV_PIX_FMT_ARGB:		return BC_ARGB8888;
+	case AV_PIX_FMT_ABGR:		return BC_ABGR8888;
+	case AV_PIX_FMT_RGB8:		return BC_RGB8;
 	case AV_PIX_FMT_YUV420P:	return BC_YUV420P;
 	case AV_PIX_FMT_YUV422P:	return BC_YUV422P;
 	case AV_PIX_FMT_YUV444P:	return BC_YUV444P;
@@ -865,53 +876,54 @@ int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
 	return -1;
 }
 
-int FFVideoConvert::convert_picture_vframe(VFrame *frame,
-	AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip)
+{
+	AVFrame *ipic = av_frame_alloc();
+	int ret = convert_picture_vframe(frame, ip, ipic);
+	av_frame_free(&ipic);
+	return ret;
+}
+
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
 {
-	// try bc_xfer methods
-	int imodel = pix_fmt_to_color_model(ifmt);
-	if( imodel >= 0 ) {
-		long y_ofs = 0, u_ofs = 0, v_ofs = 0;
-		uint8_t *data = ip->data[0];
-		if( BC_CModels::is_yuv(imodel) ) {
-			u_ofs = ip->data[1] - data;
-			v_ofs = ip->data[2] - data;
-		}
-		VFrame iframe(data, -1, y_ofs, u_ofs, v_ofs, iw, ih, imodel, ip->linesize[0]);
-		frame->transfer_from(&iframe);
-		return 0;
-	}
-	// try sws methods
-	AVFrame opic;
 	int cmodel = frame->get_color_model();
 	AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
 	if( ofmt == AV_PIX_FMT_NB ) return -1;
-	int size = av_image_fill_arrays(opic.data, opic.linesize,
+	int size = av_image_fill_arrays(ipic->data, ipic->linesize,
 		frame->get_data(), ofmt, frame->get_w(), frame->get_h(), 1);
 	if( size < 0 ) return -1;
-	// transfer line sizes must match also
-	int planar = BC_CModels::is_planar(cmodel);
-	int packed_width = !planar ? frame->get_bytes_per_line() :
-		BC_CModels::calculate_pixelsize(cmodel) * frame->get_w();
-	if( packed_width != opic.linesize[0] ) return -1;
-
-	if( planar ) {
+	int bpp = BC_CModels::calculate_pixelsize(cmodel);
+	int ysz = bpp * frame->get_w(), usz = ysz;
+	switch( cmodel ) {
+	case BC_YUV410P:
+	case BC_YUV411P:
+		usz /= 2;
+	case BC_YUV420P:
+	case BC_YUV422P:
+		usz /= 2;
+	case BC_YUV444P:
 // override av_image_fill_arrays() for planar types
-		opic.data[0] = frame->get_y();
-		opic.data[1] = frame->get_u();
-		opic.data[2] = frame->get_v();
-	}
-
-	convert_ctx = sws_getCachedContext(convert_ctx, iw, ih, ifmt,
+		ipic->data[0] = frame->get_y();  ipic->linesize[0] = ysz;
+		ipic->data[1] = frame->get_u();  ipic->linesize[1] = usz;
+		ipic->data[2] = frame->get_v();  ipic->linesize[2] = usz;
+		break;
+	default:
+		ipic->data[0] = frame->get_data();
+		ipic->linesize[0] = frame->get_bytes_per_line();
+		break;
+	}
+
+	AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+	convert_ctx = sws_getCachedContext(convert_ctx, ip->width, ip->height, pix_fmt,
 		frame->get_w(), frame->get_h(), ofmt, SWS_BICUBIC, NULL, NULL, NULL);
 	if( !convert_ctx ) {
 		fprintf(stderr, "FFVideoConvert::convert_picture_frame:"
			" sws_getCachedContext() failed\n");
 		return -1;
 	}
-	int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ih,
-		opic.data, opic.linesize);
+	int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
+		ipic->data, ipic->linesize);
 	if( ret < 0 ) {
 		ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
 		return -1;
@@ -919,13 +931,13 @@ int FFVideoConvert::convert_picture_vframe(VFrame *frame,
 	return 0;
 }
 
-int FFVideoConvert::convert_cmodel(VFrame *frame,
-	AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::convert_cmodel(VFrame *frame, AVFrame *ip)
 {
 	// try direct transfer
-	if( !convert_picture_vframe(frame, ip, ifmt, iw, ih) ) return 1;
+	if( !convert_picture_vframe(frame, ip) ) return 1;
 	// use indirect transfer
-	const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(ifmt);
+	AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+	const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
 	int max_bits = 0;
 	for( int i = 0; i<desc->nb_components; ++i ) {
 		int bits = desc->comp[i].depth;
@@ -937,16 +949,15 @@ int FFVideoConvert::convert_cmodel(VFrame *frame,
 	int icolor_model = pixdesc_has_alpha(desc) ?
 		(max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
 		(max_bits > 8 ? BC_RGB161616 : BC_RGB888) ;
-	VFrame vframe(iw, ih, icolor_model);
-	if( convert_picture_vframe(&vframe, ip, ifmt, iw, ih) ) return -1;
+	VFrame vframe(ip->width, ip->height, icolor_model);
+	if( convert_picture_vframe(&vframe, ip) ) return -1;
 	frame->transfer_from(&vframe);
 	return 1;
 }
 
-int FFVideoConvert::transfer_cmodel(VFrame *frame,
-	AVFrame *ifp, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::transfer_cmodel(VFrame *frame, AVFrame *ifp)
 {
-	int ret = convert_cmodel(frame, ifp, ifmt, iw, ih);
+	int ret = convert_cmodel(frame, ifp);
 	if( ret > 0 ) {
 		const AVDictionary *src = av_frame_get_metadata(ifp);
 		AVDictionaryEntry *t = NULL;
@@ -958,38 +969,53 @@ int FFVideoConvert::transfer_cmodel(VFrame *frame,
 	return ret;
 }
 
-int FFVideoConvert::convert_vframe_picture(VFrame *frame,
-	AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op)
+{
+	AVFrame *opic = av_frame_alloc();
+	int ret = convert_vframe_picture(frame, op, opic);
+	av_frame_free(&opic);
+	return ret;
+}
+
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
 {
-	AVFrame opic;
 	int cmodel = frame->get_color_model();
 	AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
 	if( ifmt == AV_PIX_FMT_NB ) return -1;
-	int size = av_image_fill_arrays(opic.data, opic.linesize,
+	int size = av_image_fill_arrays(opic->data, opic->linesize,
 		frame->get_data(), ifmt, frame->get_w(), frame->get_h(), 1);
 	if( size < 0 ) return -1;
-	// transfer line sizes must match also
-	int planar = BC_CModels::is_planar(cmodel);
-	int packed_width = !planar ? frame->get_bytes_per_line() :
-		BC_CModels::calculate_pixelsize(cmodel) * frame->get_w();
-	if( packed_width != opic.linesize[0] ) return -1;
-
-	if( planar ) {
+	int bpp = BC_CModels::calculate_pixelsize(cmodel);
+	int ysz = bpp * frame->get_w(), usz = ysz;
+	switch( cmodel ) {
+	case BC_YUV410P:
+	case BC_YUV411P:
+		usz /= 2;
+	case BC_YUV420P:
+	case BC_YUV422P:
+		usz /= 2;
+	case BC_YUV444P:
 // override av_image_fill_arrays() for planar types
-		opic.data[0] = frame->get_y();
-		opic.data[1] = frame->get_u();
-		opic.data[2] = frame->get_v();
-	}
-
-	convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(), ifmt,
-		ow, oh, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
+		opic->data[0] = frame->get_y();  opic->linesize[0] = ysz;
+		opic->data[1] = frame->get_u();  opic->linesize[1] = usz;
+		opic->data[2] = frame->get_v();  opic->linesize[2] = usz;
+		break;
+	default:
+		opic->data[0] = frame->get_data();
+		opic->linesize[0] = frame->get_bytes_per_line();
+		break;
+	}
+
+	AVPixelFormat ofmt = (AVPixelFormat)op->format;
+	convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(),
		ifmt, op->width, op->height, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
 	if( !convert_ctx ) {
 		fprintf(stderr, "FFVideoConvert::convert_frame_picture:"
			" sws_getCachedContext() failed\n");
 		return -1;
 	}
-	int ret = sws_scale(convert_ctx, opic.data, opic.linesize, 0, frame->get_h(),
+	int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
 		op->data, op->linesize);
 	if( ret < 0 ) {
 		ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n");
@@ -998,11 +1024,10 @@ int FFVideoConvert::convert_vframe_picture(VFrame *frame,
 	return 0;
 }
 
-int FFVideoConvert::convert_pixfmt(VFrame *frame,
-	AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::convert_pixfmt(VFrame *frame, AVFrame *op)
 {
 	// try direct transfer
-	if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 1;
+	if( !convert_vframe_picture(frame, op) ) return 1;
 	// use indirect transfer
 	int colormodel = frame->get_color_model();
 	int bits = BC_CModels::calculate_pixelsize(colormodel) * 8;
@@ -1012,14 +1037,13 @@ int FFVideoConvert::convert_pixfmt(VFrame *frame,
 		(bits > 8 ? BC_RGB161616: BC_RGB888) ;
 	VFrame vframe(frame->get_w(), frame->get_h(), icolor_model);
 	vframe.transfer_from(frame);
-	if( !convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1;
+	if( !convert_vframe_picture(&vframe, op) ) return 1;
 	return -1;
 }
 
-int FFVideoConvert::transfer_pixfmt(VFrame *frame,
-	AVFrame *ofp, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::transfer_pixfmt(VFrame *frame, AVFrame *ofp)
 {
-	int ret = convert_pixfmt(frame, ofp, ofmt, ow, oh);
+	int ret = convert_pixfmt(frame, ofp);
 	if( ret > 0 ) {
 		BC_Hash *hp = frame->get_params();
 		AVDictionary **dict = avpriv_frame_get_metadatap(ofp);
@@ -1035,10 +1059,17 @@ int FFVideoConvert::transfer_pixfmt(VFrame *frame,
 void FFVideoStream::load_markers()
 {
 	IndexState *index_state = ffmpeg->file_base->asset->index_state;
-	if( idx >= index_state->video_markers.size() ) return;
+	if( !index_state || idx >= index_state->video_markers.size() ) return;
 	FFStream::load_markers(*index_state->video_markers[idx], frame_rate);
 }
 
+IndexMarks *FFVideoStream::get_markers()
+{
+	IndexState *index_state = ffmpeg->file_base->asset->index_state;
+	if( !index_state || idx >= index_state->video_markers.size() ) return 0;
+	return !index_state ? 0 : index_state->video_markers[idx];
+}
+
 
 FFMPEG::FFMPEG(FileBase *file_base)
 {
@@ -1134,13 +1165,15 @@ AVRational FFMPEG::to_time_base(int sample_rate)
 
 void FFMPEG::set_option_path(char *path, const char *fmt, ...)
 {
-	get_exe_path(path);
-	strcat(path, "/ffmpeg/");
+	char *ep = path + BCTEXTLEN-1;
+	strncpy(path, File::get_cindat_path(), ep-path);
+	strncat(path, "/ffmpeg/", ep-path);
 	path += strlen(path);
 	va_list ap;
 	va_start(ap, fmt);
-	vsprintf(path, fmt, ap);
+	path += vsnprintf(path, ep-path, fmt, ap);
 	va_end(ap);
+	*path = 0;
 }
 
 void FFMPEG::get_option_path(char *path, const char *type, const char *spec)
@@ -1268,12 +1301,11 @@ int FFMPEG::get_encoder(const char *options,
 {
 	FILE *fp = fopen(options,"r");
 	if( !fp ) {
-		eprintf("FFMPEG::get_encoder: options open failed %s\n",options);
+		eprintf(_("options open failed %s\n"),options);
 		return 1;
 	}
 	if( get_encoder(fp, format, codec, bsfilter, bsargs) )
-		eprintf(_("FFMPEG::get_encoder:"
-			" err: format/codec not found %s\n"), options);
+		eprintf(_("format/codec not found %s\n"), options);
 	fclose(fp);
 	return 0;
 }
@@ -1325,8 +1357,7 @@ int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
 		if( line[0] == '\n' ) continue;
 		char key[BCSTRLEN], val[BCTEXTLEN];
 		if( scan_option_line(line, key, val) ) {
-			eprintf(_("FFMPEG::read_options:"
-				" err reading %s: line %d\n"), options, no);
+			eprintf(_("err reading %s: line %d\n"), options, no);
 			ret = 1;
 		}
 		if( !ret ) {
@@ -1394,7 +1425,7 @@ double FFMPEG::to_secs(int64_t time, AVRational time_base)
 {
 	double base_time = time == AV_NOPTS_VALUE ? 0 :
 		av_rescale_q(time, time_base, AV_TIME_BASE_Q);
-	return base_time / AV_TIME_BASE; 
+	return base_time / AV_TIME_BASE;
 }
 
 int FFMPEG::info(char *text, int len)
@@ -1519,8 +1550,7 @@ int FFMPEG::open_decoder()
 {
 	struct stat st;
 	if( stat(fmt_ctx->filename, &st) < 0 ) {
-		eprintf("FFMPEG::open_decoder: can't stat file: %s\n",
-			fmt_ctx->filename);
+		eprintf(_("can't stat file: %s\n"), fmt_ctx->filename);
 		return 1;
 	}
 
@@ -1543,8 +1573,8 @@ int FFMPEG::open_decoder()
 		printf("FFMPEG::open_decoder: some stream times estimated\n");
 
 	ff_lock("FFMPEG::open_decoder");
-	int bad_time = 0;
-	for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
+	int ret = 0, bad_time = 0;
+	for( int i=0; !ret && i<(int)fmt_ctx->nb_streams; ++i ) {
 		AVStream *st = fmt_ctx->streams[i];
 		if( st->duration == AV_NOPTS_VALUE ) bad_time = 1;
 		AVCodecContext *avctx = st->codec;
@@ -1569,7 +1599,7 @@ int FFMPEG::open_decoder()
 			vid->nudge = st->start_time;
 			vid->reading = -1;
 			if( opt_video_filter )
-				vid->create_filter(opt_video_filter, avctx,avctx);
+				ret = vid->create_filter(opt_video_filter, avctx,avctx);
 		}
 		else if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) {
 			if( avctx->channels < 1 ) continue;
@@ -1597,13 +1627,13 @@ int FFMPEG::open_decoder()
 			aud->nudge = st->start_time;
 			aud->reading = -1;
 			if( opt_audio_filter )
-				aud->create_filter(opt_audio_filter, avctx,avctx);
+				ret = aud->create_filter(opt_audio_filter, avctx,avctx);
 		}
 	}
 	if( bad_time )
 		printf("FFMPEG::open_decoder: some stream have bad times\n");
 	ff_unlock();
-	return 0;
+	return ret < 0 ? -1 : 0;
 }
 
 
@@ -1612,24 +1642,24 @@ int FFMPEG::init_encoder(const char *filename)
 	int fd = ::open(filename,O_WRONLY);
 	if( fd < 0 ) fd = open(filename,O_WRONLY+O_CREAT,0666);
 	if( fd < 0 ) {
-		eprintf("FFMPEG::init_encoder: bad file path: %s\n", filename);
+		eprintf(_("bad file path: %s\n"), filename);
 		return 1;
 	}
 	::close(fd);
 	int ret = get_file_format();
 	if( ret > 0 ) {
-		eprintf("FFMPEG::init_encoder: bad file format: %s\n", filename);
+		eprintf(_("bad file format: %s\n"), filename);
 		return 1;
 	}
 	if( ret < 0 ) {
-		eprintf("FFMPEG::init_encoder: mismatch audio/video file format: %s\n", filename);
+		eprintf(_("mismatch audio/video file format: %s\n"), filename);
 		return 1;
 	}
 	ff_lock("FFMPEG::init_encoder");
 	av_register_all();
 	avformat_alloc_output_context2(&fmt_ctx, 0, file_format, filename);
 	if( !fmt_ctx ) {
-		eprintf("FFMPEG::init_encoder: failed: %s\n", filename);
+		eprintf(_("failed: %s\n"), filename);
 		ret = 1;
 	}
 	if( !ret ) {
@@ -1654,8 +1684,7 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
 	char format_name[BCSTRLEN], codec_name[BCTEXTLEN];
 	char bsfilter[BCSTRLEN], bsargs[BCTEXTLEN];
 	if( get_encoder(option_path, format_name, codec_name, bsfilter, bsargs) ) {
-		eprintf("FFMPEG::open_encoder: get_encoder failed %s:%s\n",
-			option_path, filename);
+		eprintf(_("get_encoder failed %s:%s\n"), option_path, filename);
 		return 1;
 	}
 
@@ -1671,40 +1700,35 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
 	const AVCodecDescriptor *codec_desc = 0;
 	AVCodec *codec = avcodec_find_encoder_by_name(codec_name);
 	if( !codec ) {
-		eprintf("FFMPEG::open_encoder: cant find codec %s:%s\n",
-			codec_name, filename);
+		eprintf(_("cant find codec %s:%s\n"), codec_name, filename);
 		ret = 1;
 	}
 	if( !ret ) {
 		codec_desc = avcodec_descriptor_get(codec->id);
 		if( !codec_desc ) {
-			eprintf("FFMPEG::open_encoder: unknown codec %s:%s\n",
-				codec_name, filename);
+			eprintf(_("unknown codec %s:%s\n"), codec_name, filename);
 			ret = 1;
 		}
 	}
 	if( !ret ) {
 		st = avformat_new_stream(fmt_ctx, 0);
 		if( !st ) {
-			eprintf("FFMPEG::open_encoder: cant create stream %s:%s\n",
-				codec_name, filename);
+			eprintf(_("cant create stream %s:%s\n"), codec_name, filename);
 			ret = 1;
 		}
-	} 
+	}
 	if( !ret ) {
 		AVCodecContext *ctx = st->codec;
 		switch( codec_desc->type ) {
 		case AVMEDIA_TYPE_AUDIO: {
 			if( has_audio ) {
-				eprintf("FFMPEG::open_encoder: duplicate audio %s:%s\n",
-					codec_name, filename);
+				eprintf(_("duplicate audio %s:%s\n"), codec_name, filename);
 				ret = 1;
 				break;
 			}
 			has_audio = 1;
 			if( scan_options(asset->ff_audio_options, sopts, st) ) {
-				eprintf("FFMPEG::open_encoder: bad audio options %s:%s\n",
-					codec_name, filename);
+				eprintf(_("bad audio options %s:%s\n"), codec_name, filename);
 				ret = 1;
 				break;
 			}
@@ -1725,8 +1749,7 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
 				ctx->channel_layout = av_get_default_channel_layout(ctx->channels);
 			ctx->sample_rate = check_sample_rate(codec, asset->sample_rate);
 			if( !ctx->sample_rate ) {
-				eprintf("FFMPEG::open_encoder:"
-					" check_sample_rate failed %s\n", filename);
+				eprintf(_("check_sample_rate failed %s\n"), filename);
 				ret = 1;
 				break;
 			}
@@ -1742,15 +1765,13 @@ int FFMPEG::open_encoder(const char *type, const char *spec)
 			break; }
 		case AVMEDIA_TYPE_VIDEO: {
 			if( has_video ) {
-				eprintf("FFMPEG::open_encoder: duplicate video %s:%s\n",
-					codec_name, filename);
+				eprintf(_("duplicate video %s:%s\n"), codec_name, filename);
 				ret = 1;
 				break;
 			}
 			has_video = 1;
 			if( scan_options(asset->ff_video_options, sopts, st) ) {
-				eprintf("FFMPEG::open_encoder: bad video options %s:%s\n",
-					codec_name, filename);
+				eprintf(_("bad video options %s:%s\n"), codec_name, filename);
 				ret = 1;
 				break;
 			}
@@ -1787,8 +1808,7 @@
 				ctx->pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
 			AVRational frame_rate = check_frame_rate(codec, vid->frame_rate);
 			if( !frame_rate.num || !frame_rate.den ) {
-				eprintf("FFMPEG::open_encoder:"
-					" check_frame_rate failed %s\n", filename);
+				eprintf(_("check_frame_rate failed %s\n"), filename);
 				ret = 1;
 				break;
 			}
@@ -1797,8 +1817,7 @@
 			vid->writing = -1;
 			break; }
 		default:
-			eprintf("FFMPEG::open_encoder: not audio/video, %s:%s\n",
-				codec_name, filename);
+			eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
 			ret = 1;
 		}
 	}
@@ -1809,8 +1828,7 @@
 		ret = avcodec_open2(st->codec, codec, &sopts);
 		if( ret < 0 ) {
 			ff_err(ret,"FFMPEG::open_encoder");
-			eprintf("FFMPEG::open_encoder: open failed %s:%s\n",
-				codec_name, filename);
+			eprintf(_("open failed %s:%s\n"), codec_name, filename);
 			ret = 1;
 		}
 		else
@@ -1942,7 +1960,7 @@ int FFMPEG::encode_activate()
 			(ret=avio_open(&fmt_ctx->pb, fmt_ctx->filename, AVIO_FLAG_WRITE)) < 0 ) {
 		ff_err(ret, "FFMPEG::encode_activate: err opening : %s\n",
			fmt_ctx->filename);
-		return 1;
+		return -1;
 	}
 
 	AVDictionary *fopts = 0;
@@ -1954,7 +1972,7 @@ int FFMPEG::encode_activate()
 		if( ret < 0 ) {
 			ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
				fmt_ctx->filename);
-			return 1;
+			return -1;
 		}
 		encoding = 1;
 	}
@@ -2157,7 +2175,7 @@ const char* FFMPEG::ff_audio_format(int stream)
 	AVStream *st = ffaudio[stream]->st;
 	AVCodecID id = st->codec->codec_id;
 	const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
-	return desc ? desc->name : "Unknown";
+	return desc ? desc->name : _("Unknown");
 }
 
 int FFMPEG::ff_audio_pid(int stream)
@@ -2267,7 +2285,7 @@ const char* FFMPEG::ff_video_format(int stream)
 	AVStream *st = ffvideo[stream]->st;
 	AVCodecID id = st->codec->codec_id;
 	const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
-	return desc ? desc->name : "Unknown";
+	return desc ? desc->name : _("Unknown");
 }
 
 double FFMPEG::ff_frame_rate(int stream)
@@ -2295,7 +2313,12 @@ int FFVideoStream::create_filter(const char *filter_spec,
 	AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
 {
 	avfilter_register_all();
-	AVFilter *filter = avfilter_get_by_name(filter_spec);
+	const char *sp = filter_spec;
+	char filter_name[BCSTRLEN], *np = filter_name;
+	int i = sizeof(filter_name);
+	while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+	*np = 0;
+	AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
 	if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
 		ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
 		return -1;
@@ -2324,14 +2347,19 @@ int FFVideoStream::create_filter(const char *filter_spec,
 		ff_err(ret, "FFVideoStream::create_filter");
 	else
 		ret = FFStream::create_filter(filter_spec);
-	return ret >= 0 ? 0 : 1;
+	return ret >= 0 ? 0 : -1;
 }
 
 int FFAudioStream::create_filter(const char *filter_spec,
	AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
 {
 	avfilter_register_all();
-	AVFilter *filter = avfilter_get_by_name(filter_spec);
+	const char *sp = filter_spec;
+	char filter_name[BCSTRLEN], *np = filter_name;
+	int i = sizeof(filter_name);
+	while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+	*np = 0;
+	AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
 	if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
 		ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
 		return -1;
@@ -2366,7 +2394,7 @@ int FFAudioStream::create_filter(const char *filter_spec,
 		ff_err(ret, "FFAudioStream::create_filter");
 	else
 		ret = FFStream::create_filter(filter_spec);
-	return ret >= 0 ? 0 : 1;
+	return ret >= 0 ? 0 : -1;
 }
 
 int FFStream::create_filter(const char *filter_spec)
@@ -2391,8 +2419,11 @@ int FFStream::create_filter(const char *filter_spec)
 	if( ret >= 0 )
 		ret = avfilter_graph_config(filter_graph, NULL);
 
-	if( ret < 0 )
+	if( ret < 0 ) {
 		ff_err(ret, "FFStream::create_filter");
+		avfilter_graph_free(&filter_graph);
+		filter_graph = 0;
+	}
 	avfilter_inout_free(&inputs);
 	avfilter_inout_free(&outputs);
 	return ret;
@@ -2444,7 +2475,8 @@ int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
 	av_init_packet(&pkt);
 	AVFrame *frame = av_frame_alloc();
 	if( !frame ) {
-		fprintf(stderr, "FFMPEG::scan: av_frame_alloc failed\n");
+		fprintf(stderr,"FFMPEG::scan: ");
+		fprintf(stderr,_("av_frame_alloc failed\n"));
 		return -1;
 	}
 
@@ -2457,8 +2489,10 @@ int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
 		AVStream *st = fmt_ctx->streams[i];
 		AVCodecID codec_id = st->codec->codec_id;
 		AVCodec *decoder = avcodec_find_decoder(codec_id);
-		if( avcodec_open2(st->codec, decoder, &copts) < 0 )
-			fprintf(stderr, "FFMPEG::scan: codec open failed\n");
+		if( avcodec_open2(st->codec, decoder, &copts) < 0 ) {
+			fprintf(stderr,"FFMPEG::scan: ");
+			fprintf(stderr,_("codec open failed\n"));
+		}
 		av_dict_free(&copts);
 	}
 	int errs = 0;
@@ -2470,7 +2504,7 @@ int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
 		if( ret < 0 ) {
 			if( ret == AVERROR_EOF ) break;
 			if( ++errs > 100 ) {
-				ff_err(ret, "over 100 read_frame errs\n");
+				ff_err(ret,_("over 100 read_frame errs\n"));
 				break;
 			}
 			continue;
@@ -2528,7 +2562,7 @@ int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
 				int64_t pos = index_state->pos(ch);
 				if( pos != aud->curr_pos ) {
 					if( abs(pos-aud->curr_pos) > 1 )
-printf("audio%d pad %ld %ld (%ld)\n", aud->idx, pos, aud->curr_pos, pos-aud->curr_pos);
+printf("audio%d pad %jd %jd (%jd)\n", aud->idx, pos, aud->curr_pos, pos-aud->curr_pos);
 					index_state->pad_data(ch, nch, aud->curr_pos);
 				}
 				av_frame_unref(frame);
@@ -2556,7 +2590,6 @@ printf("audio%d pad %ld %ld (%ld)\n", aud->idx, pos, aud->curr_pos, pos-aud->cur
 
 void FFStream::load_markers(IndexMarks &marks, double rate)
 {
-	index_markers = &marks;
 	int in = 0;
 	int64_t sz = marks.size();
 	int max_entries = fmt_ctx->max_index_size / sizeof(AVIndexEntry) - 1;