#include "file.h"
#include "ffmpeg.h"
#include "indexfile.h"
+#include "interlacemodes.h"
#include "libdv.h"
#include "libmjpeg.h"
#include "mainerror.h"
int FFStream::flush()
{
+ if( writing < 0 )
+ return -1;
int ret = 0;
while( ret >= 0 ) {
FFPacket pkt;
{
samples = *(float **)data;
if( resample_context ) {
- if( len > aud_bfr_sz ) {
+ if( len > aud_bfr_sz ) {
delete [] aud_bfr;
aud_bfr = 0;
}
frame_rate = 0;
aspect_ratio = 0;
length = 0;
+ interlaced = 0;
+ top_field_first = 0;
}
FFVideoStream::~FFVideoStream()
ret = read_frame(frame);
if( ret > 0 ) ++curr_pos;
}
+ if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
+ ret = -1;
if( ret >= 0 ) {
- AVCodecContext *ctx = st->codec;
- ret = convert_cmodel(vframe, frame,
- ctx->pix_fmt, ctx->width, ctx->height);
+ ret = convert_cmodel(vframe, frame);
}
ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
return ret;
if( ret >= 0 ) {
AVFrame *frame = *picture;
frame->pts = curr_pos;
- AVCodecContext *ctx = st->codec;
- ret = convert_pixfmt(vframe, frame,
- ctx->pix_fmt, ctx->width, ctx->height);
+ ret = convert_pixfmt(vframe, frame);
}
if( ret >= 0 ) {
picture->queue(curr_pos);
int FFVideoStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
{
+ if( frame ) {
+ frame->interlaced_frame = interlaced;
+ frame->top_field_first = top_field_first;
+ }
int ret = avcodec_encode_video2(st->codec, pkt, frame, &got_packet);
if( ret < 0 ) {
ff_err(ret, "FFVideoStream::encode_frame: encode video failed\n");
AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
{
- switch( color_model ) {
+ switch( color_model ) {
case BC_YUV422: return AV_PIX_FMT_YUYV422;
case BC_RGB888: return AV_PIX_FMT_RGB24;
case BC_RGBA8888: return AV_PIX_FMT_RGBA;
case BC_RGB565: return AV_PIX_FMT_RGB565;
case BC_RGB161616: return AV_PIX_FMT_RGB48LE;
case BC_RGBA16161616: return AV_PIX_FMT_RGBA64LE;
+ case BC_AYUV16161616: return AV_PIX_FMT_AYUV64LE;
default: break;
}
int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
{
+// Map an ffmpeg pixel format to the equivalent Cinelerra color model.
+// Only exact, zero-conversion pairings are listed; returns -1 when there
+// is no direct equivalent (caller falls back to an sws/indirect path).
-	switch (pix_fmt) {
+	switch (pix_fmt) {
	case AV_PIX_FMT_YUYV422: return BC_YUV422;
	case AV_PIX_FMT_RGB24: return BC_RGB888;
	case AV_PIX_FMT_RGBA: return BC_RGBA8888;
	case AV_PIX_FMT_RGB565: return BC_RGB565;
	case AV_PIX_FMT_RGB48LE: return BC_RGB161616;
	case AV_PIX_FMT_RGBA64LE: return BC_RGBA16161616;
+	case AV_PIX_FMT_AYUV64LE: return BC_AYUV16161616;
	default: break;
	}
	return -1;
}
-int FFVideoConvert::convert_picture_vframe(VFrame *frame,
-	AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+// Convenience overload: allocates a scratch AVFrame used only as a
+// data/linesize descriptor for the VFrame, then delegates to the
+// 3-arg version.  Returns 0 on success, -1 on failure.
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip)
+{
+	AVFrame *ipic = av_frame_alloc();
+	// av_frame_alloc() can fail; the 3-arg overload dereferences ipic
+	if( !ipic ) return -1;
+	int ret = convert_picture_vframe(frame, ip, ipic);
+	av_frame_free(&ipic);
+	return ret;
+}
+
+// Transfer a decoded AVFrame (ip) into the caller's VFrame (frame),
+// using ipic purely as a descriptor for the VFrame's storage.
+// Returns 0 on success, -1 when the color model has no ffmpeg pixel
+// format or sws setup/scale fails.
+int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
{
-	// try bc_xfer methods
-	int imodel = pix_fmt_to_color_model(ifmt);
-	if( imodel >= 0 ) {
-		long y_ofs = 0, u_ofs = 0, v_ofs = 0;
-		uint8_t *data = ip->data[0];
-		if( BC_CModels::is_yuv(imodel) ) {
-			u_ofs = ip->data[1] - data;
-			v_ofs = ip->data[2] - data;
-		}
-		VFrame iframe(data, -1, y_ofs, u_ofs, v_ofs, iw, ih, imodel, ip->linesize[0]);
-		frame->transfer_from(&iframe);
-		return 0;
-	}
-	// try sws methods
-	AVFrame opic;
	int cmodel = frame->get_color_model();
	AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
	if( ofmt == AV_PIX_FMT_NB ) return -1;
+	int size = av_image_fill_arrays(ipic->data, ipic->linesize,
		 frame->get_data(), ofmt, frame->get_w(), frame->get_h(), 1);
	if( size < 0 ) return -1;
+	// per-plane line sizes: chroma planes shrink by the subsampling factor;
+	// the case labels deliberately fall through to accumulate the divides
+	int bpp = BC_CModels::calculate_pixelsize(cmodel);
+	int ysz = bpp * frame->get_w(), usz = ysz;
+	switch( cmodel ) {
+	case BC_YUV410P:
+	case BC_YUV411P:
+		usz /= 2;
+		// fall through
+	case BC_YUV420P:
+	case BC_YUV422P:
+		usz /= 2;
+		// fall through
+	case BC_YUV444P:
		// override av_image_fill_arrays() for planar types
-		opic.data[0] = frame->get_y();
-		opic.data[1] = frame->get_u();
-		opic.data[2] = frame->get_v();
-	}
-
-	convert_ctx = sws_getCachedContext(convert_ctx, iw, ih, ifmt,
-		frame->get_w(), frame->get_h(), ofmt, SWS_BICUBIC, NULL, NULL, NULL);
+		ipic->data[0] = frame->get_y();  ipic->linesize[0] = ysz;
+		ipic->data[1] = frame->get_u();  ipic->linesize[1] = usz;
+		ipic->data[2] = frame->get_v();  ipic->linesize[2] = usz;
+		break;
+	default:
+		// packed formats: single plane straight from the VFrame rows
+		ipic->data[0] = frame->get_data();
+		ipic->linesize[0] = frame->get_bytes_per_line();
+		break;
+	}
+
+	// geometry/format now read from the source frame itself (ip->format,
+	// ip->width, ip->height) instead of separate parameters
+	AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+	convert_ctx = sws_getCachedContext(convert_ctx, ip->width, ip->height, pix_fmt,
+		frame->get_w(), frame->get_h(), ofmt, SWS_POINT, NULL, NULL, NULL);
	if( !convert_ctx ) {
		fprintf(stderr, "FFVideoConvert::convert_picture_frame:"
				" sws_getCachedContext() failed\n");
		return -1;
	}
+	int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
+	    ipic->data, ipic->linesize);
	if( ret < 0 ) {
		ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
		return -1;
	return 0;
}
-int FFVideoConvert::convert_cmodel(VFrame *frame,
-	AVFrame *ip, AVPixelFormat ifmt, int iw, int ih)
+// Convert decoder output (ip) into frame's color model.
+// Returns 1 on success, -1 on failure.
+int FFVideoConvert::convert_cmodel(VFrame *frame, AVFrame *ip)
{
	// try direct transfer
-	if( !convert_picture_vframe(frame, ip, ifmt, iw, ih) ) return 1;
+	if( !convert_picture_vframe(frame, ip) ) return 1;
	// use indirect transfer
+	AVPixelFormat ifmt = (AVPixelFormat)ip->format;
	const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(ifmt);
+	// widest component depth decides 8- vs 16-bit intermediate
	int max_bits = 0;
	for( int i = 0; i <desc->nb_components; ++i ) {
		int bits = desc->comp[i].depth;
		if( bits > max_bits ) max_bits = bits;
	}
-// from libavcodec/pixdesc.c
-#define pixdesc_has_alpha(pixdesc) ((pixdesc)->nb_components == 2 || \
-		(pixdesc)->nb_components == 4 || (pixdesc)->flags & AV_PIX_FMT_FLAG_PAL)
-	int icolor_model = pixdesc_has_alpha(desc) ?
-		(max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
-		(max_bits > 8 ? BC_RGB161616 : BC_RGB888) ;
-	VFrame vframe(iw, ih, icolor_model);
-	if( convert_picture_vframe(&vframe, ip, ifmt, iw, ih) ) return -1;
+	// choose an intermediate model in the same colorspace family (yuv/rgb)
+	// as the destination, to avoid an extra colorspace conversion
+	int imodel = pix_fmt_to_color_model(ifmt);
+	int imodel_is_yuv = BC_CModels::is_yuv(imodel);
+	int cmodel = frame->get_color_model();
+	int cmodel_is_yuv = BC_CModels::is_yuv(cmodel);
+	if( imodel < 0 || imodel_is_yuv != cmodel_is_yuv ) {
+		imodel = cmodel_is_yuv ?
+		    (BC_CModels::has_alpha(cmodel) ?
+			BC_AYUV16161616 :
+			(max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P)) :
+		    (BC_CModels::has_alpha(cmodel) ?
+			(max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
+			(max_bits > 8 ? BC_RGB161616 : BC_RGB888)) ;
+	}
+	VFrame vframe(ip->width, ip->height, imodel);
+	if( convert_picture_vframe(&vframe, ip) ) return -1;
	frame->transfer_from(&vframe);
	return 1;
}
-int FFVideoConvert::transfer_cmodel(VFrame *frame,
- AVFrame *ifp, AVPixelFormat ifmt, int iw, int ih)
+int FFVideoConvert::transfer_cmodel(VFrame *frame, AVFrame *ifp)
{
- int ret = convert_cmodel(frame, ifp, ifmt, iw, ih);
+ int ret = convert_cmodel(frame, ifp);
if( ret > 0 ) {
const AVDictionary *src = av_frame_get_metadata(ifp);
AVDictionaryEntry *t = NULL;
return ret;
}
-int FFVideoConvert::convert_vframe_picture(VFrame *frame,
-	AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+// Convenience overload: allocates a scratch AVFrame used only as a
+// data/linesize descriptor for the VFrame, then delegates to the
+// 3-arg version.  Returns 0 on success, -1 on failure.
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op)
+{
+	AVFrame *opic = av_frame_alloc();
+	// av_frame_alloc() can fail; the 3-arg overload dereferences opic
+	if( !opic ) return -1;
+	int ret = convert_vframe_picture(frame, op, opic);
+	av_frame_free(&opic);
+	return ret;
+}
+
+// Transfer the VFrame (frame) into an encoder AVFrame (op), using opic
+// purely as a descriptor for the VFrame's storage.  Returns 0 on success,
+// -1 when the color model has no ffmpeg pixel format or sws fails.
+int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
{
-	AVFrame opic;
	int cmodel = frame->get_color_model();
	AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
	if( ifmt == AV_PIX_FMT_NB ) return -1;
+	int size = av_image_fill_arrays(opic->data, opic->linesize,
		frame->get_data(), ifmt, frame->get_w(), frame->get_h(), 1);
	if( size < 0 ) return -1;
+	// per-plane line sizes; case labels fall through to accumulate the
+	// chroma subsampling divides (see convert_picture_vframe)
+	int bpp = BC_CModels::calculate_pixelsize(cmodel);
+	int ysz = bpp * frame->get_w(), usz = ysz;
+	switch( cmodel ) {
+	case BC_YUV410P:
+	case BC_YUV411P:
+		usz /= 2;
+		// fall through
+	case BC_YUV420P:
+	case BC_YUV422P:
+		usz /= 2;
+		// fall through
+	case BC_YUV444P:
		// override av_image_fill_arrays() for planar types
-		opic.data[0] = frame->get_y();
-		opic.data[1] = frame->get_u();
-		opic.data[2] = frame->get_v();
-	}
-
-	convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(), ifmt,
-			ow, oh, ofmt, SWS_BICUBIC, NULL, NULL, NULL);
+		opic->data[0] = frame->get_y();  opic->linesize[0] = ysz;
+		opic->data[1] = frame->get_u();  opic->linesize[1] = usz;
+		opic->data[2] = frame->get_v();  opic->linesize[2] = usz;
+		break;
+	default:
+		// packed formats: single plane straight from the VFrame rows
+		opic->data[0] = frame->get_data();
+		opic->linesize[0] = frame->get_bytes_per_line();
+		break;
+	}
+
+	// output geometry/format read from the destination frame itself
+	AVPixelFormat ofmt = (AVPixelFormat)op->format;
+	convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(),
+		ifmt, op->width, op->height, ofmt, SWS_POINT, NULL, NULL, NULL);
	if( !convert_ctx ) {
		fprintf(stderr, "FFVideoConvert::convert_frame_picture:"
			" sws_getCachedContext() failed\n");
		return -1;
	}
+	int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
			op->data, op->linesize);
	if( ret < 0 ) {
		ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n");
	return 0;
}
-int FFVideoConvert::convert_pixfmt(VFrame *frame,
-	 AVFrame *op, AVPixelFormat ofmt, int ow, int oh)
+// Convert frame's pixels into the encoder frame op's pixel format.
+// Returns 1 on success, -1 on failure.
+int FFVideoConvert::convert_pixfmt(VFrame *frame, AVFrame *op)
{
	// try direct transfer
-	if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 1;
+	if( !convert_vframe_picture(frame, op) ) return 1;
	// use indirect transfer
-	int colormodel = frame->get_color_model();
-	int bits = BC_CModels::calculate_pixelsize(colormodel) * 8;
-	bits /= BC_CModels::components(colormodel);
-	int icolor_model =  BC_CModels::has_alpha(colormodel) ?
-		(bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
-		(bits > 8 ? BC_RGB161616: BC_RGB888) ;
-	VFrame vframe(frame->get_w(), frame->get_h(), icolor_model);
+	int cmodel = frame->get_color_model();
+	// per-component bit depth of the source model
+	int max_bits = BC_CModels::calculate_pixelsize(cmodel) * 8;
+	max_bits /= BC_CModels::components(cmodel);
+	// choose an intermediate model in the same colorspace family (yuv/rgb)
+	// as the source, to avoid an extra colorspace conversion
+	AVPixelFormat ofmt = (AVPixelFormat)op->format;
+	int imodel = pix_fmt_to_color_model(ofmt);
+	int imodel_is_yuv = BC_CModels::is_yuv(imodel);
+	int cmodel_is_yuv = BC_CModels::is_yuv(cmodel);
+	if( imodel < 0 || imodel_is_yuv != cmodel_is_yuv ) {
+		imodel = cmodel_is_yuv ?
+		    (BC_CModels::has_alpha(cmodel) ?
+			BC_AYUV16161616 :
+			(max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P)) :
+		    (BC_CModels::has_alpha(cmodel) ?
+			(max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
+			(max_bits > 8 ? BC_RGB161616 : BC_RGB888)) ;
+	}
+	VFrame vframe(frame->get_w(), frame->get_h(), imodel);
	vframe.transfer_from(frame);
-	if( !convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1;
+	if( !convert_vframe_picture(&vframe, op) ) return 1;
	return -1;
}
-int FFVideoConvert::transfer_pixfmt(VFrame *frame,
- AVFrame *ofp, AVPixelFormat ofmt, int ow, int oh)
+int FFVideoConvert::transfer_pixfmt(VFrame *frame, AVFrame *ofp)
{
- int ret = convert_pixfmt(frame, ofp, ofmt, ow, oh);
+ int ret = convert_pixfmt(frame, ofp);
if( ret > 0 ) {
BC_Hash *hp = frame->get_params();
AVDictionary **dict = avpriv_frame_get_metadatap(ofp);
int width = 1000000, height = width * sample_aspect + 0.5;
float w, h;
MWindow::create_aspect_ratio(w, h, width, height);
- return (AVRational){(int)h, (int)w};
+ return (AVRational){(int)w, (int)h};
#else
// square pixels
return (AVRational){1, 1};
int FFMPEG::get_file_format()
{
+// Determine the container (file_format) from the asset's audio/video
+// codec selections.  Each codec's option file may name an explicit muxer
+// ("format" entry); otherwise the codec's own format string is used.
+// Returns 0 on success, 1 when nothing usable was found, -1 when the
+// audio and video selections demand conflicting containers.
-	int ret = 0;
+	char audio_muxer[BCSTRLEN], video_muxer[BCSTRLEN];
	char audio_format[BCSTRLEN], video_format[BCSTRLEN];
-	file_format[0] = audio_format[0] = video_format[0] = 0;
+	audio_muxer[0] = audio_format[0] = 0;
+	video_muxer[0] = video_format[0] = 0;
	Asset *asset = file_base->asset;
-	if( !ret && asset->audio_data )
-		ret = get_format(audio_format, "audio", asset->acodec);
-	if( !ret && asset->video_data )
-		ret = get_format(video_format, "video", asset->vcodec);
-	if( !ret && !audio_format[0] && !video_format[0] )
+	int ret = asset ? 0 : 1;
+	if( !ret && asset->audio_data ) {
+		if( !(ret=get_format(audio_format, "audio", asset->acodec)) ) {
+			// no separate muxer entry: the format string is the muxer
+			if( get_format(audio_muxer, "format", audio_format) ) {
+				strcpy(audio_muxer, audio_format);
+				audio_format[0] = 0;
+			}
+		}
+	}
+	if( !ret && asset->video_data ) {
+		if( !(ret=get_format(video_format, "video", asset->vcodec)) ) {
+			// no separate muxer entry: the format string is the muxer
+			if( get_format(video_muxer, "format", video_format) ) {
+				strcpy(video_muxer, video_format);
+				video_format[0] = 0;
+			}
+		}
+	}
+	if( !ret && !audio_muxer[0] && !video_muxer[0] )
		ret = 1;
+	// audio and video must agree on both muxer and format
+	if( !ret && audio_muxer[0] && video_muxer[0] &&
	    strcmp(audio_muxer, video_muxer) ) ret = -1;
	if( !ret && audio_format[0] && video_format[0] &&
	    strcmp(audio_format, video_format) ) ret = -1;
	if( !ret )
-		strcpy(file_format, audio_format[0] ? audio_format : video_format);
+		strcpy(file_format, !audio_format[0] && !video_format[0] ?
+			(audio_muxer[0] ? audio_muxer : video_muxer) :
+			(audio_format[0] ? audio_format : video_format));
	return ret;
}
{
while( *cp == ' ' || *cp == '\t' ) ++cp;
char *bp = cp;
- while( *cp && *cp != ' ' && *cp != '\t' && *cp != '=' ) ++cp;
+ while( *cp && *cp != ' ' && *cp != '\t' && *cp != '=' && *cp != '\n' ) ++cp;
int len = cp - bp;
if( !len || len > BCSTRLEN-1 ) return 1;
while( bp < cp ) *tag++ = *bp++;
char *codec, char *codec_options, int len)
{
char default_file[BCTEXTLEN];
- FFMPEG::set_option_path(default_file, "%s/%s.dfl", path, type);
+ set_option_path(default_file, "%s/%s.dfl", path, type);
FILE *fp = fopen(default_file,"r");
if( !fp ) return 1;
fgets(codec, BCSTRLEN, fp);
codec_options += n; len -= n;
}
fclose(fp);
- FFMPEG::set_option_path(default_file, "%s/%s", path, codec);
- return FFMPEG::load_options(default_file, codec_options, len);
+ set_option_path(default_file, "%s/%s", path, codec);
+ return load_options(default_file, codec_options, len);
}
void FFMPEG::set_asset_format(Asset *asset, const char *text)
{
if( asset->format != FILE_FFMPEG ) return;
- strcpy(asset->fformat, text);
+ if( text != asset->fformat )
+ strcpy(asset->fformat, text);
if( !asset->ff_audio_options[0] ) {
asset->audio_data = !load_defaults("audio", text, asset->acodec,
asset->ff_audio_options, sizeof(asset->ff_audio_options));
return 0;
}
-int FFMPEG::read_options(const char *options, AVDictionary *&opts)
+// Load key/value options from file `options` into opts, optionally
+// skipping the first `skip` lines (e.g. a leading format/codec line).
+// Returns 0 on success, nonzero on open failure or premature EOF.
+int FFMPEG::read_options(const char *options, AVDictionary *&opts, int skip)
{
	FILE *fp = fopen(options,"r");
	if( !fp ) return 1;
-	int ret = read_options(fp, options, opts);
+	int ret = 0;
+	// consume `skip` whole lines; EOF before that is an error
+	while( !ret && --skip >= 0 ) {
+		int ch = getc(fp);
+		while( ch >= 0 && ch != '\n' ) ch = getc(fp);
+		if( ch < 0 ) ret = 1;
+	}
+	if( !ret )
+		ret = read_options(fp, options, opts);
	fclose(fp);
	return ret;
}
char line[BCTEXTLEN];
while( !ret && fgets(line, sizeof(line), fp) ) {
line[sizeof(line)-1] = 0;
- ++no;
if( line[0] == '#' ) continue;
if( line[0] == '\n' ) continue;
char key[BCSTRLEN], val[BCTEXTLEN];
{
double base_time = time == AV_NOPTS_VALUE ? 0 :
av_rescale_q(time, time_base, AV_TIME_BASE_Q);
- return base_time / AV_TIME_BASE;
+ return base_time / AV_TIME_BASE;
}
int FFMPEG::info(char *text, int len)
printf("FFMPEG::open_decoder: some stream times estimated\n");
ff_lock("FFMPEG::open_decoder");
- int bad_time = 0;
- for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
+ int ret = 0, bad_time = 0;
+ for( int i=0; !ret && i<(int)fmt_ctx->nb_streams; ++i ) {
AVStream *st = fmt_ctx->streams[i];
if( st->duration == AV_NOPTS_VALUE ) bad_time = 1;
AVCodecContext *avctx = st->codec;
vid->nudge = st->start_time;
vid->reading = -1;
if( opt_video_filter )
- vid->create_filter(opt_video_filter, avctx,avctx);
+ ret = vid->create_filter(opt_video_filter, avctx,avctx);
}
else if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) {
if( avctx->channels < 1 ) continue;
aud->nudge = st->start_time;
aud->reading = -1;
if( opt_audio_filter )
- aud->create_filter(opt_audio_filter, avctx,avctx);
+ ret = aud->create_filter(opt_audio_filter, avctx,avctx);
}
}
if( bad_time )
printf("FFMPEG::open_decoder: some stream have bad times\n");
ff_unlock();
- return 0;
+ return ret < 0 ? -1 : 0;
}
}
ff_lock("FFMPEG::init_encoder");
av_register_all();
- avformat_alloc_output_context2(&fmt_ctx, 0, file_format, filename);
+ char format[BCSTRLEN];
+ if( get_format(format, "format", file_format) )
+ strcpy(format, file_format);
+ avformat_alloc_output_context2(&fmt_ctx, 0, format, filename);
if( !fmt_ctx ) {
eprintf(_("failed: %s\n"), filename);
ret = 1;
eprintf(_("cant create stream %s:%s\n"), codec_name, filename);
ret = 1;
}
- }
+ }
if( !ret ) {
AVCodecContext *ctx = st->codec;
switch( codec_desc->type ) {
sprintf(arg, "%d", asset->ff_video_bitrate);
av_dict_set(&sopts, "b", arg, 0);
}
- else if( asset->ff_video_quality > 0 ) {
+ else if( asset->ff_video_quality >= 0 ) {
ctx->global_quality = asset->ff_video_quality * FF_QP2LAMBDA;
ctx->qmin = ctx->qmax = asset->ff_video_quality;
ctx->mb_lmin = ctx->qmin * FF_QP2LAMBDA;
ctx->time_base = (AVRational) { frame_rate.den, frame_rate.num };
st->time_base = ctx->time_base;
vid->writing = -1;
+ vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
+ asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ? 1 : 0;
+ vid->top_field_first = asset->interlace_mode == ILACE_MODE_TOP_FIRST ? 1 : 0;
break; }
default:
eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
if( fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER )
st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ av_dict_set(&sopts, "cin_bitrate", 0, 0);
+ av_dict_set(&sopts, "cin_quality", 0, 0);
+
ret = avcodec_open2(st->codec, codec, &sopts);
if( ret < 0 ) {
ff_err(ret,"FFMPEG::open_encoder");
fst->add_bsfilter(bsfilter, !bsargs[0] ? 0 : bsargs);
}
- ff_unlock();
if( !ret )
start_muxer();
+
+ ff_unlock();
av_dict_free(&sopts);
return ret;
}
(ret=avio_open(&fmt_ctx->pb, fmt_ctx->filename, AVIO_FLAG_WRITE)) < 0 ) {
ff_err(ret, "FFMPEG::encode_activate: err opening : %s\n",
fmt_ctx->filename);
- return 1;
+ return -1;
+ }
+
+ int prog_id = 1;
+ AVProgram *prog = av_new_program(fmt_ctx, prog_id);
+ for( int i=0; i< ffvideo.size(); ++i )
+ av_program_add_stream_index(fmt_ctx, prog_id, ffvideo[i]->fidx);
+ for( int i=0; i< ffaudio.size(); ++i )
+ av_program_add_stream_index(fmt_ctx, prog_id, ffaudio[i]->fidx);
+ int pi = fmt_ctx->nb_programs;
+ while( --pi >= 0 && fmt_ctx->programs[pi]->id != prog_id );
+ AVDictionary **meta = &prog->metadata;
+ av_dict_set(meta, "service_provider", "cin5", 0);
+ const char *path = fmt_ctx->filename, *bp = strrchr(path,'/');
+ if( bp ) path = bp + 1;
+ av_dict_set(meta, "title", path, 0);
+
+ if( ffaudio.size() ) {
+ const char *ep = getenv("CIN_AUDIO_LANG"), *lp = 0;
+ if( !ep && (lp=getenv("LANG")) ) { // some are guesses
+ static struct { const char lc[3], lng[4]; } lcode[] = {
+ { "en", "eng" }, { "de", "ger" }, { "es", "spa" },
+ { "eu", "bas" }, { "fr", "fre" }, { "el", "gre" },
+ { "hi", "hin" }, { "it", "ita" }, { "ja", "jap" },
+ { "ko", "kor" }, { "du", "dut" }, { "pl", "pol" },
+ { "pt", "por" }, { "ru", "rus" }, { "sl", "slv" },
+ { "uk", "ukr" }, { "vi", "vie" }, { "zh", "chi" },
+ };
+ for( int i=sizeof(lcode)/sizeof(lcode[0]); --i>=0 && !ep; )
+ if( !strncmp(lcode[i].lc,lp,2) ) ep = lcode[i].lng;
+ }
+ if( !ep ) ep = "und";
+ char lang[5];
+ strncpy(lang,ep,3); lang[3] = 0;
+ AVStream *st = ffaudio[0]->st;
+ av_dict_set(&st->metadata,"language",lang,0);
}
AVDictionary *fopts = 0;
char option_path[BCTEXTLEN];
set_option_path(option_path, "format/%s", file_format);
- read_options(option_path, fopts);
+ read_options(option_path, fopts, 1);
ret = avformat_write_header(fmt_ctx, &fopts);
- av_dict_free(&fopts);
if( ret < 0 ) {
ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
fmt_ctx->filename);
- return 1;
+ return -1;
}
+ av_dict_free(&fopts);
encoding = 1;
}
return encoding;
AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
{
avfilter_register_all();
- AVFilter *filter = avfilter_get_by_name(filter_spec);
+ const char *sp = filter_spec;
+ char filter_name[BCSTRLEN], *np = filter_name;
+ int i = sizeof(filter_name);
+ while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+ *np = 0;
+ AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
return -1;
ff_err(ret, "FFVideoStream::create_filter");
else
ret = FFStream::create_filter(filter_spec);
- return ret >= 0 ? 0 : 1;
+ return ret >= 0 ? 0 : -1;
}
int FFAudioStream::create_filter(const char *filter_spec,
AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
{
avfilter_register_all();
- AVFilter *filter = avfilter_get_by_name(filter_spec);
+ const char *sp = filter_spec;
+ char filter_name[BCSTRLEN], *np = filter_name;
+ int i = sizeof(filter_name);
+ while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+ *np = 0;
+ AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
return -1;
ff_err(ret, "FFAudioStream::create_filter");
else
ret = FFStream::create_filter(filter_spec);
- return ret >= 0 ? 0 : 1;
+ return ret >= 0 ? 0 : -1;
}
int FFStream::create_filter(const char *filter_spec)
if( ret >= 0 )
ret = avfilter_graph_config(filter_graph, NULL);
- if( ret < 0 )
+ if( ret < 0 ) {
ff_err(ret, "FFStream::create_filter");
+ avfilter_graph_free(&filter_graph);
+ filter_graph = 0;
+ }
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;