#include <stdarg.h>
#include <fcntl.h>
#include <limits.h>
+#include <ctype.h>
+
// workarounds (CentOS)
#include <lzma.h>
#ifndef INT64_MAX
#define AUDIO_INBUF_SIZE 0x10000
#define VIDEO_REFILL_THRESH 0
#define AUDIO_REFILL_THRESH 0x1000
+#define AUDIO_MIN_FRAME_SZ 128
Mutex FFMPEG::fflock("FFMPEG::fflock");
need_packet = 1;
frame = fframe = 0;
bsfc = 0;
+ stats_fp = 0;
+ stats_filename = 0;
+ stats_in = 0;
+ pass = 0;
}
FFStream::~FFStream()
if( frame ) av_frame_free(&frame);
if( fframe ) av_frame_free(&fframe);
delete frm_lock;
+ if( stats_fp ) fclose(stats_fp);
+ if( stats_in ) av_freep(&stats_in);
+ delete [] stats_filename;
}
void FFStream::ff_lock(const char *cp)
if( decoder->capabilities & AV_CODEC_CAP_DR1 )
avctx->flags |= CODEC_FLAG_EMU_EDGE;
avcodec_parameters_to_context(avctx, st->codecpar);
+ if( !av_dict_get(copts, "threads", NULL, 0) )
+ avctx->thread_count = ffmpeg->ff_cpus();
ret = avcodec_open2(avctx, decoder, &copts);
}
if( ret >= 0 ) {
int FFStream::load_filter(AVFrame *frame)
{
- av_frame_unref(frame);
- int ret = av_buffersrc_add_frame_flags(buffersrc_ctx,
- frame, AV_BUFFERSRC_FLAG_KEEP_REF);
+ int ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0);
if( ret < 0 )
eprintf(_("av_buffersrc_add_frame_flags failed\n"));
return ret;
ret = write_packet(opkt);
if( ret < 0 ) break;
++pkts;
+ if( frame && stats_fp ) {
+ ret = write_stats_file();
+ if( ret < 0 ) break;
+ }
}
ff_err(ret, "FFStream::encode_frame: encode failed\n");
return -1;
if( writing < 0 )
return -1;
int ret = encode_frame(0);
+ if( ret >= 0 && stats_fp ) {
+ ret = write_stats_file();
+ close_stats_file();
+ }
if( ret < 0 )
ff_err(ret, "FFStream::flush");
return ret >= 0 ? 0 : 1;
}
+
+// Create/truncate the pass-1 two-pass stats log named by stats_filename.
+// Returns 0 on success, AVERROR(errno) if the file cannot be created.
+int FFStream::open_stats_file()
+{
+	stats_fp = fopen(stats_filename,"w");
+	return stats_fp ? 0 : AVERROR(errno);
+}
+
+// Close the pass-1 stats log if one is open; harmless when it is not.
+// Always returns 0.
+int FFStream::close_stats_file()
+{
+	if( !stats_fp ) return 0;
+	fclose(stats_fp);
+	stats_fp = 0;
+	return 0;
+}
+
+// Load the pass-1 stats log (stats_filename) into memory and hand it to
+// the codec via avctx->stats_in for a pass-2 encode.
+// Returns 0 on success, a negative AVERROR code on failure.
+int FFStream::read_stats_file()
+{
+	int64_t len = 0;  struct stat stats_st;
+	int fd = open(stats_filename, O_RDONLY);
+// report the real reason open/fstat failed instead of a guessed code
+	int ret = fd >= 0 ? 0 : (errno ? errno : ENOENT);
+	if( !ret && fstat(fd, &stats_st) )
+		ret = errno ? errno : EINVAL;
+	if( !ret ) {
+		len = stats_st.st_size;
+// one extra byte for the nul terminator the codec expects
+		stats_in = (char *)av_malloc(len+1);
+		if( !stats_in )
+			ret = ENOMEM;
+	}
+	if( !ret && read(fd, stats_in, len) != len )
+		ret = EIO;
+	if( !ret ) {
+		stats_in[len] = 0;
+		avctx->stats_in = stats_in;
+	}
+	else if( stats_in ) {
+// do not leave a stale, unterminated buffer around on failure
+		av_freep(&stats_in);
+	}
+	if( fd >= 0 )
+		close(fd);
+	return !ret ? 0 : AVERROR(ret);
+}
+
+// Append the codec's latest pass-1 rate-control report (avctx->stats_out)
+// to the open stats log.  ret doubles as the report length on success
+// (>= 0) and as a negative AVERROR on a short write.
+// Caller must have opened stats_fp via open_stats_file().
+int FFStream::write_stats_file()
+{
+	int ret = 0;
+	if( avctx->stats_out && (ret=strlen(avctx->stats_out)) > 0 ) {
+		int len = fwrite(avctx->stats_out, 1, ret, stats_fp);
+		if( ret != len )
+// NOTE(review): fwrite does not guarantee errno is set on a short
+// write, so the reported code may be stale — consider ferror(stats_fp)
+			ff_err(ret = AVERROR(errno), "FFStream::write_stats_file");
+	}
+	return ret;
+}
+
+// Prepare two-pass rate-control files before avcodec_open2:
+//  pass bit 2: read the stats written by pass 1 into avctx->stats_in,
+//  pass bit 1: open the log file that write_stats_file will append to.
+// Returns 0 on success, the negative AVERROR of the first failure.
+int FFStream::init_stats_file()
+{
+	int ret = 0;
+	if( (pass & 2) && (ret = read_stats_file()) < 0 ) {
+		ff_err(ret, "stat file read: %s", stats_filename);
+// return now: a successful pass-1 open below must not mask this error,
+// or pass 2 would silently encode without its stats input
+		return ret;
+	}
+	if( (pass & 1) && (ret=open_stats_file()) < 0 )
+		ff_err(ret, "stat file open: %s", stats_filename);
+	return ret >= 0 ? 0 : ret;
+}
+
int FFStream::seek(int64_t no, double rate)
{
// default ffmpeg native seek
channel0 = channels = 0;
sample_rate = 0;
mbsz = 0;
+ frame_sz = AUDIO_MIN_FRAME_SZ;
length = 0;
resample_context = 0;
swr_ichs = swr_ifmt = swr_irate = 0;
+// Compute the audio frame size before delegating to the generic
+// FFStream::encode_activate.  Returns the (cached) writing state.
int FFAudioStream::encode_activate()
{
	if( writing >= 0 ) return writing;
+// guard: no codec bound to the context yet, so nothing can be activated
+	if( !avctx->codec ) return writing = 0;
	frame_sz = avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
		10000 : avctx->frame_size;
	return FFStream::encode_activate();
}
if( mbsz < len ) mbsz = len;
int64_t end_pos = pos + len;
- int ret = 0;
- for( int i=0; ret>=0 && !flushed && curr_pos<end_pos && i<MAX_RETRY; ++i ) {
+ int ret = 0, i = len / frame_sz + MAX_RETRY;
+ while( ret>=0 && !flushed && curr_pos<end_pos && --i>=0 ) {
ret = read_frame(frame);
- if( ret > 0 ) {
+ if( ret > 0 && frame->nb_samples > 0 ) {
init_swr(frame->channels, frame->format, frame->sample_rate);
load_history(&frame->extended_data[0], frame->nb_samples);
curr_pos += frame->nb_samples;
fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n");
return -1;
}
- for( int i=0; ret>=0 && !flushed && curr_pos<=pos && i<MAX_RETRY; ++i ) {
+ int i = MAX_RETRY + pos - curr_pos;
+ while( ret>=0 && !flushed && curr_pos<=pos && --i>=0 ) {
ret = read_frame(frame);
if( ret > 0 ) ++curr_pos;
}
case BC_RGB161616: return AV_PIX_FMT_RGB48LE;
case BC_RGBA16161616: return AV_PIX_FMT_RGBA64LE;
case BC_AYUV16161616: return AV_PIX_FMT_AYUV64LE;
+ case BC_GBRP: return AV_PIX_FMT_GBRP;
default: break;
}
case AV_PIX_FMT_RGB48LE: return BC_RGB161616;
case AV_PIX_FMT_RGBA64LE: return BC_RGBA16161616;
case AV_PIX_FMT_AYUV64LE: return BC_AYUV16161616;
+ case AV_PIX_FMT_GBRP: return BC_GBRP;
default: break;
}
estimated = 1;
}
}
- if( estimated )
+ static int notified = 0;
+ if( !notified && estimated ) {
+ notified = 1;
printf("FFMPEG::open_decoder: some stream times estimated\n");
+ }
ff_lock("FFMPEG::open_decoder");
int ret = 0, bad_time = 0;
sprintf(arg, "%d", asset->ff_audio_bitrate);
av_dict_set(&sopts, "b", arg, 0);
}
+ else if( asset->ff_audio_quality >= 0 ) {
+ ctx->global_quality = asset->ff_audio_quality * FF_QP2LAMBDA;
+ ctx->qmin = ctx->qmax = asset->ff_audio_quality;
+ ctx->mb_lmin = ctx->qmin * FF_QP2LAMBDA;
+ ctx->mb_lmax = ctx->qmax * FF_QP2LAMBDA;
+ ctx->flags |= CODEC_FLAG_QSCALE;
+ char arg[BCSTRLEN];
+ av_dict_set(&sopts, "flags", "+qscale", 0);
+ sprintf(arg, "%d", asset->ff_audio_quality);
+ av_dict_set(&sopts, "qscale", arg, 0);
+ sprintf(arg, "%d", ctx->global_quality);
+ av_dict_set(&sopts, "global_quality", arg, 0);
+ }
int aidx = ffaudio.size();
int fidx = aidx + ffvideo.size();
FFAudioStream *aud = new FFAudioStream(this, st, aidx, fidx);
vstrm_index.append(ffidx(vidx, 0));
vid->avctx = ctx; ffvideo.append(vid); fst = vid;
vid->width = asset->width;
- ctx->width = (vid->width+3) & ~3;
vid->height = asset->height;
- ctx->height = (vid->height+3) & ~3;
vid->frame_rate = asset->frame_rate;
+ AVPixelFormat pix_fmt = codec->pix_fmts ?
+ codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
+ AVDictionaryEntry *tag = av_dict_get(sopts, "cin_pix_fmt", NULL, 0);
+ if( tag != 0 ) {
+ int avfmt = av_get_pix_fmt(tag->value);
+ if( avfmt < 0 ) {
+ eprintf(_("cin_pix_fmt unknown = %s\n"), tag->value);
+ ret = 1;
+ break;
+ }
+ pix_fmt = (AVPixelFormat)avfmt;
+ }
+ ctx->pix_fmt = pix_fmt;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int mask_w = (1<<desc->log2_chroma_w)-1;
+ ctx->width = (vid->width+mask_w) & ~mask_w;
+ int mask_h = (1<<desc->log2_chroma_h)-1;
+ ctx->height = (vid->height+mask_h) & ~mask_h;
ctx->sample_aspect_ratio = to_sample_aspect_ratio(asset);
- ctx->pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
AVRational frame_rate = check_frame_rate(codec, vid->frame_rate);
if( !frame_rate.num || !frame_rate.den ) {
eprintf(_("check_frame_rate failed %s\n"), filename);
ret = 1;
break;
}
+ av_reduce(&frame_rate.num, &frame_rate.den,
+ frame_rate.num, frame_rate.den, INT_MAX);
+ ctx->framerate = (AVRational) { frame_rate.num, frame_rate.den };
ctx->time_base = (AVRational) { frame_rate.den, frame_rate.num };
+ st->avg_frame_rate = frame_rate;
st->time_base = ctx->time_base;
vid->writing = -1;
vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
ret = 1;
}
+
+ if( ctx ) {
+ AVDictionaryEntry *tag;
+ if( (tag=av_dict_get(sopts, "cin_stats_filename", NULL, 0)) != 0 ) {
+ char suffix[BCSTRLEN]; sprintf(suffix,"-%d.log",fst->fidx);
+ fst->stats_filename = cstrcat(2, tag->value, suffix);
+ }
+ if( (tag=av_dict_get(sopts, "flags", NULL, 0)) != 0 ) {
+ int pass = fst->pass;
+ char *cp = tag->value;
+ while( *cp ) {
+ int ch = *cp++, pfx = ch=='-' ? -1 : ch=='+' ? 1 : 0;
+ if( !isalnum(!pfx ? ch : (ch=*cp++)) ) continue;
+ char id[BCSTRLEN], *bp = id, *ep = bp+sizeof(id)-1;
+ for( *bp++=ch; isalnum(ch=*cp); ++cp )
+ if( bp < ep ) *bp++ = ch;
+ *bp = 0;
+ if( !strcmp(id, "pass1") ) {
+ pass = pfx<0 ? (pass&~1) : pfx>0 ? (pass|1) : 1;
+ }
+ else if( !strcmp(id, "pass2") ) {
+ pass = pfx<0 ? (pass&~2) : pfx>0 ? (pass|2) : 2;
+ }
+ }
+ if( (fst->pass=pass) ) {
+ if( pass & 1 ) ctx->flags |= AV_CODEC_FLAG_PASS1;
+ if( pass & 2 ) ctx->flags |= AV_CODEC_FLAG_PASS2;
+ }
+ }
+ }
}
if( !ret ) {
if( fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER )
ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
+ if( fst->stats_filename && (ret=fst->init_stats_file()) )
+ eprintf(_("error: stats file = %s\n"), fst->stats_filename);
+ }
+ if( !ret ) {
av_dict_set(&sopts, "cin_bitrate", 0, 0);
av_dict_set(&sopts, "cin_quality", 0, 0);
+ if( !av_dict_get(sopts, "threads", NULL, 0) )
+ ctx->thread_count = ff_cpus();
ret = avcodec_open2(ctx, codec, &sopts);
if( ret >= 0 ) {
ret = avcodec_parameters_from_context(st->codecpar, ctx);
if( ret < 0 )
fprintf(stderr, "Could not copy the stream parameters\n");
}
+ if( ret >= 0 ) {
+_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+ ret = avcodec_copy_context(st->codec, ctx);
+_Pragma("GCC diagnostic warning \"-Wdeprecated-declarations\"")
+ if( ret < 0 )
+ fprintf(stderr, "Could not copy the stream context\n");
+ }
if( ret < 0 ) {
ff_err(ret,"FFMPEG::open_encoder");
eprintf(_("open failed %s:%s\n"), codec_name, filename);
if( st->start_time == AV_NOPTS_VALUE ) continue;
int vidx = ffvideo.size();
while( --vidx >= 0 && ffvideo[vidx]->fidx != i );
- if( vidx >= 0 && ffvideo[vidx]->nudge != AV_NOPTS_VALUE ) continue;
+ if( vidx < 0 ) continue;
+ if( ffvideo[vidx]->nudge != AV_NOPTS_VALUE ) continue;
if( vstart_time < st->start_time )
vstart_time = st->start_time;
break; }
if( st->start_time == AV_NOPTS_VALUE ) continue;
int aidx = ffaudio.size();
while( --aidx >= 0 && ffaudio[aidx]->fidx != i );
- if( aidx >= 0 && ffaudio[aidx]->nudge != AV_NOPTS_VALUE ) continue;
+ if( aidx < 0 ) continue;
+ if( ffaudio[aidx]->frame_sz < avpar->frame_size )
+ ffaudio[aidx]->frame_sz = avpar->frame_size;
+ if( ffaudio[aidx]->nudge != AV_NOPTS_VALUE ) continue;
if( astart_time < st->start_time )
astart_time = st->start_time;
break; }
return ffvideo[stream]->st->id;
}
+// Returns 1 when the video stream is tagged with MPEG (limited/studio)
+// color range, 0 for full range or untagged streams.
+int FFMPEG::ff_video_mpeg_color_range(int stream)
+{
+	AVColorRange range = ffvideo[stream]->st->codecpar->color_range;
+	return range == AVCOL_RANGE_MPEG ? 1 : 0;
+}
int FFMPEG::ff_cpus()
{
}
if( ret >= 0 ) {
avcodec_parameters_to_context(avctx, st->codecpar);
+ if( !av_dict_get(copts, "threads", NULL, 0) )
+ avctx->thread_count = ff_cpus();
ret = avcodec_open2(avctx, decoder, &copts);
}
av_dict_free(&copts);