fmt_ctx = 0;
avctx = 0;
filter_graph = 0;
+ filt_ctx = 0;
+ filt_id = 0;
buffersrc_ctx = 0;
buffersink_ctx = 0;
frm_count = 0;
flushed = 0;
need_packet = 1;
frame = fframe = 0;
+ probe_frame = 0;
bsfc = 0;
stats_fp = 0;
stats_filename = 0;
FFStream::~FFStream()
{
+ frm_lock->lock("FFStream::~FFStream");
if( reading > 0 || writing > 0 ) avcodec_close(avctx);
if( avctx ) avcodec_free_context(&avctx);
if( fmt_ctx ) avformat_close_input(&fmt_ctx);
if( filter_graph ) avfilter_graph_free(&filter_graph);
if( frame ) av_frame_free(&frame);
if( fframe ) av_frame_free(&fframe);
+ if( probe_frame ) av_frame_free(&probe_frame);
+ frm_lock->unlock();
delete frm_lock;
if( stats_fp ) fclose(stats_fp);
if( stats_in ) av_freep(&stats_in);
return writing;
}
+// this is a global parameter that really should be in the context
static AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE; // protected by ff_lock
+
+// goofy maneuver to attach a hw_format to an av_context
+#define GET_HW_PIXFMT(fn, fmt) \
+static AVPixelFormat get_hw_##fn(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) { \
+ return fmt; \
+}
+GET_HW_PIXFMT(vaapi, AV_PIX_FMT_VAAPI)
+GET_HW_PIXFMT(vdpau, AV_PIX_FMT_VDPAU)
+GET_HW_PIXFMT(cuda, AV_PIX_FMT_CUDA)
+GET_HW_PIXFMT(nv12, AV_PIX_FMT_NV12)
+
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
const enum AVPixelFormat *pix_fmts)
{
- for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p )
- if( *p == hw_pix_fmt ) return *p;
+ for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p ) {
+ if( *p != hw_pix_fmt ) continue;
+ switch( *p ) {
+ case AV_PIX_FMT_VAAPI: ctx->get_format = get_hw_vaapi; return *p;
+ case AV_PIX_FMT_VDPAU: ctx->get_format = get_hw_vdpau; return *p;
+ case AV_PIX_FMT_CUDA: ctx->get_format = get_hw_cuda; return *p;
+ case AV_PIX_FMT_NV12: ctx->get_format = get_hw_nv12; return *p;
+ default:
+ fprintf(stderr, "Unknown HW surface format: %s\n",
+ av_get_pix_fmt_name(*p));
+ continue;
+ }
+ }
fprintf(stderr, "Failed to get HW surface format.\n");
return hw_pix_fmt = AV_PIX_FMT_NONE;
}
return AV_HWDEVICE_TYPE_NONE;
}
-void FFStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
+int FFStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
{
+ return 0;
}
int FFStream::decode_activate()
}
while( ret >= 0 && st != 0 && !reading ) {
AVCodecID codec_id = st->codecpar->codec_id;
- AVCodec *decoder = avcodec_find_decoder(codec_id);
+ AVCodec *decoder = 0;
+ if( is_video() ) {
+ if( ffmpeg->opt_video_decoder )
+ decoder = avcodec_find_decoder_by_name(ffmpeg->opt_video_decoder);
+ else
+ ffmpeg->video_codec_remaps.update(codec_id, decoder);
+ }
+ else if( is_audio() ) {
+ if( ffmpeg->opt_audio_decoder )
+ decoder = avcodec_find_decoder_by_name(ffmpeg->opt_audio_decoder);
+ else
+ ffmpeg->audio_codec_remaps.update(codec_id, decoder);
+ }
+ if( !decoder )
+ decoder = avcodec_find_decoder(codec_id);
avctx = avcodec_alloc_context3(decoder);
if( !avctx ) {
eprintf(_("cant allocate codec context\n"));
ret = AVERROR(ENOMEM);
}
- if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE )
- decode_hw_format(decoder, hw_type);
-
+ if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+ ret = decode_hw_format(decoder, hw_type);
+ }
if( ret >= 0 ) {
avcodec_parameters_to_context(avctx, st->codecpar);
if( !av_dict_get(copts, "threads", NULL, 0) )
avctx->thread_count = ffmpeg->ff_cpus();
ret = avcodec_open2(avctx, decoder, &copts);
}
+ AVFrame *hw_frame = 0;
if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
- if( need_packet ) {
- need_packet = 0;
- ret = read_packet();
- }
- if( ret >= 0 ) {
- AVPacket *pkt = (AVPacket*)ipkt;
- ret = avcodec_send_packet(avctx, pkt);
- if( ret < 0 || hw_pix_fmt == AV_PIX_FMT_NONE ) {
- ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
- ffmpeg->fmt_ctx->url);
- avcodec_close(avctx);
- avcodec_free_context(&avctx);
- av_buffer_unref(&hw_device_ctx);
- hw_device_ctx = 0;
- hw_type = AV_HWDEVICE_TYPE_NONE;
- int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
- int idx = st->index;
- av_seek_frame(fmt_ctx, idx, INT64_MIN, flags);
- need_packet = 1; flushed = 0;
- seeked = 1; st_eof(0);
- ret = 0;
- continue;
- }
+ if( !(hw_frame=av_frame_alloc()) ) {
+ fprintf(stderr, "FFStream::decode_activate: av_frame_alloc failed\n");
+ ret = AVERROR(ENOMEM);
}
+ if( ret >= 0 )
+ ret = decode(hw_frame);
}
- if( ret >= 0 ) {
- reading = 1;
+ if( ret < 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+ ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
+ ffmpeg->fmt_ctx->url);
+ avcodec_close(avctx);
+ avcodec_free_context(&avctx);
+ av_buffer_unref(&hw_device_ctx);
+ hw_device_ctx = 0;
+ av_frame_free(&hw_frame);
+ hw_type = AV_HWDEVICE_TYPE_NONE;
+ int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
+ int idx = st->index;
+ av_seek_frame(fmt_ctx, idx, 0, flags);
+ need_packet = 1; flushed = 0;
+ seeked = 1; st_eof(0);
+ ret = 0;
+ continue;
}
+ probe_frame = hw_frame;
+ if( ret >= 0 )
+ reading = 1;
else
eprintf(_("open decoder failed\n"));
}
int FFStream::decode(AVFrame *frame)
{
+ if( probe_frame ) { // hw probe reads first frame
+ av_frame_ref(frame, probe_frame);
+ av_frame_free(&probe_frame);
+ return 1;
+ }
int ret = 0;
int retries = MAX_RETRY;
-
+ frm_lock->lock("FFStream::decode");
while( ret >= 0 && !flushed && --retries >= 0 ) {
if( need_packet ) {
if( (ret=read_packet()) < 0 ) break;
AVPacket *pkt = ret > 0 ? (AVPacket*)ipkt : 0;
if( pkt ) {
if( pkt->stream_index != st->index ) continue;
- if( !pkt->data | !pkt->size ) continue;
+ if( !pkt->data || !pkt->size ) continue;
}
if( (ret=avcodec_send_packet(avctx, pkt)) < 0 ) {
ff_err(ret, "FFStream::decode: avcodec_send_packet failed.\nfile:%s\n",
flushed = st_eof();
}
}
+ frm_lock->unlock();
if( retries < 0 ) {
fprintf(stderr, "FFStream::decode: Retry limit\n");
tstmp = av_rescale_q(tstmp, time_base, AV_TIME_BASE_Q);
idx = -1;
#endif
-
+ frm_lock->lock("FFStream::seek");
+ av_frame_free(&probe_frame);
avcodec_flush_buffers(avctx);
avformat_flush(fmt_ctx);
#if 0
if( pkt_ts >= tstmp ) break;
}
if( retry < 0 ) {
- fprintf(stderr,"FFStream::seek: retry limit, pos=%jd tstmp=%jd\n",pos,tstmp);
+ ff_err(AVERROR(EIO), "FFStream::seek: %s\n"
+ " retry limit, pos=%jd tstmp=%jd, ",
+ ffmpeg->fmt_ctx->url, pos, tstmp);
ret = -1;
}
if( ret < 0 ) break;
break;
}
}
+ frm_lock->unlock();
if( ret < 0 ) {
printf("** seek fail %jd, %jd\n", pos, tstmp);
seeked = need_packet = 0;
frame->best_effort_timestamp = AV_NOPTS_VALUE;
int ret = avcodec_receive_frame(avctx, frame);
if( ret < 0 ) {
- if( first_frame || ret == AVERROR(EAGAIN) ) return 0;
+ if( first_frame ) return 0;
+ if( ret == AVERROR(EAGAIN) ) return 0;
if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
ff_err(ret, "FFAudioStream::decode_frame: Could not read audio frame.\nfile:%s\n",
ffmpeg->fmt_ctx->url);
}
FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
- : FFStream(ffmpeg, strm, fidx)
+ : FFStream(ffmpeg, strm, fidx),
+ FFVideoConvert(ffmpeg->ff_prefs())
{
this->idx = idx;
width = height = 0;
+ transpose = 0;
frame_rate = 0;
aspect_ratio = 0;
length = 0;
interlaced = 0;
top_field_first = 0;
+ color_space = -1;
+ color_range = -1;
+ fconvert_ctx = 0;
}
FFVideoStream::~FFVideoStream()
{
+ if( fconvert_ctx ) sws_freeContext(fconvert_ctx);
}
AVHWDeviceType FFVideoStream::decode_hw_activate()
const char *hw_dev = ffmpeg->opt_hw_dev;
if( !hw_dev ) hw_dev = getenv("CIN_HW_DEV");
if( !hw_dev ) hw_dev = ffmpeg->ff_hw_dev();
- if( hw_dev && *hw_dev && strcmp(_("none"), hw_dev) ) {
+ if( hw_dev && *hw_dev &&
+ strcmp("none", hw_dev) && strcmp(_("none"), hw_dev) ) {
type = av_hwdevice_find_type_by_name(hw_dev);
if( type == AV_HWDEVICE_TYPE_NONE ) {
fprintf(stderr, "Device type %s is not supported.\n", hw_dev);
return type;
}
-void FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
+int FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
{
+ int ret = 0;
hw_pix_fmt = AV_PIX_FMT_NONE;
for( int i=0; ; ++i ) {
const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
if( !config ) {
fprintf(stderr, "Decoder %s does not support device type %s.\n",
decoder->name, av_hwdevice_get_type_name(type));
+ ret = -1;
break;
}
if( (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) != 0 &&
if( hw_pix_fmt >= 0 ) {
hw_pixfmt = hw_pix_fmt;
avctx->get_format = get_hw_format;
- int ret = av_hwdevice_ctx_create(&hw_device_ctx, type, 0, 0, 0);
- if( ret >= 0 )
+ ret = av_hwdevice_ctx_create(&hw_device_ctx, type, 0, 0, 0);
+ if( ret >= 0 ) {
avctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
- else
+ ret = 1;
+ }
+ else {
ff_err(ret, "Failed HW device create.\ndev:%s\n",
av_hwdevice_get_type_name(type));
+ ret = -1;
+ }
}
+ return ret;
}
AVHWDeviceType FFVideoStream::encode_hw_activate(const char *hw_dev)
int first_frame = seeked; seeked = 0;
int ret = avcodec_receive_frame(avctx, frame);
if( ret < 0 ) {
- if( first_frame || ret == AVERROR(EAGAIN) ) return 0;
+ if( first_frame ) return 0;
if( ret == AVERROR(EAGAIN) ) return 0;
if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
ff_err(ret, "FFVideoStream::decode_frame: Could not read video frame.\nfile:%s\n,",
return ret;
}
+int FFVideoStream::convert_hw_frame(AVFrame *ifrm, AVFrame *ofrm)
+{
+ AVPixelFormat ifmt = (AVPixelFormat)ifrm->format;
+ AVPixelFormat ofmt = (AVPixelFormat)st->codecpar->format;
+ ofrm->width = ifrm->width;
+ ofrm->height = ifrm->height;
+ ofrm->format = ofmt;
+ int ret = av_frame_get_buffer(ofrm, 32);
+ if( ret < 0 ) {
+ ff_err(ret, "FFVideoStream::convert_hw_frame:"
+ " av_frame_get_buffer failed\n");
+ return -1;
+ }
+ fconvert_ctx = sws_getCachedContext(fconvert_ctx,
+ ifrm->width, ifrm->height, ifmt,
+ ofrm->width, ofrm->height, ofmt,
+ SWS_POINT, NULL, NULL, NULL);
+ if( !fconvert_ctx ) {
+ ff_err(AVERROR(EINVAL), "FFVideoStream::convert_hw_frame:"
+ " sws_getCachedContext() failed\n");
+ return -1;
+ }
+ int codec_range = st->codecpar->color_range;
+ int codec_space = st->codecpar->color_space;
+ const int *codec_table = sws_getCoefficients(codec_space);
+ int *inv_table, *table, src_range, dst_range;
+ int brightness, contrast, saturation;
+ if( !sws_getColorspaceDetails(fconvert_ctx,
+ &inv_table, &src_range, &table, &dst_range,
+ &brightness, &contrast, &saturation) ) {
+ if( src_range != codec_range || dst_range != codec_range ||
+ inv_table != codec_table || table != codec_table )
+ sws_setColorspaceDetails(fconvert_ctx,
+ codec_table, codec_range, codec_table, codec_range,
+ brightness, contrast, saturation);
+ }
+ ret = sws_scale(fconvert_ctx,
+ ifrm->data, ifrm->linesize, 0, ifrm->height,
+ ofrm->data, ofrm->linesize);
+ if( ret < 0 ) {
+ ff_err(ret, "FFVideoStream::convert_hw_frame:"
+ " sws_scale() failed\nfile: %s\n",
+ ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ return 0;
+}
+
+int FFVideoStream::load_filter(AVFrame *frame)
+{
+ AVPixelFormat pix_fmt = (AVPixelFormat)frame->format;
+ if( pix_fmt == hw_pixfmt ) {
+ AVFrame *hw_frame = this->frame;
+ av_frame_unref(hw_frame);
+ int ret = av_hwframe_transfer_data(hw_frame, frame, 0);
+ if( ret < 0 ) {
+ eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"),
+ ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ av_frame_unref(frame);
+ ret = convert_hw_frame(hw_frame, frame);
+ if( ret < 0 ) {
+ eprintf(_("Error converting data from GPU to CPU\nfile: %s\n"),
+ ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ av_frame_unref(hw_frame);
+ }
+ return FFStream::load_filter(frame);
+}
+
int FFVideoStream::encode(VFrame *vframe)
{
if( encode_activate() <= 0 ) return -1;
frame->interlaced_frame = interlaced;
frame->top_field_first = top_field_first;
}
+ if( frame && frame->format == AV_PIX_FMT_VAAPI ) { // ugly
+ int ret = avcodec_send_frame(avctx, frame);
+ for( int retry=MAX_RETRY; !ret && --retry>=0; ) {
+ FFPacket pkt; av_init_packet(pkt);
+ pkt->data = NULL; pkt->size = 0;
+ if( (ret=avcodec_receive_packet(avctx, pkt)) < 0 ) {
+ if( ret == AVERROR(EAGAIN) ) ret = 0; // weird
+ break;
+ }
+ ret = write_packet(pkt);
+ pkt->stream_index = 0;
+ av_packet_unref(pkt);
+ }
+ if( ret < 0 ) {
+ ff_err(ret, "FFStream::encode_frame: vaapi encode failed.\nfile: %s\n",
+ ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ return 0;
+ }
return FFStream::encode_frame(frame);
}
}
int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
-{
+{ // picture = vframe
int cmodel = frame->get_color_model();
AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
if( ofmt == AV_PIX_FMT_NB ) return -1;
" sws_getCachedContext() failed\n");
return -1;
}
+
+ int color_range = 0;
+ switch( preferences->yuv_color_range ) {
+ case BC_COLORS_JPEG: color_range = 1; break;
+ case BC_COLORS_MPEG: color_range = 0; break;
+ }
+ int color_space = SWS_CS_ITU601;
+ switch( preferences->yuv_color_space ) {
+ case BC_COLORS_BT601: color_space = SWS_CS_ITU601; break;
+ case BC_COLORS_BT709: color_space = SWS_CS_ITU709; break;
+ case BC_COLORS_BT2020: color_space = SWS_CS_BT2020; break;
+ }
+ const int *color_table = sws_getCoefficients(color_space);
+
+ int *inv_table, *table, src_range, dst_range;
+ int brightness, contrast, saturation;
+ if( !sws_getColorspaceDetails(convert_ctx,
+ &inv_table, &src_range, &table, &dst_range,
+ &brightness, &contrast, &saturation) ) {
+ if( src_range != color_range || dst_range != color_range ||
+ inv_table != color_table || table != color_table )
+ sws_setColorspaceDetails(convert_ctx,
+ color_table, color_range, color_table, color_range,
+ brightness, contrast, saturation);
+ }
+
int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
ipic->data, ipic->linesize);
if( ret < 0 ) {
}
int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
-{
+{ // vframe = picture
int cmodel = frame->get_color_model();
AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
if( ifmt == AV_PIX_FMT_NB ) return -1;
" sws_getCachedContext() failed\n");
return -1;
}
+
+
+ int color_range = 0;
+ switch( preferences->yuv_color_range ) {
+ case BC_COLORS_JPEG: color_range = 1; break;
+ case BC_COLORS_MPEG: color_range = 0; break;
+ }
+ int color_space = SWS_CS_ITU601;
+ switch( preferences->yuv_color_space ) {
+ case BC_COLORS_BT601: color_space = SWS_CS_ITU601; break;
+ case BC_COLORS_BT709: color_space = SWS_CS_ITU709; break;
+ case BC_COLORS_BT2020: color_space = SWS_CS_BT2020; break;
+ }
+ const int *color_table = sws_getCoefficients(color_space);
+
+ int *inv_table, *table, src_range, dst_range;
+ int brightness, contrast, saturation;
+ if( !sws_getColorspaceDetails(convert_ctx,
+ &inv_table, &src_range, &table, &dst_range,
+ &brightness, &contrast, &saturation) ) {
+ if( dst_range != color_range || table != color_table )
+ sws_setColorspaceDetails(convert_ctx,
+ inv_table, src_range, color_table, color_range,
+ brightness, contrast, saturation);
+ }
+
int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
op->data, op->linesize);
if( ret < 0 ) {
opt_video_filter = 0;
opt_audio_filter = 0;
opt_hw_dev = 0;
+ opt_video_decoder = 0;
+ opt_audio_decoder = 0;
fflags = 0;
char option_path[BCTEXTLEN];
set_option_path(option_path, "%s", "ffmpeg.opts");
return (AVRational) { freq, 1001*12 };
}
-AVRational FFMPEG::check_frame_rate(AVCodec *codec, double frame_rate)
+AVRational FFMPEG::check_frame_rate(const AVRational *p, double frame_rate)
{
- const AVRational *p = codec->supported_framerates;
AVRational rate, best_rate = (AVRational) { 0, 0 };
double max_err = 1.; int i = 0;
while( ((p ? (rate=*p++) : (rate=std_frame_rate(i++))), rate.num) != 0 ) {
scan_video_options(asset, edl);
}
+void FFMPEG::scan_format_options(Asset *asset, EDL *edl)
+{
+}
+
+void FFMPEG::load_format_options(Asset *asset, EDL *edl)
+{
+ char options_path[BCTEXTLEN];
+ set_option_path(options_path, "format/%s", asset->fformat);
+ if( !load_options(options_path,
+ asset->ff_format_options,
+ sizeof(asset->ff_format_options)) )
+ scan_format_options(asset, edl);
+}
+
int FFMPEG::load_defaults(const char *path, const char *type,
char *codec, char *codec_options, int len)
{
if( asset->format != FILE_FFMPEG ) return;
if( text != asset->fformat )
strcpy(asset->fformat, text);
+ if( !asset->ff_format_options[0] )
+ load_format_options(asset, edl);
if( asset->audio_data && !asset->ff_audio_options[0] ) {
if( !load_defaults("audio", text, asset->acodec,
asset->ff_audio_options, sizeof(asset->ff_audio_options)) )
if( !fp ) return 0;
int ret = read_options(fp, options, opts);
fclose(fp);
- AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
- if( tag ) st->id = strtol(tag->value,0,0);
+ if( !ret && st ) {
+ AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
+ if( tag ) st->id = strtol(tag->value,0,0);
+ }
return ret;
}
+FFCodecRemap::FFCodecRemap()
+{
+ old_codec = 0;
+ new_codec = 0;
+}
+FFCodecRemap::~FFCodecRemap()
+{
+ delete [] old_codec;
+ delete [] new_codec;
+}
+
+int FFCodecRemaps::add(const char *val)
+{
+ char old_codec[BCSTRLEN], new_codec[BCSTRLEN];
+ if( sscanf(val, " %63[a-zA-z0-9_-] = %63[a-z0-9_-]",
+ &old_codec[0], &new_codec[0]) != 2 ) return 1;
+ FFCodecRemap &remap = append();
+ remap.old_codec = cstrdup(old_codec);
+ remap.new_codec = cstrdup(new_codec);
+ return 0;
+}
+
+
+int FFCodecRemaps::update(AVCodecID &codec_id, AVCodec *&decoder)
+{
+ AVCodec *codec = avcodec_find_decoder(codec_id);
+ if( !codec ) return -1;
+ const char *name = codec->name;
+ FFCodecRemaps &map = *this;
+ int k = map.size();
+ while( --k >= 0 && strcmp(map[k].old_codec, name) );
+ if( k < 0 ) return 1;
+ const char *new_codec = map[k].new_codec;
+ codec = avcodec_find_decoder_by_name(new_codec);
+ if( !codec ) return -1;
+ decoder = codec;
+ return 0;
+}
+
int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
{
int ret = 0, no = 0;
if( !ret ) {
if( !strcmp(key, "duration") )
opt_duration = strtod(val, 0);
+ else if( !strcmp(key, "video_decoder") )
+ opt_video_decoder = cstrdup(val);
+ else if( !strcmp(key, "audio_decoder") )
+ opt_audio_decoder = cstrdup(val);
+ else if( !strcmp(key, "remap_video_decoder") )
+ video_codec_remaps.add(val);
+ else if( !strcmp(key, "remap_audio_decoder") )
+ audio_codec_remaps.add(val);
else if( !strcmp(key, "video_filter") )
opt_video_filter = cstrdup(val);
else if( !strcmp(key, "audio_filter") )
if( ffvideo.size() > 0 )
report("\n%d video stream%s\n",ffvideo.size(), ffvideo.size()!=1 ? "s" : "");
for( int vidx=0; vidx<ffvideo.size(); ++vidx ) {
+ const char *unkn = _("(unkn)");
FFVideoStream *vid = ffvideo[vidx];
AVStream *st = vid->st;
AVCodecID codec_id = st->codecpar->codec_id;
report(_("vid%d (%d), id 0x%06x:\n"), vid->idx, vid->fidx, codec_id);
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
- report(" video%d %s", vidx+1, desc ? desc->name : " (unkn)");
+ report(" video%d %s ", vidx+1, desc ? desc->name : unkn);
report(" %dx%d %5.2f", vid->width, vid->height, vid->frame_rate);
AVPixelFormat pix_fmt = (AVPixelFormat)st->codecpar->format;
const char *pfn = av_get_pix_fmt_name(pix_fmt);
- report(" pix %s\n", pfn ? pfn : "(unkn)");
+ report(" pix %s\n", pfn ? pfn : unkn);
+ enum AVColorSpace space = st->codecpar->color_space;
+ const char *nm = av_color_space_name(space);
+ report(" color space:%s", nm ? nm : unkn);
+ enum AVColorRange range = st->codecpar->color_range;
+ const char *rg = av_color_range_name(range);
+ report("/ range:%s\n", rg ? rg : unkn);
double secs = to_secs(st->duration, st->time_base);
int64_t length = secs * vid->frame_rate + 0.5;
double ofs = to_secs((vid->nudge - st->start_time), st->time_base);
int hrs = secs/3600; secs -= hrs*3600;
int mins = secs/60; secs -= mins*60;
report(" %d:%02d:%05.2f\n", hrs, mins, secs);
+ double theta = vid->get_rotation_angle();
+ if( fabs(theta) > 1 )
+ report(" rotation angle: %0.1f\n", theta);
}
if( ffaudio.size() > 0 )
report("\n%d audio stream%s\n",ffaudio.size(), ffaudio.size()!=1 ? "s" : "");
vid->width = avpar->width;
vid->height = avpar->height;
vid->frame_rate = !framerate.den ? 0 : (double)framerate.num / framerate.den;
+ switch( avpar->color_range ) {
+ case AVCOL_RANGE_MPEG:
+ vid->color_range = BC_COLORS_MPEG;
+ break;
+ case AVCOL_RANGE_JPEG:
+ vid->color_range = BC_COLORS_JPEG;
+ break;
+ default:
+ vid->color_range = !file_base ? BC_COLORS_JPEG :
+ file_base->file->preferences->yuv_color_range;
+ break;
+ }
+ switch( avpar->color_space ) {
+ case AVCOL_SPC_BT470BG:
+ case AVCOL_SPC_SMPTE170M:
+ vid->color_space = BC_COLORS_BT601;
+ break;
+ case AVCOL_SPC_BT709:
+ vid->color_space = BC_COLORS_BT709;
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ case AVCOL_SPC_BT2020_CL:
+ vid->color_space = BC_COLORS_BT2020;
+ break;
+ default:
+ vid->color_space = !file_base ? BC_COLORS_BT601 :
+ file_base->file->preferences->yuv_color_space;
+ break;
+ }
double secs = to_secs(st->duration, st->time_base);
vid->length = secs * vid->frame_rate;
vid->aspect_ratio = (double)st->sample_aspect_ratio.num / st->sample_aspect_ratio.den;
vid->nudge = st->start_time;
vid->reading = -1;
- if( opt_video_filter )
- ret = vid->create_filter(opt_video_filter, avpar);
+ ret = vid->create_filter(opt_video_filter);
break; }
case AVMEDIA_TYPE_AUDIO: {
if( avpar->channels < 1 ) continue;
aud->init_swr(aud->channels, avpar->format, aud->sample_rate);
aud->nudge = st->start_time;
aud->reading = -1;
- if( opt_audio_filter )
- ret = aud->create_filter(opt_audio_filter, avpar);
+ ret = aud->create_filter(opt_audio_filter);
break; }
default: break;
}
}
if( bad_time && !(fflags & FF_BAD_TIMES) ) {
fflags |= FF_BAD_TIMES;
- printf("FFMPEG::open_decoder: some stream have bad times: %s\n",
+ printf(_("FFMPEG::open_decoder: some stream have bad times: %s\n"),
fmt_ctx->url);
}
ff_unlock();
vid->width = asset->width;
vid->height = asset->height;
vid->frame_rate = asset->frame_rate;
-
- AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
+ if( (vid->color_range = asset->ff_color_range) < 0 )
+ vid->color_range = file_base->file->preferences->yuv_color_range;
+ switch( vid->color_range ) {
+ case BC_COLORS_MPEG: ctx->color_range = AVCOL_RANGE_MPEG; break;
+ case BC_COLORS_JPEG: ctx->color_range = AVCOL_RANGE_JPEG; break;
+ }
+ if( (vid->color_space = asset->ff_color_space) < 0 )
+ vid->color_space = file_base->file->preferences->yuv_color_space;
+ switch( vid->color_space ) {
+ case BC_COLORS_BT601: ctx->colorspace = AVCOL_SPC_SMPTE170M; break;
+ case BC_COLORS_BT709: ctx->colorspace = AVCOL_SPC_BT709; break;
+ case BC_COLORS_BT2020: ctx->colorspace = AVCOL_SPC_BT2020_NCL; break;
+ }
+ AVPixelFormat pix_fmt = av_get_pix_fmt(asset->ff_pixel_format);
if( opt_hw_dev != 0 ) {
AVHWDeviceType hw_type = vid->encode_hw_activate(opt_hw_dev);
switch( hw_type ) {
pix_fmt = AV_PIX_FMT_VAAPI;
break;
case AV_HWDEVICE_TYPE_NONE:
- default:
- pix_fmt = av_get_pix_fmt(asset->ff_pixel_format);
- break;
+ default: break;
}
}
if( pix_fmt == AV_PIX_FMT_NONE )
int mask_h = (1<<desc->log2_chroma_h)-1;
ctx->height = (vid->height+mask_h) & ~mask_h;
ctx->sample_aspect_ratio = to_sample_aspect_ratio(asset);
- AVRational frame_rate = check_frame_rate(codec, vid->frame_rate);
+ AVRational frame_rate = check_frame_rate(codec->supported_framerates, vid->frame_rate);
if( !frame_rate.num || !frame_rate.den ) {
eprintf(_("check_frame_rate failed %s\n"), filename);
ret = 1;
fmt_ctx->url);
return -1;
}
-
+ if( !strcmp(file_format, "image2") ) {
+ Asset *asset = file_base->asset;
+ const char *filename = asset->path;
+ FILE *fp = fopen(filename,"w");
+ if( !fp ) {
+ eprintf(_("Cant write image2 header file: %s\n %m"), filename);
+ return 1;
+ }
+ fprintf(fp, "IMAGE2\n");
+ fprintf(fp, "# Frame rate: %f\n", asset->frame_rate);
+ fprintf(fp, "# Width: %d\n", asset->width);
+ fprintf(fp, "# Height: %d\n", asset->height);
+ fclose(fp);
+ }
int prog_id = 1;
AVProgram *prog = av_new_program(fmt_ctx, prog_id);
for( int i=0; i< ffvideo.size(); ++i )
char option_path[BCTEXTLEN];
set_option_path(option_path, "format/%s", file_format);
read_options(option_path, fopts, 1);
- ret = avformat_write_header(fmt_ctx, &fopts);
+ av_dict_copy(&fopts, opts, 0);
+ if( scan_options(file_base->asset->ff_format_options, fopts, 0) ) {
+ eprintf(_("bad format options %s\n"), file_base->asset->path);
+ ret = -1;
+ }
+ if( ret >= 0 )
+ ret = avformat_write_header(fmt_ctx, &fopts);
if( ret < 0 ) {
ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
fmt_ctx->url);
int FFMPEG::ff_video_width(int stream)
{
- return ffvideo[stream]->width;
+ FFVideoStream *vst = ffvideo[stream];
+ return !vst->transpose ? vst->width : vst->height;
}
int FFMPEG::ff_video_height(int stream)
{
- return ffvideo[stream]->height;
+ FFVideoStream *vst = ffvideo[stream];
+ return !vst->transpose ? vst->height : vst->width;
}
int FFMPEG::ff_set_video_width(int stream, int width)
{
- int w = ffvideo[stream]->width;
- ffvideo[stream]->width = width;
+ FFVideoStream *vst = ffvideo[stream];
+ int *vw = !vst->transpose ? &vst->width : &vst->height, w = *vw;
+ *vw = width;
return w;
}
int FFMPEG::ff_set_video_height(int stream, int height)
{
- int h = ffvideo[stream]->height;
- ffvideo[stream]->height = height;
+ FFVideoStream *vst = ffvideo[stream];
+ int *vh = !vst->transpose ? &vst->height : &vst->width, h = *vh;
+ *vh = height;
return h;
}
return ffvideo[stream]->aspect_ratio;
}
-const char* FFMPEG::ff_video_format(int stream)
+const char* FFMPEG::ff_video_codec(int stream)
{
AVStream *st = ffvideo[stream]->st;
AVCodecID id = st->codecpar->codec_id;
return desc ? desc->name : _("Unknown");
}
+int FFMPEG::ff_color_range(int stream)
+{
+ return ffvideo[stream]->color_range;
+}
+
+int FFMPEG::ff_color_space(int stream)
+{
+ return ffvideo[stream]->color_space;
+}
+
double FFMPEG::ff_frame_rate(int stream)
{
return ffvideo[stream]->frame_rate;
int FFMPEG::ff_cpus()
{
- return file_base->file->cpus;
+ return !file_base ? 1 : file_base->file->cpus;
}
const char *FFMPEG::ff_hw_dev()
return &file_base->file->preferences->use_hw_dev[0];
}
-int FFVideoStream::create_filter(const char *filter_spec, AVCodecParameters *avpar)
+Preferences *FFMPEG::ff_prefs()
+{
+ return !file_base ? 0 : file_base->file->preferences;
+}
+
+double FFVideoStream::get_rotation_angle()
+{
+ int size = 0;
+ int *matrix = (int*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
+ int len = size/sizeof(*matrix);
+ if( !matrix || len < 5 ) return 0;
+ const double s = 1/65536.;
+ double theta = (!matrix[0] && !matrix[3]) || (!matrix[1] && !matrix[4]) ? 0 :
+ atan2( s*matrix[1] / hypot(s*matrix[1], s*matrix[4]),
+ s*matrix[0] / hypot(s*matrix[0], s*matrix[3])) * 180/M_PI;
+ return theta;
+}
+
+int FFVideoStream::flip(double theta)
{
+ int ret = 0;
+ transpose = 0;
+ Preferences *preferences = ffmpeg->ff_prefs();
+ if( !preferences || !preferences->auto_rotate ) return ret;
+ double tolerance = 1;
+ if( fabs(theta-0) < tolerance ) return ret;
+ if( (theta=fmod(theta, 360)) < 0 ) theta += 360;
+ if( fabs(theta-90) < tolerance ) {
+ if( (ret = insert_filter("transpose", "clock")) < 0 )
+ return ret;
+ transpose = 1;
+ }
+ else if( fabs(theta-180) < tolerance ) {
+ if( (ret=insert_filter("hflip", 0)) < 0 )
+ return ret;
+ if( (ret=insert_filter("vflip", 0)) < 0 )
+ return ret;
+ }
+ else if (fabs(theta-270) < tolerance ) {
+ if( (ret=insert_filter("transpose", "cclock")) < 0 )
+ return ret;
+ transpose = 1;
+ }
+ else {
+ char angle[BCSTRLEN];
+ sprintf(angle, "%f", theta*M_PI/180.);
+ if( (ret=insert_filter("rotate", angle)) < 0 )
+ return ret;
+ }
+ return 1;
+}
+
+int FFVideoStream::create_filter(const char *filter_spec)
+{
+ double theta = get_rotation_angle();
+ if( !theta && !filter_spec )
+ return 0;
avfilter_register_all();
- const char *sp = filter_spec;
- char filter_name[BCSTRLEN], *np = filter_name;
- int i = sizeof(filter_name);
- while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
- *np = 0;
- const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
- if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
- ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
- return -1;
+ if( filter_spec ) {
+ const char *sp = filter_spec;
+ char filter_name[BCSTRLEN], *np = filter_name;
+ int i = sizeof(filter_name);
+ while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+ *np = 0;
+ const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
+ if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
+ ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
+ return -1;
+ }
}
- filter_graph = avfilter_graph_alloc();
- const AVFilter *buffersrc = avfilter_get_by_name("buffer");
- const AVFilter *buffersink = avfilter_get_by_name("buffersink");
+ AVCodecParameters *avpar = st->codecpar;
+ int sa_num = avpar->sample_aspect_ratio.num;
+ if( !sa_num ) sa_num = 1;
+ int sa_den = avpar->sample_aspect_ratio.den;
+ if( !sa_den ) sa_num = 1;
int ret = 0; char args[BCTEXTLEN];
AVPixelFormat pix_fmt = (AVPixelFormat)avpar->format;
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
avpar->width, avpar->height, (int)pix_fmt,
- st->time_base.num, st->time_base.den,
- avpar->sample_aspect_ratio.num, avpar->sample_aspect_ratio.den);
- if( ret >= 0 )
- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
- args, NULL, filter_graph);
- if( ret >= 0 )
- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
- NULL, NULL, filter_graph);
+ st->time_base.num, st->time_base.den, sa_num, sa_den);
+ if( ret >= 0 ) {
+ filt_ctx = 0;
+ ret = insert_filter("buffer", args, "in");
+ buffersrc_ctx = filt_ctx;
+ }
if( ret >= 0 )
+ ret = flip(theta);
+ AVFilterContext *fsrc = filt_ctx;
+ if( ret >= 0 ) {
+ filt_ctx = 0;
+ ret = insert_filter("buffersink", 0, "out");
+ buffersink_ctx = filt_ctx;
+ }
+ if( ret >= 0 ) {
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
(uint8_t*)&pix_fmt, sizeof(pix_fmt),
AV_OPT_SEARCH_CHILDREN);
- if( ret < 0 )
- ff_err(ret, "FFVideoStream::create_filter");
+ }
+ if( ret >= 0 )
+ ret = config_filters(filter_spec, fsrc);
else
- ret = FFStream::create_filter(filter_spec);
+ ff_err(ret, "FFVideoStream::create_filter");
return ret >= 0 ? 0 : -1;
}
-int FFAudioStream::create_filter(const char *filter_spec, AVCodecParameters *avpar)
+int FFAudioStream::create_filter(const char *filter_spec)
{
+ if( !filter_spec )
+ return 0;
avfilter_register_all();
- const char *sp = filter_spec;
- char filter_name[BCSTRLEN], *np = filter_name;
- int i = sizeof(filter_name);
- while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
- *np = 0;
- const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
- if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
- ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
- return -1;
+ if( filter_spec ) {
+ const char *sp = filter_spec;
+ char filter_name[BCSTRLEN], *np = filter_name;
+ int i = sizeof(filter_name);
+ while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
+ *np = 0;
+ const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
+ if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
+ ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
+ return -1;
+ }
}
- filter_graph = avfilter_graph_alloc();
- const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
- const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
int ret = 0; char args[BCTEXTLEN];
+ AVCodecParameters *avpar = st->codecpar;
AVSampleFormat sample_fmt = (AVSampleFormat)avpar->format;
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
st->time_base.num, st->time_base.den, avpar->sample_rate,
av_get_sample_fmt_name(sample_fmt), avpar->channel_layout);
- if( ret >= 0 )
- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
- args, NULL, filter_graph);
- if( ret >= 0 )
- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
- NULL, NULL, filter_graph);
+ if( ret >= 0 ) {
+ filt_ctx = 0;
+ ret = insert_filter("abuffer", args, "in");
+ buffersrc_ctx = filt_ctx;
+ }
+ AVFilterContext *fsrc = filt_ctx;
+ if( ret >= 0 ) {
+ filt_ctx = 0;
+ ret = insert_filter("abuffersink", 0, "out");
+ buffersink_ctx = filt_ctx;
+ }
if( ret >= 0 )
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
(uint8_t*)&sample_fmt, sizeof(sample_fmt),
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
(uint8_t*)&sample_rate, sizeof(sample_rate),
AV_OPT_SEARCH_CHILDREN);
- if( ret < 0 )
- ff_err(ret, "FFAudioStream::create_filter");
+ if( ret >= 0 )
+ ret = config_filters(filter_spec, fsrc);
else
- ret = FFStream::create_filter(filter_spec);
+ ff_err(ret, "FFAudioStream::create_filter");
return ret >= 0 ? 0 : -1;
}
-int FFStream::create_filter(const char *filter_spec)
+int FFStream::insert_filter(const char *name, const char *arg, const char *inst_name)
{
- /* Endpoints for the filter graph. */
- AVFilterInOut *outputs = avfilter_inout_alloc();
- outputs->name = av_strdup("in");
- outputs->filter_ctx = buffersrc_ctx;
- outputs->pad_idx = 0;
- outputs->next = 0;
-
- AVFilterInOut *inputs = avfilter_inout_alloc();
- inputs->name = av_strdup("out");
- inputs->filter_ctx = buffersink_ctx;
- inputs->pad_idx = 0;
- inputs->next = 0;
-
- int ret = !outputs->name || !inputs->name ? -1 : 0;
+ const AVFilter *filter = avfilter_get_by_name(name);
+ if( !filter ) return -1;
+ char filt_inst[BCSTRLEN];
+ if( !inst_name ) {
+ snprintf(filt_inst, sizeof(filt_inst), "%s_%d", name, ++filt_id);
+ inst_name = filt_inst;
+ }
+ if( !filter_graph )
+ filter_graph = avfilter_graph_alloc();
+ AVFilterContext *fctx = 0;
+ int ret = avfilter_graph_create_filter(&fctx,
+ filter, inst_name, arg, NULL, filter_graph);
+ if( ret >= 0 && filt_ctx )
+ ret = avfilter_link(filt_ctx, 0, fctx, 0);
if( ret >= 0 )
- ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
- &inputs, &outputs, NULL);
+ filt_ctx = fctx;
+ else
+ avfilter_free(fctx);
+ return ret;
+}
+
+int FFStream::config_filters(const char *filter_spec, AVFilterContext *fsrc)
+{
+ int ret = 0;
+ AVFilterContext *fsink = buffersink_ctx;
+ if( filter_spec ) {
+ /* Endpoints for the filter graph. */
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ if( !inputs || !outputs ) ret = -1;
+ if( ret >= 0 ) {
+ outputs->filter_ctx = fsrc;
+ outputs->pad_idx = 0;
+ outputs->next = 0;
+ if( !(outputs->name = av_strdup(fsrc->name)) ) ret = -1;
+ }
+ if( ret >= 0 ) {
+ inputs->filter_ctx = fsink;
+ inputs->pad_idx = 0;
+ inputs->next = 0;
+ if( !(inputs->name = av_strdup(fsink->name)) ) ret = -1;
+ }
+ if( ret >= 0 ) {
+ int len = strlen(fsrc->name)+2 + strlen(filter_spec) + 1;
+ char spec[len]; sprintf(spec, "[%s]%s", fsrc->name, filter_spec);
+ ret = avfilter_graph_parse_ptr(filter_graph, spec,
+ &inputs, &outputs, NULL);
+ }
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+ }
+ else
+ ret = avfilter_link(fsrc, 0, fsink, 0);
if( ret >= 0 )
ret = avfilter_graph_config(filter_graph, NULL);
-
if( ret < 0 ) {
ff_err(ret, "FFStream::create_filter");
avfilter_graph_free(&filter_graph);
filter_graph = 0;
}
- avfilter_inout_free(&inputs);
- avfilter_inout_free(&outputs);
return ret;
}
+
+AVCodecContext *FFMPEG::activate_decoder(AVStream *st)
+{
+ AVDictionary *copts = 0;
+ av_dict_copy(&copts, opts, 0);
+ AVCodecID codec_id = st->codecpar->codec_id;
+ AVCodec *decoder = 0;
+ switch( st->codecpar->codec_type ) {
+ case AVMEDIA_TYPE_VIDEO:
+ if( opt_video_decoder )
+ decoder = avcodec_find_decoder_by_name(opt_video_decoder);
+ else
+ video_codec_remaps.update(codec_id, decoder);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if( opt_audio_decoder )
+ decoder = avcodec_find_decoder_by_name(opt_audio_decoder);
+ else
+ audio_codec_remaps.update(codec_id, decoder);
+ break;
+ default:
+ return 0;
+ }
+ if( !decoder && !(decoder = avcodec_find_decoder(codec_id)) ) {
+ eprintf(_("cant find decoder codec %d\n"), (int)codec_id);
+ return 0;
+ }
+ AVCodecContext *avctx = avcodec_alloc_context3(decoder);
+ if( !avctx ) {
+ eprintf(_("cant allocate codec context\n"));
+ return 0;
+ }
+ avcodec_parameters_to_context(avctx, st->codecpar);
+ if( !av_dict_get(copts, "threads", NULL, 0) )
+ avctx->thread_count = ff_cpus();
+ int ret = avcodec_open2(avctx, decoder, &copts);
+ av_dict_free(&copts);
+ if( ret < 0 ) {
+ avcodec_free_context(&avctx);
+ avctx = 0;
+ }
+ return avctx;
+}
+
int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
{
AVPacket pkt;
index_state->add_audio_markers(ffaudio.size());
for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
- int ret = 0;
- AVDictionary *copts = 0;
- av_dict_copy(&copts, opts, 0);
AVStream *st = fmt_ctx->streams[i];
- AVCodecID codec_id = st->codecpar->codec_id;
- AVCodec *decoder = avcodec_find_decoder(codec_id);
- AVCodecContext *avctx = avcodec_alloc_context3(decoder);
- if( !avctx ) {
- eprintf(_("cant allocate codec context\n"));
- ret = AVERROR(ENOMEM);
- }
- if( ret >= 0 ) {
- avcodec_parameters_to_context(avctx, st->codecpar);
- if( !av_dict_get(copts, "threads", NULL, 0) )
- avctx->thread_count = ff_cpus();
- ret = avcodec_open2(avctx, decoder, &copts);
- }
- av_dict_free(&copts);
- if( ret >= 0 ) {
+ AVCodecContext *avctx = activate_decoder(st);
+ if( avctx ) {
AVCodecParameters *avpar = st->codecpar;
switch( avpar->codec_type ) {
case AVMEDIA_TYPE_VIDEO: {
if( vidx < 0 ) break;
FFVideoStream *vid = ffvideo[vidx];
if( !vid->avctx ) break;
- int64_t tstmp = pkt.dts;
- if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.pts;
+ int64_t tstmp = pkt.pts;
+ if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.dts;
if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
if( vid->nudge != AV_NOPTS_VALUE ) tstmp -= vid->nudge;
double secs = to_secs(tstmp, st->time_base);
}
}
+
+/*
+ * 1) if the format context has a timecode
+ * return fmt_ctx->timecode - 0
+ * 2) if the layer/channel has a timecode
+ * return st->timecode - (start_time-nudge)
+ * 3) find the 1st program with stream, find 1st program video stream,
+ * if video stream has a timecode, return st->timecode - (start_time-nudge)
+ * 4) find timecode in any stream, return st->timecode
+ * 5) read 100 packets, save ofs=pkt.pts*st->time_base - st->nudge:
+ * decode frame for video stream of 1st program
+ * if frame->timecode has a timecode, return frame->timecode - ofs
+ * if side_data has gop timecode, return gop->timecode - ofs
+ * if side_data has smpte timecode, return smpte->timecode - ofs
+ * 6) if the filename/url scans *date_time.ext, return date_time
+ * 7) if stat works on the filename/url, return mtime
+ * 8) return -1 failure
+*/
+double FFMPEG::get_initial_timecode(int data_type, int channel, double frame_rate)
+{
+ AVRational rate = check_frame_rate(0, frame_rate);
+ if( !rate.num ) return -1;
+// format context timecode
+ AVDictionaryEntry *tc = av_dict_get(fmt_ctx->metadata, "timecode", 0, 0);
+ if( tc ) return ff_get_timecode(tc->value, rate, 0);
+// stream timecode
+ if( open_decoder() ) return -1;
+ AVStream *st = 0;
+ int64_t nudge = 0;
+ int codec_type = -1, fidx = -1;
+ switch( data_type ) {
+ case TRACK_AUDIO: {
+ codec_type = AVMEDIA_TYPE_AUDIO;
+ int aidx = astrm_index[channel].st_idx;
+ FFAudioStream *aud = ffaudio[aidx];
+ fidx = aud->fidx;
+ nudge = aud->nudge;
+ st = aud->st;
+ AVDictionaryEntry *tref = av_dict_get(fmt_ctx->metadata, "time_reference", 0, 0);
+ if( tref && aud && aud->sample_rate )
+ return strtod(tref->value, 0) / aud->sample_rate;
+ break; }
+ case TRACK_VIDEO: {
+ codec_type = AVMEDIA_TYPE_VIDEO;
+ int vidx = vstrm_index[channel].st_idx;
+ FFVideoStream *vid = ffvideo[vidx];
+ fidx = vid->fidx;
+ nudge = vid->nudge;
+ st = vid->st;
+ break; }
+ }
+ if( codec_type < 0 ) return -1;
+ if( st )
+ tc = av_dict_get(st->metadata, "timecode", 0, 0);
+ if( !tc ) {
+ st = 0;
+// find first program which references this stream
+ int pidx = -1;
+ for( int i=0, m=fmt_ctx->nb_programs; pidx<0 && i<m; ++i ) {
+ AVProgram *pgrm = fmt_ctx->programs[i];
+ for( int j=0, n=pgrm->nb_stream_indexes; j<n; ++j ) {
+ int st_idx = pgrm->stream_index[j];
+ if( st_idx == fidx ) { pidx = i; break; }
+ }
+ }
+ fidx = -1;
+ if( pidx >= 0 ) {
+ AVProgram *pgrm = fmt_ctx->programs[pidx];
+ for( int j=0, n=pgrm->nb_stream_indexes; j<n; ++j ) {
+ int st_idx = pgrm->stream_index[j];
+ AVStream *tst = fmt_ctx->streams[st_idx];
+ if( !tst ) continue;
+ if( tst->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
+ st = tst; fidx = st_idx;
+ break;
+ }
+ }
+ }
+ else {
+ for( int i=0, n=fmt_ctx->nb_streams; i<n; ++i ) {
+ AVStream *tst = fmt_ctx->streams[i];
+ if( !tst ) continue;
+ if( tst->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
+ st = tst; fidx = i;
+ break;
+ }
+ }
+ }
+ if( st )
+ tc = av_dict_get(st->metadata, "timecode", 0, 0);
+ }
+
+ if( !tc ) {
+ // any timecode, includes -data- streams
+ for( int i=0, n=fmt_ctx->nb_streams; i<n; ++i ) {
+ AVStream *tst = fmt_ctx->streams[i];
+ if( !tst ) continue;
+ if( (tc = av_dict_get(tst->metadata, "timecode", 0, 0)) ) {
+ st = tst; fidx = i;
+ break;
+ }
+ }
+ }
+
+ if( st && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
+ if( st->r_frame_rate.num && st->r_frame_rate.den )
+ rate = st->r_frame_rate;
+ nudge = st->start_time;
+ for( int i=0; i<ffvideo.size(); ++i ) {
+ if( ffvideo[i]->st == st ) {
+ nudge = ffvideo[i]->nudge;
+ break;
+ }
+ }
+ }
+
+ if( tc ) { // return timecode
+ double secs = st->start_time == AV_NOPTS_VALUE ? 0 :
+ to_secs(st->start_time - nudge, st->time_base);
+ return ff_get_timecode(tc->value, rate, secs);
+ }
+
+ if( !st || fidx < 0 ) return -1;
+
+ decode_activate();
+ AVCodecContext *av_ctx = activate_decoder(st);
+ if( !av_ctx ) {
+ fprintf(stderr,"activate_decoder failed\n");
+ return -1;
+ }
+ avCodecContext avctx(av_ctx); // auto deletes
+ if( avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
+ avctx->framerate.num && avctx->framerate.den )
+ rate = avctx->framerate;
+
+ avPacket pkt; // auto deletes
+ avFrame frame; // auto deletes
+ if( !frame ) {
+ fprintf(stderr,"av_frame_alloc failed\n");
+ return -1;
+ }
+ int errs = 0;
+ int64_t max_packets = 100;
+ char tcbuf[AV_TIMECODE_STR_SIZE];
+
+ for( int64_t count=0; count<max_packets; ++count ) {
+ av_packet_unref(pkt);
+ pkt->data = 0; pkt->size = 0;
+
+ int ret = av_read_frame(fmt_ctx, pkt);
+ if( ret < 0 ) {
+ if( ret == AVERROR_EOF ) break;
+ if( ++errs > 100 ) {
+ fprintf(stderr,"over 100 read_frame errs\n");
+ break;
+ }
+ continue;
+ }
+ if( !pkt->data ) continue;
+ int i = pkt->stream_index;
+ if( i != fidx ) continue;
+ int64_t tstmp = pkt->pts;
+ if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt->dts;
+ double secs = to_secs(tstmp - nudge, st->time_base);
+ ret = avcodec_send_packet(avctx, pkt);
+ if( ret < 0 ) return -1;
+
+ while( (ret = avcodec_receive_frame(avctx, frame)) >= 0 ) {
+ if( (tc = av_dict_get(frame->metadata, "timecode", 0, 0)) )
+ return ff_get_timecode(tc->value, rate, secs);
+ int k = frame->nb_side_data;
+ AVFrameSideData *side_data = 0;
+ while( --k >= 0 ) {
+ side_data = frame->side_data[k];
+ switch( side_data->type ) {
+ case AV_FRAME_DATA_GOP_TIMECODE: {
+ int64_t data = *(int64_t *)side_data->data;
+ int sz = sizeof(data);
+ if( side_data->size >= sz ) {
+ av_timecode_make_mpeg_tc_string(tcbuf, data);
+ return ff_get_timecode(tcbuf, rate, secs);
+ }
+ break; }
+ case AV_FRAME_DATA_S12M_TIMECODE: {
+ uint32_t *data = (uint32_t *)side_data->data;
+ int n = data[0], sz = (n+1)*sizeof(*data);
+ if( side_data->size >= sz ) {
+ av_timecode_make_smpte_tc_string(tcbuf, data[n], 0);
+ return ff_get_timecode(tcbuf, rate, secs);
+ }
+ break; }
+ default:
+ break;
+ }
+ }
+ }
+ }
+ char *path = fmt_ctx->url;
+ char *bp = strrchr(path, '/');
+ if( !bp ) bp = path; else ++bp;
+ char *cp = strrchr(bp, '.');
+ if( cp && (cp-=(8+1+6)) >= bp ) {
+ char sep[BCSTRLEN];
+ int year,mon,day, hour,min,sec, frm=0;
+ if( sscanf(cp,"%4d%2d%2d%[_-]%2d%2d%2d",
+ &year,&mon,&day, sep, &hour,&min,&sec) == 7 ) {
+ int ch = sep[0];
+ // year>=1970,mon=1..12,day=1..31, hour=0..23,min=0..59,sec=0..60
+ if( (ch=='_' || ch=='-' ) &&
+ year >= 1970 && mon>=1 && mon<=12 && day>=1 && day<=31 &&
+ hour>=0 && hour<24 && min>=0 && min<60 && sec>=0 && sec<=60 ) {
+ sprintf(tcbuf,"%d:%02d:%02d:%02d", hour,min,sec, frm);
+ return ff_get_timecode(tcbuf, rate, 0);
+ }
+ }
+ }
+ struct stat tst;
+ if( stat(path, &tst) >= 0 ) {
+ time_t t = (time_t)tst.st_mtim.tv_sec;
+ struct tm tm;
+ localtime_r(&t, &tm);
+ int64_t us = tst.st_mtim.tv_nsec / 1000;
+ int frm = us/1000000. * frame_rate;
+ sprintf(tcbuf,"%d:%02d:%02d:%02d", tm.tm_hour, tm.tm_min, tm.tm_sec, frm);
+ return ff_get_timecode(tcbuf, rate, 0);
+ }
+ return -1;
+}
+
+double FFMPEG::ff_get_timecode(char *str, AVRational rate, double pos)
+{
+ AVTimecode tc;
+ if( av_timecode_init_from_string(&tc, rate, str, fmt_ctx) )
+ return -1;
+ double secs = (double)tc.start / tc.fps - pos;
+ if( secs < 0 ) secs = 0;
+ return secs;
+}
+
+double FFMPEG::get_timecode(const char *path, int data_type, int channel, double rate)
+{
+ FFMPEG ffmpeg(0);
+ if( ffmpeg.init_decoder(path) ) return -1;
+ return ffmpeg.get_initial_timecode(data_type, channel, rate);
+}
+