#include "libmjpeg.h"
#include "mainerror.h"
#include "mwindow.h"
+#include "preferences.h"
#include "vframe.h"
#ifdef FFMPEG3
fst->dequeue(this);
}
+// Replace this FFrame's AVFrame with `frame` (typically a HW surface),
+// releasing the previously held frame first (av_frame_free(0-ptr) is safe).
+// Takes ownership of `frame`.
+void FFrame::set_hw_frame(AVFrame *frame)
+{
+ av_frame_free(&frm);
+ frm = frame;
+}
+
int FFAudioStream::read(float *fp, long len)
{
long n = len * nch;
seek_pos = curr_pos = 0;
seeked = 1; eof = 0;
reading = writing = 0;
+ hw_pixfmt = AV_PIX_FMT_NONE;
+ hw_device_ctx = 0;
flushed = 0;
need_packet = 1;
frame = fframe = 0;
+ probe_frame = 0;
bsfc = 0;
stats_fp = 0;
stats_filename = 0;
if( reading > 0 || writing > 0 ) avcodec_close(avctx);
if( avctx ) avcodec_free_context(&avctx);
if( fmt_ctx ) avformat_close_input(&fmt_ctx);
+ if( hw_device_ctx ) av_buffer_unref(&hw_device_ctx);
if( bsfc ) av_bsf_free(&bsfc);
while( frms.first ) frms.remove(frms.first);
if( filter_graph ) avfilter_graph_free(&filter_graph);
if( frame ) av_frame_free(&frame);
if( fframe ) av_frame_free(&fframe);
+ if( probe_frame ) av_frame_free(&probe_frame);
delete frm_lock;
if( stats_fp ) fclose(stats_fp);
if( stats_in ) av_freep(&stats_in);
return writing;
}
+// this is a global parameter that really should be in the context
+static AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE; // protected by ff_lock
+
+// goofy maneuver to attach a hw_format to an av_context
+// Each generated get_hw_<fn>() ignores the offered list and always reports
+// <fmt>; once the wanted format is known, ctx->get_format is repointed at
+// one of these so later negotiations no longer read the global hw_pix_fmt.
+#define GET_HW_PIXFMT(fn, fmt) \
+static AVPixelFormat get_hw_##fn(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) { \
+ return fmt; \
+}
+GET_HW_PIXFMT(vaapi, AV_PIX_FMT_VAAPI)
+GET_HW_PIXFMT(vdpau, AV_PIX_FMT_VDPAU)
+GET_HW_PIXFMT(cuda, AV_PIX_FMT_CUDA)
+GET_HW_PIXFMT(nv12, AV_PIX_FMT_NV12)
+
+// Initial AVCodecContext.get_format callback: scan the codec's offered
+// formats for the globally requested hw_pix_fmt and, on a hit, pin the
+// matching per-format callback onto the context.  On failure the global
+// is reset and AV_PIX_FMT_NONE is returned (decoder open will fail over
+// to SW decode).
+static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
+ const enum AVPixelFormat *pix_fmts)
+{
+ for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p ) {
+ if( *p != hw_pix_fmt ) continue;
+ switch( *p ) {
+ case AV_PIX_FMT_VAAPI: ctx->get_format = get_hw_vaapi; return *p;
+ case AV_PIX_FMT_VDPAU: ctx->get_format = get_hw_vdpau; return *p;
+ case AV_PIX_FMT_CUDA: ctx->get_format = get_hw_cuda; return *p;
+ case AV_PIX_FMT_NV12: ctx->get_format = get_hw_nv12; return *p;
+ default:
+ fprintf(stderr, "Unknown HW surface format: %s\n",
+ av_get_pix_fmt_name(*p));
+ continue;
+ }
+ }
+ fprintf(stderr, "Failed to get HW surface format.\n");
+ return hw_pix_fmt = AV_PIX_FMT_NONE;
+}
+
+
+// Base-class stub: streams without HW decode support report no device,
+// so decode_activate() stays on the SW path.
+AVHWDeviceType FFStream::decode_hw_activate()
+{
+ return AV_HWDEVICE_TYPE_NONE;
+}
+
+// Base-class stub: no HW pixel-format setup; 0 = proceed with SW decode.
+int FFStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
+{
+ return 0;
+}
+
int FFStream::decode_activate()
{
if( reading < 0 && (reading=ffmpeg->decode_activate()) > 0 ) {
AVDictionary *copts = 0;
av_dict_copy(&copts, ffmpeg->opts, 0);
int ret = 0;
+ AVHWDeviceType hw_type = decode_hw_activate();
+
// this should be avformat_copy_context(), but no copy avail
ret = avformat_open_input(&fmt_ctx,
ffmpeg->fmt_ctx->url, ffmpeg->fmt_ctx->iformat, &copts);
st = fmt_ctx->streams[fidx];
load_markers();
}
- if( ret >= 0 && st != 0 ) {
+ while( ret >= 0 && st != 0 && !reading ) {
AVCodecID codec_id = st->codecpar->codec_id;
- AVCodec *decoder = avcodec_find_decoder(codec_id);
+ AVCodec *decoder = 0;
+ if( is_video() ) {
+ if( ffmpeg->opt_video_decoder )
+ decoder = avcodec_find_decoder_by_name(ffmpeg->opt_video_decoder);
+ else
+ ffmpeg->video_codec_remaps.update(codec_id, decoder);
+ }
+ else if( is_audio() ) {
+ if( ffmpeg->opt_audio_decoder )
+ decoder = avcodec_find_decoder_by_name(ffmpeg->opt_audio_decoder);
+ else
+ ffmpeg->audio_codec_remaps.update(codec_id, decoder);
+ }
+ if( !decoder )
+ decoder = avcodec_find_decoder(codec_id);
avctx = avcodec_alloc_context3(decoder);
if( !avctx ) {
eprintf(_("cant allocate codec context\n"));
ret = AVERROR(ENOMEM);
}
+ if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+ ret = decode_hw_format(decoder, hw_type);
+ }
if( ret >= 0 ) {
avcodec_parameters_to_context(avctx, st->codecpar);
if( !av_dict_get(copts, "threads", NULL, 0) )
avctx->thread_count = ffmpeg->ff_cpus();
ret = avcodec_open2(avctx, decoder, &copts);
}
- if( ret >= 0 ) {
- reading = 1;
+ if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+ AVFrame *frame = av_frame_alloc();
+ if( !frame ) {
+ fprintf(stderr, "FFStream::decode_activate: av_frame_alloc failed\n");
+ ret = AVERROR(ENOMEM);
+ }
+ if( ret >= 0 )
+ ret = decode(frame);
+ }
+ if( ret < 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+ ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
+ ffmpeg->fmt_ctx->url);
+ avcodec_close(avctx);
+ avcodec_free_context(&avctx);
+ av_buffer_unref(&hw_device_ctx);
+ hw_device_ctx = 0;
+ av_frame_free(&frame);
+ hw_type = AV_HWDEVICE_TYPE_NONE;
+ int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
+ int idx = st->index;
+ av_seek_frame(fmt_ctx, idx, 0, flags);
+ need_packet = 1; flushed = 0;
+ seeked = 1; st_eof(0);
+ ret = 0;
+ continue;
}
+ probe_frame = frame;
+ if( ret >= 0 )
+ reading = 1;
else
eprintf(_("open decoder failed\n"));
}
- else
- eprintf(_("can't clone input file\n"));
+ if( ret < 0 )
+ eprintf(_("can't open input file: %s\n"), ffmpeg->fmt_ctx->url);
av_dict_free(&copts);
ff_unlock();
}
int FFStream::decode(AVFrame *frame)
{
+ if( probe_frame ) { // hw probe reads first frame
+ av_frame_ref(frame, probe_frame);
+ av_frame_free(&probe_frame);
+ return 1;
+ }
int ret = 0;
int retries = MAX_RETRY;
AVPacket *pkt = ret > 0 ? (AVPacket*)ipkt : 0;
if( pkt ) {
if( pkt->stream_index != st->index ) continue;
- if( !pkt->data | !pkt->size ) continue;
+ if( !pkt->data || !pkt->size ) continue;
}
if( (ret=avcodec_send_packet(avctx, pkt)) < 0 ) {
- ff_err(ret, "FFStream::decode: avcodec_send_packet failed\n");
+ ff_err(ret, "FFStream::decode: avcodec_send_packet failed.\nfile:%s\n",
+ ffmpeg->fmt_ctx->url);
break;
}
need_packet = 0;
}
}
if( ret < 0 )
- ff_err(ret, "FFStream::write_packet: write packet failed\n");
+ ff_err(ret, "FFStream::write_packet: write packet failed.\nfile:%s\n",
+ ffmpeg->fmt_ctx->url);
return ret;
}
if( ret < 0 ) break;
}
}
- ff_err(ret, "FFStream::encode_frame: encode failed\n");
+ ff_err(ret, "FFStream::encode_frame: encode failed.\nfile: %s\n",
+ ffmpeg->fmt_ctx->url);
return -1;
}
close_stats_file();
}
if( ret < 0 )
- ff_err(ret, "FFStream::flush");
+ ff_err(ret, "FFStream::flush failed\n:file:%s\n",
+ ffmpeg->fmt_ctx->url);
return ret >= 0 ? 0 : 1;
}
if( avctx->stats_out && (ret=strlen(avctx->stats_out)) > 0 ) {
int len = fwrite(avctx->stats_out, 1, ret, stats_fp);
if( ret != len )
- ff_err(ret = AVERROR(errno), "FFStream::write_stats_file");
+ ff_err(ret = AVERROR(errno), "FFStream::write_stats_file.\n%file:%s\n",
+ ffmpeg->fmt_ctx->url);
}
return ret;
}
tstmp = av_rescale_q(tstmp, time_base, AV_TIME_BASE_Q);
idx = -1;
#endif
-
+ av_frame_free(&probe_frame);
avcodec_flush_buffers(avctx);
avformat_flush(fmt_ctx);
#if 0
//some codecs need more than one pkt to resync
if( ret == AVERROR_INVALIDDATA ) ret = 0;
if( ret < 0 ) {
- ff_err(ret, "FFStream::avcodec_send_packet failed\n");
+ ff_err(ret, "FFStream::avcodec_send_packet failed.\nseek:%s\n",
+ ffmpeg->fmt_ctx->url);
break;
}
}
frame->best_effort_timestamp = AV_NOPTS_VALUE;
int ret = avcodec_receive_frame(avctx, frame);
if( ret < 0 ) {
- if( first_frame || ret == AVERROR(EAGAIN) ) return 0;
+ if( first_frame ) return 0;
+ if( ret == AVERROR(EAGAIN) ) return 0;
if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
- ff_err(ret, "FFAudioStream::decode_frame: Could not read audio frame\n");
+ ff_err(ret, "FFAudioStream::decode_frame: Could not read audio frame.\nfile:%s\n",
+ ffmpeg->fmt_ctx->url);
return -1;
}
int64_t pkt_ts = frame->best_effort_timestamp;
}
+// Video stream ctor: now also initializes the FFVideoConvert base with the
+// UI preferences so pixel conversion can honor the configured yuv
+// color space/range.
FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
- : FFStream(ffmpeg, strm, fidx)
+ : FFStream(ffmpeg, strm, fidx),
+ FFVideoConvert(ffmpeg->ff_prefs())
{
this->idx = idx;
width = height = 0;
length = 0;
interlaced = 0;
top_field_first = 0;
+// -1 = unknown; filled in later from codec parameters or preferences
+ color_space = -1;
+ color_range = -1;
}
FFVideoStream::~FFVideoStream()
{
}
+// Resolve the HW decode device for this video stream.
+// Device-name precedence: per-file option (opt_hw_dev), then the
+// CIN_HW_DEV environment variable, then the global preference
+// (ff_hw_dev).  "none" (or its translation) means SW decode.
+// An unrecognized name prints the available device types to stderr and
+// falls through to AV_HWDEVICE_TYPE_NONE.
+AVHWDeviceType FFVideoStream::decode_hw_activate()
+{
+ AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
+ const char *hw_dev = ffmpeg->opt_hw_dev;
+ if( !hw_dev ) hw_dev = getenv("CIN_HW_DEV");
+ if( !hw_dev ) hw_dev = ffmpeg->ff_hw_dev();
+ if( hw_dev && *hw_dev &&
+ strcmp("none", hw_dev) && strcmp(_("none"), hw_dev) ) {
+ type = av_hwdevice_find_type_by_name(hw_dev);
+ if( type == AV_HWDEVICE_TYPE_NONE ) {
+ fprintf(stderr, "Device type %s is not supported.\n", hw_dev);
+ fprintf(stderr, "Available device types:");
+ while( (type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE )
+ fprintf(stderr, " %s", av_hwdevice_get_type_name(type));
+ fprintf(stderr, "\n");
+ }
+ }
+ return type;
+}
+
+// Find a decoder HW config that matches device `type`, publish its pixel
+// format through the global hw_pix_fmt (decode path holds ff_lock), hook
+// get_hw_format onto avctx, and create the HW device context.
+// Returns 1 when HW decode is fully set up, -1 on any failure.
+int FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
+{
+ int ret = 0;
+ hw_pix_fmt = AV_PIX_FMT_NONE;
+ for( int i=0; ; ++i ) {
+ const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
+ if( !config ) {
+ fprintf(stderr, "Decoder %s does not support device type %s.\n",
+ decoder->name, av_hwdevice_get_type_name(type));
+ ret = -1;
+ break;
+ }
+ if( (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) != 0 &&
+ config->device_type == type ) {
+ hw_pix_fmt = config->pix_fmt;
+ break;
+ }
+ }
+// AV_PIX_FMT_NONE is -1, so >= 0 means a usable config was found
+ if( hw_pix_fmt >= 0 ) {
+ hw_pixfmt = hw_pix_fmt;
+ avctx->get_format = get_hw_format;
+ ret = av_hwdevice_ctx_create(&hw_device_ctx, type, 0, 0, 0);
+ if( ret >= 0 ) {
+ avctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+ ret = 1;
+ }
+ else {
+ ff_err(ret, "Failed HW device create.\ndev:%s\n",
+ av_hwdevice_get_type_name(type));
+ ret = -1;
+ }
+ }
+ return ret;
+}
+
+// Set up a VAAPI encode pipeline on avctx: create a HW device, wrap it in
+// a frames context (NV12 sw frames uploaded to VAAPI surfaces sized to the
+// stream), and attach it as avctx->hw_frames_ctx.  Returns the activated
+// device type, or AV_HWDEVICE_TYPE_NONE on any failure so the caller can
+// fall back to SW encoding.  Only vaapi is currently supported.
+AVHWDeviceType FFVideoStream::encode_hw_activate(const char *hw_dev)
+{
+ AVBufferRef *hw_device_ctx = 0;
+ AVBufferRef *hw_frames_ref = 0;
+ AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
+ if( strcmp(_("none"), hw_dev) ) {
+ type = av_hwdevice_find_type_by_name(hw_dev);
+ if( type != AV_HWDEVICE_TYPE_VAAPI ) {
+ fprintf(stderr, "currently, only vaapi hw encode is supported\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ }
+ if( type != AV_HWDEVICE_TYPE_NONE ) {
+ int ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, 0, 0, 0);
+ if( ret < 0 ) {
+ ff_err(ret, "Failed to create a HW device.\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ }
+ if( type != AV_HWDEVICE_TYPE_NONE ) {
+ hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
+ if( !hw_frames_ref ) {
+ fprintf(stderr, "Failed to create HW frame context.\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ }
+ if( type != AV_HWDEVICE_TYPE_NONE ) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
+ frames_ctx->format = AV_PIX_FMT_VAAPI;
+ frames_ctx->sw_format = AV_PIX_FMT_NV12;
+ frames_ctx->width = width;
+ frames_ctx->height = height;
+ frames_ctx->initial_pool_size = 0; // 200;
+ int ret = av_hwframe_ctx_init(hw_frames_ref);
+ if( ret >= 0 ) {
+ avctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
+ if( !avctx->hw_frames_ctx ) ret = AVERROR(ENOMEM);
+ }
+ if( ret < 0 ) {
+ ff_err(ret, "Failed to initialize HW frame context.\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ av_buffer_unref(&hw_frames_ref);
+ }
+// drop the local device reference; av_hwframe_ctx_alloc takes its own
+// internal ref, so without this every call (and every failure path)
+// leaked one device-context reference.  Safe on the null/early paths too.
+ av_buffer_unref(&hw_device_ctx);
+ return type;
+}
+
+// For a VAAPI encode context, upload the sw picture into a freshly
+// allocated HW surface and swap it into `picture` (set_hw_frame frees the
+// old frame and takes ownership of hw_frm).  Non-VAAPI contexts pass
+// through untouched.  Returns 0 on success / pass-through, or a negative
+// AVERROR on upload failure (hw_frm is freed on that path).
+int FFVideoStream::encode_hw_write(FFrame *picture)
+{
+ int ret = 0;
+ AVFrame *hw_frm = 0;
+ switch( avctx->pix_fmt ) {
+ case AV_PIX_FMT_VAAPI:
+ hw_frm = av_frame_alloc();
+ if( !hw_frm ) { ret = AVERROR(ENOMEM); break; }
+ ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, hw_frm, 0);
+ if( ret < 0 ) break;
+ ret = av_hwframe_transfer_data(hw_frm, *picture, 0);
+ if( ret < 0 ) break;
+ picture->set_hw_frame(hw_frm);
+ return 0;
+ default:
+ return 0;
+ }
+ av_frame_free(&hw_frm);
+ ff_err(ret, "Error while transferring frame data to GPU.\n");
+ return ret;
+}
+
int FFVideoStream::decode_frame(AVFrame *frame)
{
int first_frame = seeked; seeked = 0;
int ret = avcodec_receive_frame(avctx, frame);
if( ret < 0 ) {
- if( first_frame || ret == AVERROR(EAGAIN) ) return 0;
+ if( first_frame ) return 0;
if( ret == AVERROR(EAGAIN) ) return 0;
if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
- ff_err(ret, "FFVideoStream::decode_frame: Could not read video frame\n");
+ ff_err(ret, "FFVideoStream::decode_frame: Could not read video frame.\nfile:%s\n,",
+ ffmpeg->fmt_ctx->url);
return -1;
}
int64_t pkt_ts = frame->best_effort_timestamp;
int FFVideoStream::init_frame(AVFrame *picture)
{
- picture->format = avctx->pix_fmt;
+ switch( avctx->pix_fmt ) {
+ case AV_PIX_FMT_VAAPI:
+ picture->format = AV_PIX_FMT_NV12;
+ break;
+ default:
+ picture->format = avctx->pix_fmt;
+ break;
+ }
picture->width = avctx->width;
picture->height = avctx->height;
int ret = av_frame_get_buffer(picture, 32);
frame->pts = curr_pos;
ret = convert_pixfmt(vframe, frame);
}
+ if( ret >= 0 && avctx->hw_frames_ctx )
+ encode_hw_write(picture);
if( ret >= 0 ) {
picture->queue(curr_pos);
++curr_pos;
frame->interlaced_frame = interlaced;
frame->top_field_first = top_field_first;
}
+ if( frame && frame->format == AV_PIX_FMT_VAAPI ) { // ugly
+ int ret = avcodec_send_frame(avctx, frame);
+ for( int retry=MAX_RETRY; !ret && --retry>=0; ) {
+ FFPacket pkt; av_init_packet(pkt);
+ pkt->data = NULL; pkt->size = 0;
+ if( (ret=avcodec_receive_packet(avctx, pkt)) < 0 ) {
+ if( ret == AVERROR(EAGAIN) ) ret = 0; // weird
+ break;
+ }
+ ret = write_packet(pkt);
+ pkt->stream_index = 0;
+ av_packet_unref(pkt);
+ }
+ if( ret < 0 ) {
+ ff_err(ret, "FFStream::encode_frame: vaapi encode failed.\nfile: %s\n",
+ ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ return 0;
+ }
return FFStream::encode_frame(frame);
}
}
int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
-{
+{ // picture = vframe
int cmodel = frame->get_color_model();
AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
if( ofmt == AV_PIX_FMT_NB ) return -1;
}
AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
+ FFVideoStream *vid =(FFVideoStream *)this;
+ if( pix_fmt == vid->hw_pixfmt ) {
+ int ret = 0;
+ if( !sw_frame && !(sw_frame=av_frame_alloc()) )
+ ret = AVERROR(ENOMEM);
+ if( !ret ) {
+ ret = av_hwframe_transfer_data(sw_frame, ip, 0);
+ ip = sw_frame;
+ pix_fmt = (AVPixelFormat)ip->format;
+ }
+ if( ret < 0 ) {
+ eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"),
+ vid->ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ }
convert_ctx = sws_getCachedContext(convert_ctx, ip->width, ip->height, pix_fmt,
frame->get_w(), frame->get_h(), ofmt, SWS_POINT, NULL, NULL, NULL);
if( !convert_ctx ) {
" sws_getCachedContext() failed\n");
return -1;
}
+
+ int color_range = 0;
+ switch( preferences->yuv_color_range ) {
+ case BC_COLORS_JPEG: color_range = 1; break;
+ case BC_COLORS_MPEG: color_range = 0; break;
+ }
+ int color_space = SWS_CS_ITU601;
+ switch( preferences->yuv_color_space ) {
+ case BC_COLORS_BT601: color_space = SWS_CS_ITU601; break;
+ case BC_COLORS_BT709: color_space = SWS_CS_ITU709; break;
+ case BC_COLORS_BT2020: color_space = SWS_CS_BT2020; break;
+ }
+ const int *color_table = sws_getCoefficients(color_space);
+
+ int *inv_table, *table, src_range, dst_range;
+ int brightness, contrast, saturation;
+ if( !sws_getColorspaceDetails(convert_ctx,
+ &inv_table, &src_range, &table, &dst_range,
+ &brightness, &contrast, &saturation) ) {
+ if( src_range != color_range || dst_range != color_range ||
+ inv_table != color_table || table != color_table )
+ sws_setColorspaceDetails(convert_ctx,
+ color_table, color_range, color_table, color_range,
+ brightness, contrast, saturation);
+ }
+
int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
ipic->data, ipic->linesize);
if( ret < 0 ) {
- ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
+ ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\nfile: %s\n",
+ vid->ffmpeg->fmt_ctx->url);
return -1;
}
return 0;
}
int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
-{
+{ // vframe = picture
int cmodel = frame->get_color_model();
AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
if( ifmt == AV_PIX_FMT_NB ) return -1;
" sws_getCachedContext() failed\n");
return -1;
}
+
+
+ int color_range = 0;
+ switch( preferences->yuv_color_range ) {
+ case BC_COLORS_JPEG: color_range = 1; break;
+ case BC_COLORS_MPEG: color_range = 0; break;
+ }
+ int color_space = SWS_CS_ITU601;
+ switch( preferences->yuv_color_space ) {
+ case BC_COLORS_BT601: color_space = SWS_CS_ITU601; break;
+ case BC_COLORS_BT709: color_space = SWS_CS_ITU709; break;
+ case BC_COLORS_BT2020: color_space = SWS_CS_BT2020; break;
+ }
+ const int *color_table = sws_getCoefficients(color_space);
+
+ int *inv_table, *table, src_range, dst_range;
+ int brightness, contrast, saturation;
+ if( !sws_getColorspaceDetails(convert_ctx,
+ &inv_table, &src_range, &table, &dst_range,
+ &brightness, &contrast, &saturation) ) {
+ if( dst_range != color_range || table != color_table )
+ sws_setColorspaceDetails(convert_ctx,
+ inv_table, src_range, color_table, color_range,
+ brightness, contrast, saturation);
+ }
+
int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
op->data, op->linesize);
if( ret < 0 ) {
opt_duration = -1;
opt_video_filter = 0;
opt_audio_filter = 0;
+ opt_hw_dev = 0;
+ opt_video_decoder = 0;
+ opt_audio_decoder = 0;
fflags = 0;
char option_path[BCTEXTLEN];
set_option_path(option_path, "%s", "ffmpeg.opts");
av_dict_free(&opts);
delete [] opt_video_filter;
delete [] opt_audio_filter;
+ delete [] opt_hw_dev;
}
int FFMPEG::check_sample_rate(AVCodec *codec, int sample_rate)
int FFMPEG::get_ff_option(const char *nm, const char *options, char *value)
{
- for( const char *cp=options; *cp!=0; ) {
- char line[BCTEXTLEN], *bp = line, *ep = bp+sizeof(line)-1;
- while( bp < ep && *cp && *cp!='\n' ) *bp++ = *cp++;
- if( *cp ) ++cp;
- *bp = 0;
- if( !line[0] || line[0] == '#' || line[0] == ';' ) continue;
- char key[BCSTRLEN], val[BCTEXTLEN];
- if( FFMPEG::scan_option_line(line, key, val) ) continue;
- if( !strcmp(key, nm) ) {
- strncpy(value, val, BCSTRLEN);
- return 0;
- }
- }
- return 1;
+ for( const char *cp=options; *cp!=0; ) {
+ char line[BCTEXTLEN], *bp = line, *ep = bp+sizeof(line)-1;
+ while( bp < ep && *cp && *cp!='\n' ) *bp++ = *cp++;
+ if( *cp ) ++cp;
+ *bp = 0;
+ if( !line[0] || line[0] == '#' || line[0] == ';' ) continue;
+ char key[BCSTRLEN], val[BCTEXTLEN];
+ if( FFMPEG::scan_option_line(line, key, val) ) continue;
+ if( !strcmp(key, nm) ) {
+ strncpy(value, val, BCSTRLEN);
+ return 0;
+ }
+ }
+ return 1;
}
void FFMPEG::scan_audio_options(Asset *asset, EDL *edl)
{
char options_path[BCTEXTLEN];
set_option_path(options_path, "audio/%s", asset->acodec);
- if( !load_options(options_path,
+ if( !load_options(options_path,
asset->ff_audio_options,
sizeof(asset->ff_audio_options)) )
scan_audio_options(asset, edl);
{
char options_path[BCTEXTLEN];
set_option_path(options_path, "video/%s", asset->vcodec);
- if( !load_options(options_path,
+ if( !load_options(options_path,
asset->ff_video_options,
sizeof(asset->ff_video_options)) )
scan_video_options(asset, edl);
}
+// Placeholder: per-format option scanning is not implemented yet; kept so
+// load_format_options mirrors the audio/video load/scan pattern.
+void FFMPEG::scan_format_options(Asset *asset, EDL *edl)
+{
+}
+
+// Load muxer options for asset->fformat from the "format/<fformat>"
+// preset file into asset->ff_format_options; when load_options reports
+// nothing loaded, fall back to scan_format_options (currently a no-op).
+void FFMPEG::load_format_options(Asset *asset, EDL *edl)
+{
+ char options_path[BCTEXTLEN];
+ set_option_path(options_path, "format/%s", asset->fformat);
+ if( !load_options(options_path,
+ asset->ff_format_options,
+ sizeof(asset->ff_format_options)) )
+ scan_format_options(asset, edl);
+}
+
int FFMPEG::load_defaults(const char *path, const char *type,
char *codec, char *codec_options, int len)
{
if( asset->format != FILE_FFMPEG ) return;
if( text != asset->fformat )
strcpy(asset->fformat, text);
+ if( !asset->ff_format_options[0] )
+ load_format_options(asset, edl);
if( asset->audio_data && !asset->ff_audio_options[0] ) {
if( !load_defaults("audio", text, asset->acodec,
asset->ff_audio_options, sizeof(asset->ff_audio_options)) )
if( !fp ) return 0;
int ret = read_options(fp, options, opts);
fclose(fp);
- AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
- if( tag ) st->id = strtol(tag->value,0,0);
+ if( !ret && st ) {
+ AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
+ if( tag ) st->id = strtol(tag->value,0,0);
+ }
return ret;
}
+// One codec remap entry: owns heap copies of the source/target names.
+FFCodecRemap::FFCodecRemap()
+{
+ old_codec = 0;
+ new_codec = 0;
+}
+FFCodecRemap::~FFCodecRemap()
+{
+ delete [] old_codec;
+ delete [] new_codec;
+}
+
+// Parse a "old_codec = new_codec" spec and append a remap entry.
+// Returns 0 on success, 1 on a malformed line.
+int FFCodecRemaps::add(const char *val)
+{
+ char old_codec[BCSTRLEN], new_codec[BCSTRLEN];
+// was "%63[a-zA-z...]": the A-z range typo also matched [ \ ] ^ ` and the
+// second class could not match uppercase codec names at all
+ if( sscanf(val, " %63[a-zA-Z0-9_-] = %63[a-zA-Z0-9_-]",
+ &old_codec[0], &new_codec[0]) != 2 ) return 1;
+ FFCodecRemap &remap = append();
+ remap.old_codec = cstrdup(old_codec);
+ remap.new_codec = cstrdup(new_codec);
+ return 0;
+}
+
+
+// Look up the default decoder name for `codec_id` in this remap table
+// (linear scan, last matching entry wins) and, on a hit, replace
+// `decoder` with the remapped decoder.  Returns 0 when remapped, 1 when
+// no table entry matches, -1 when either the original or the remapped
+// decoder cannot be found.
+int FFCodecRemaps::update(AVCodecID &codec_id, AVCodec *&decoder)
+{
+ AVCodec *codec = avcodec_find_decoder(codec_id);
+ if( !codec ) return -1;
+ const char *name = codec->name;
+ FFCodecRemaps &map = *this;
+ int k = map.size();
+ while( --k >= 0 && strcmp(map[k].old_codec, name) );
+ if( k < 0 ) return 1;
+ const char *new_codec = map[k].new_codec;
+ codec = avcodec_find_decoder_by_name(new_codec);
+ if( !codec ) return -1;
+ decoder = codec;
+ return 0;
+}
+
int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
{
int ret = 0, no = 0;
if( !ret ) {
if( !strcmp(key, "duration") )
opt_duration = strtod(val, 0);
+ else if( !strcmp(key, "video_decoder") )
+ opt_video_decoder = cstrdup(val);
+ else if( !strcmp(key, "audio_decoder") )
+ opt_audio_decoder = cstrdup(val);
+ else if( !strcmp(key, "remap_video_decoder") )
+ video_codec_remaps.add(val);
+ else if( !strcmp(key, "remap_audio_decoder") )
+ audio_codec_remaps.add(val);
else if( !strcmp(key, "video_filter") )
opt_video_filter = cstrdup(val);
else if( !strcmp(key, "audio_filter") )
opt_audio_filter = cstrdup(val);
+ else if( !strcmp(key, "cin_hw_dev") )
+ opt_hw_dev = cstrdup(val);
else if( !strcmp(key, "loglevel") )
set_loglevel(val);
else
if( ffvideo.size() > 0 )
report("\n%d video stream%s\n",ffvideo.size(), ffvideo.size()!=1 ? "s" : "");
for( int vidx=0; vidx<ffvideo.size(); ++vidx ) {
+ const char *unkn = _("(unkn)");
FFVideoStream *vid = ffvideo[vidx];
AVStream *st = vid->st;
AVCodecID codec_id = st->codecpar->codec_id;
report(_("vid%d (%d), id 0x%06x:\n"), vid->idx, vid->fidx, codec_id);
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
- report(" video%d %s", vidx+1, desc ? desc->name : " (unkn)");
+ report(" video%d %s ", vidx+1, desc ? desc->name : unkn);
report(" %dx%d %5.2f", vid->width, vid->height, vid->frame_rate);
AVPixelFormat pix_fmt = (AVPixelFormat)st->codecpar->format;
const char *pfn = av_get_pix_fmt_name(pix_fmt);
- report(" pix %s\n", pfn ? pfn : "(unkn)");
+ report(" pix %s\n", pfn ? pfn : unkn);
+ enum AVColorSpace space = st->codecpar->color_space;
+ const char *nm = av_color_space_name(space);
+ report(" color space:%s", nm ? nm : unkn);
+ enum AVColorRange range = st->codecpar->color_range;
+ const char *rg = av_color_range_name(range);
+ report("/ range:%s\n", rg ? rg : unkn);
double secs = to_secs(st->duration, st->time_base);
int64_t length = secs * vid->frame_rate + 0.5;
double ofs = to_secs((vid->nudge - st->start_time), st->time_base);
ff_lock("FFMPEG::init_decoder");
av_register_all();
char file_opts[BCTEXTLEN];
- char *bp = strrchr(strcpy(file_opts, filename), '/');
- char *sp = strrchr(!bp ? file_opts : bp, '.');
+ strcpy(file_opts, filename);
+ char *bp = strrchr(file_opts, '/');
+ if( !bp ) bp = file_opts;
+ char *sp = strrchr(bp, '.');
if( !sp ) sp = bp + strlen(bp);
FILE *fp = 0;
AVInputFormat *ifmt = 0;
vid->width = avpar->width;
vid->height = avpar->height;
vid->frame_rate = !framerate.den ? 0 : (double)framerate.num / framerate.den;
+ switch( avpar->color_range ) {
+ case AVCOL_RANGE_MPEG:
+ vid->color_range = BC_COLORS_MPEG;
+ break;
+ case AVCOL_RANGE_JPEG:
+ vid->color_range = BC_COLORS_JPEG;
+ break;
+ default:
+ vid->color_range = !file_base ? BC_COLORS_JPEG :
+ file_base->file->preferences->yuv_color_range;
+ break;
+ }
+ switch( avpar->color_space ) {
+ case AVCOL_SPC_BT470BG:
+ case AVCOL_SPC_SMPTE170M:
+ vid->color_space = BC_COLORS_BT601;
+ break;
+ case AVCOL_SPC_BT709:
+ vid->color_space = BC_COLORS_BT709;
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ case AVCOL_SPC_BT2020_CL:
+ vid->color_space = BC_COLORS_BT2020;
+ break;
+ default:
+ vid->color_space = !file_base ? BC_COLORS_BT601 :
+ file_base->file->preferences->yuv_color_space;
+ break;
+ }
double secs = to_secs(st->duration, st->time_base);
vid->length = secs * vid->frame_rate;
vid->aspect_ratio = (double)st->sample_aspect_ratio.num / st->sample_aspect_ratio.den;
}
if( bad_time && !(fflags & FF_BAD_TIMES) ) {
fflags |= FF_BAD_TIMES;
- printf("FFMPEG::open_decoder: some stream have bad times: %s\n",
+ printf(_("FFMPEG::open_decoder: some stream have bad times: %s\n"),
fmt_ctx->url);
}
ff_unlock();
vid->width = asset->width;
vid->height = asset->height;
vid->frame_rate = asset->frame_rate;
-
+ if( (vid->color_range = asset->ff_color_range) < 0 )
+ vid->color_range = file_base->file->preferences->yuv_color_range;
+ switch( vid->color_range ) {
+ case BC_COLORS_MPEG: ctx->color_range = AVCOL_RANGE_MPEG; break;
+ case BC_COLORS_JPEG: ctx->color_range = AVCOL_RANGE_JPEG; break;
+ }
+ if( (vid->color_space = asset->ff_color_space) < 0 )
+ vid->color_space = file_base->file->preferences->yuv_color_space;
+ switch( vid->color_space ) {
+ case BC_COLORS_BT601: ctx->colorspace = AVCOL_SPC_SMPTE170M; break;
+ case BC_COLORS_BT709: ctx->colorspace = AVCOL_SPC_BT709; break;
+ case BC_COLORS_BT2020: ctx->colorspace = AVCOL_SPC_BT2020_NCL; break;
+ }
AVPixelFormat pix_fmt = av_get_pix_fmt(asset->ff_pixel_format);
+ if( opt_hw_dev != 0 ) {
+ AVHWDeviceType hw_type = vid->encode_hw_activate(opt_hw_dev);
+ switch( hw_type ) {
+ case AV_HWDEVICE_TYPE_VAAPI:
+ pix_fmt = AV_PIX_FMT_VAAPI;
+ break;
+ case AV_HWDEVICE_TYPE_NONE:
+ default: break;
+ }
+ }
if( pix_fmt == AV_PIX_FMT_NONE )
pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
ctx->pix_fmt = pix_fmt;
+
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int mask_w = (1<<desc->log2_chroma_w)-1;
ctx->width = (vid->width+mask_w) & ~mask_w;
fmt_ctx->url);
return -1;
}
-
+ if( !strcmp(file_format, "image2") ) {
+ Asset *asset = file_base->asset;
+ const char *filename = asset->path;
+ FILE *fp = fopen(filename,"w");
+ if( !fp ) {
+ eprintf(_("Cant write image2 header file: %s\n %m"), filename);
+ return 1;
+ }
+ fprintf(fp, "IMAGE2\n");
+ fprintf(fp, "# Frame rate: %f\n", asset->frame_rate);
+ fprintf(fp, "# Width: %d\n", asset->width);
+ fprintf(fp, "# Height: %d\n", asset->height);
+ fclose(fp);
+ }
int prog_id = 1;
AVProgram *prog = av_new_program(fmt_ctx, prog_id);
for( int i=0; i< ffvideo.size(); ++i )
char option_path[BCTEXTLEN];
set_option_path(option_path, "format/%s", file_format);
read_options(option_path, fopts, 1);
- ret = avformat_write_header(fmt_ctx, &fopts);
+ av_dict_copy(&fopts, opts, 0);
+ if( scan_options(file_base->asset->ff_format_options, fopts, 0) ) {
+ eprintf(_("bad format options %s\n"), file_base->asset->path);
+ ret = -1;
+ }
+ if( ret >= 0 )
+ ret = avformat_write_header(fmt_ctx, &fopts);
if( ret < 0 ) {
ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
fmt_ctx->url);
return ffvideo[stream]->aspect_ratio;
}
+// Renamed from ff_video_format: returns the codec (descriptor) name of
+// the given video stream.
-const char* FFMPEG::ff_video_format(int stream)
+const char* FFMPEG::ff_video_codec(int stream)
{
AVStream *st = ffvideo[stream]->st;
AVCodecID id = st->codecpar->codec_id;
return desc ? desc->name : _("Unknown");
}
+// BC_COLORS_* range probed for the stream, or -1 when still unknown.
+int FFMPEG::ff_color_range(int stream)
+{
+ return ffvideo[stream]->color_range;
+}
+
+// BC_COLORS_* color space probed for the stream, or -1 when still unknown.
+int FFMPEG::ff_color_space(int stream)
+{
+ return ffvideo[stream]->color_space;
+}
+
+
double FFMPEG::ff_frame_rate(int stream)
{
return ffvideo[stream]->frame_rate;
return file_base->file->cpus;
}
+// Preferred HW-device name from the global preferences
+// (assumes file_base is attached — callers on the decode path have one).
+const char *FFMPEG::ff_hw_dev()
+{
+ return &file_base->file->preferences->use_hw_dev[0];
+}
+
+// Global preferences, or null when this FFMPEG has no file attachment.
+Preferences *FFMPEG::ff_prefs()
+{
+ return !file_base ? 0 : file_base->file->preferences;
+}
+
int FFVideoStream::create_filter(const char *filter_spec, AVCodecParameters *avpar)
{
avfilter_register_all();