#include "libmjpeg.h"
#include "mainerror.h"
#include "mwindow.h"
+#include "preferences.h"
#include "vframe.h"
#ifdef FFMPEG3
fst->dequeue(this);
}
+void FFrame::set_hw_frame(AVFrame *frame)
+{
+ av_frame_free(&frm);
+ frm = frame;
+}
+
// NOTE(review): this span looks like fused diff hunks — the body below
// (seek/eof/hw-state resets, `return writing`) does not belong to a
// read(fp, len) implementation; confirm against the full source.
int FFAudioStream::read(float *fp, long len)
{
long n = len * nch;
seek_pos = curr_pos = 0;
seeked = 1; eof = 0;
reading = writing = 0;
- hw_dev = 0;
// hw decode state starts disabled; decode_hw_activate() may enable it later
hw_pixfmt = AV_PIX_FMT_NONE;
hw_device_ctx = 0;
flushed = 0;
return writing;
}
+// this is a global parameter that really should be in the context
static AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE; // protected by ff_lock
+
+// goofy maneuver to attach a hw_format to an av_context
+// Each generated get_hw_<fn>() is an avcodec get_format callback that
+// ignores the offered pix_fmts list and unconditionally reports the
+// compiled-in surface format.  get_hw_format() below installs the right
+// stub once the format has been negotiated.
+#define GET_HW_PIXFMT(fn, fmt) \
+static AVPixelFormat get_hw_##fn(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) { \
+ return fmt; \
+}
+GET_HW_PIXFMT(vaapi, AV_PIX_FMT_VAAPI)
+GET_HW_PIXFMT(vdpau, AV_PIX_FMT_VDPAU)
+GET_HW_PIXFMT(cuda, AV_PIX_FMT_CUDA)
+GET_HW_PIXFMT(nv12, AV_PIX_FMT_NV12)
// avcodec get_format callback: scan the codec's offered list for the
// globally requested hw_pix_fmt.  On a match, rebind ctx->get_format to
// the matching per-format stub so subsequent calls no longer depend on
// the global (which is only protected by ff_lock during open).
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
const enum AVPixelFormat *pix_fmts)
{
- for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p )
- if( *p == hw_pix_fmt ) return *p;
+ for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p ) {
+ if( *p != hw_pix_fmt ) continue;
+ switch( *p ) {
+ case AV_PIX_FMT_VAAPI: ctx->get_format = get_hw_vaapi; return *p;
+ case AV_PIX_FMT_VDPAU: ctx->get_format = get_hw_vdpau; return *p;
+ case AV_PIX_FMT_CUDA: ctx->get_format = get_hw_cuda; return *p;
+ case AV_PIX_FMT_NV12: ctx->get_format = get_hw_nv12; return *p;
+ default:
+// requested format has no stub; keep scanning the offered list
+ fprintf(stderr, "Unknown HW surface format: %s\n",
+ av_get_pix_fmt_name(*p));
+ continue;
+ }
+ }
// no acceptable surface format: disable hw decode for this open
fprintf(stderr, "Failed to get HW surface format.\n");
return hw_pix_fmt = AV_PIX_FMT_NONE;
}
return AV_HWDEVICE_TYPE_NONE;
}
-void FFStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
+int FFStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
{
+// base-class stub: no hw decode support; returning 0 makes the caller
+// clear hw_type and fall back to software decode
+ return 0;
}
int FFStream::decode_activate()
}
	while( ret >= 0 && st != 0 && !reading ) {
AVCodecID codec_id = st->codecpar->codec_id;
-	AVCodec *decoder = avcodec_find_decoder(codec_id);
+	AVCodec *decoder = 0;
+// decoder choice precedence: explicit *_decoder option by name, else the
+// remap table keyed on the stream's codec name, else (below) the default
+// decoder for the codec id
+ if( is_video() ) {
+ if( ffmpeg->opt_video_decoder )
+ decoder = avcodec_find_decoder_by_name(ffmpeg->opt_video_decoder);
+ else
+ ffmpeg->video_codec_remaps.update(codec_id, decoder);
+ }
+ else if( is_audio() ) {
+ if( ffmpeg->opt_audio_decoder )
+ decoder = avcodec_find_decoder_by_name(ffmpeg->opt_audio_decoder);
+ else
+ ffmpeg->audio_codec_remaps.update(codec_id, decoder);
+ }
+ if( !decoder )
+ decoder = avcodec_find_decoder(codec_id);
avctx = avcodec_alloc_context3(decoder);
if( !avctx ) {
eprintf(_("cant allocate codec context\n"));
ret = AVERROR(ENOMEM);
}
-	if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE )
-	decode_hw_format(decoder, hw_type);
-
+ if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
+// decode_hw_format now reports success (>0); on 0 hw decode is disabled
+// before the codec is opened
+ ret = decode_hw_format(decoder, hw_type);
+ if( !ret ) hw_type = AV_HWDEVICE_TYPE_NONE;
+ }
if( ret >= 0 ) {
avcodec_parameters_to_context(avctx, st->codecpar);
if( !av_dict_get(copts, "threads", NULL, 0) )
ret = avcodec_open2(avctx, decoder, &copts);
}
// hw probe: feed the first packet to verify the hw pipeline really
// initializes; on failure, rewind and retry the whole loop in sw mode
if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
-	ret = read_packet();
+ if( need_packet ) {
+ need_packet = 0;
+ ret = read_packet();
+ }
if( ret >= 0 ) {
AVPacket *pkt = (AVPacket*)ipkt;
- need_packet = 0;
ret = avcodec_send_packet(avctx, pkt);
if( ret < 0 || hw_pix_fmt == AV_PIX_FMT_NONE ) {
ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
av_buffer_unref(&hw_device_ctx);
hw_device_ctx = 0;
hw_type = AV_HWDEVICE_TYPE_NONE;
- flushed = 0;
- st_eof(0);
- need_packet = 1;
+// seek back to the start of the stream so the sw retry sees the
+// packet(s) consumed by the failed hw probe
+ int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
+ int idx = st->index;
+ av_seek_frame(fmt_ctx, idx, INT64_MIN, flags);
+ need_packet = 1; flushed = 0;
+ seeked = 1; st_eof(0);
ret = 0;
continue;
}
AVPacket *pkt = ret > 0 ? (AVPacket*)ipkt : 0;
if( pkt ) {
if( pkt->stream_index != st->index ) continue;
+// fixed: was bitwise '|', which still worked but obscured intent
- if( !pkt->data | !pkt->size ) continue;
+ if( !pkt->data || !pkt->size ) continue;
}
if( (ret=avcodec_send_packet(avctx, pkt)) < 0 ) {
ff_err(ret, "FFStream::decode: avcodec_send_packet failed.\nfile:%s\n",
frame->best_effort_timestamp = AV_NOPTS_VALUE;
int ret = avcodec_receive_frame(avctx, frame);
if( ret < 0 ) {
-	if( first_frame || ret == AVERROR(EAGAIN) ) return 0;
+// split for readability: first_frame (just seeked) suppresses any error,
+// EAGAIN just means the decoder needs more input — both are non-fatal
+ if( first_frame ) return 0;
+ if( ret == AVERROR(EAGAIN) ) return 0;
if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
ff_err(ret, "FFAudioStream::decode_frame: Could not read audio frame.\nfile:%s\n",
ffmpeg->fmt_ctx->url);
// Decide which hw decode device (if any) to use for this stream.
// Precedence: ffmpeg.opts "cin_hw_dev" option, then CIN_HW_DEV env var,
// then the preferences setting; empty or "none" (also localized) disables.
AVHWDeviceType FFVideoStream::decode_hw_activate()
{
AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
- const char *hw_dev = getenv("CIN_HW_DEV");
- if( hw_dev ) {
+ const char *hw_dev = ffmpeg->opt_hw_dev;
+ if( !hw_dev ) hw_dev = getenv("CIN_HW_DEV");
+ if( !hw_dev ) hw_dev = ffmpeg->ff_hw_dev();
+ if( hw_dev && *hw_dev &&
+ strcmp("none", hw_dev) && strcmp(_("none"), hw_dev) ) {
type = av_hwdevice_find_type_by_name(hw_dev);
if( type == AV_HWDEVICE_TYPE_NONE ) {
fprintf(stderr, "Device type %s is not supported.\n", hw_dev);
return type;
}
-void FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
+// now returns: 1 = hw device created, 0 = no usable hw config, <0 = error
+int FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
{
+ int ret = 0;
hw_pix_fmt = AV_PIX_FMT_NONE;
for( int i=0; ; ++i ) {
const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
// (loop body elided in this hunk: it sets hw_pix_fmt from a matching config)
if( hw_pix_fmt >= 0 ) {
hw_pixfmt = hw_pix_fmt;
avctx->get_format = get_hw_format;
-	int ret = av_hwdevice_ctx_create(&hw_device_ctx, type, 0, 0, 0);
-	if( ret >= 0 )
+ ret = av_hwdevice_ctx_create(&hw_device_ctx, type, 0, 0, 0);
+ if( ret >= 0 ) {
avctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+// success is reported as 1 so decode_activate keeps hw_type enabled
+ ret = 1;
+ }
else
ff_err(ret, "Failed HW device create.\ndev:%s\n",
av_hwdevice_get_type_name(type));
}
+ return ret;
+}
+
+AVHWDeviceType FFVideoStream::encode_hw_activate(const char *hw_dev)
+{
+ AVBufferRef *hw_device_ctx = 0;
+ AVBufferRef *hw_frames_ref = 0;
+ AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
+ if( strcmp(_("none"), hw_dev) ) {
+ type = av_hwdevice_find_type_by_name(hw_dev);
+ if( type != AV_HWDEVICE_TYPE_VAAPI ) {
+ fprintf(stderr, "currently, only vaapi hw encode is supported\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ }
+ if( type != AV_HWDEVICE_TYPE_NONE ) {
+ int ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, 0, 0, 0);
+ if( ret < 0 ) {
+ ff_err(ret, "Failed to create a HW device.\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ }
+ if( type != AV_HWDEVICE_TYPE_NONE ) {
+ hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
+ if( !hw_frames_ref ) {
+ fprintf(stderr, "Failed to create HW frame context.\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ }
+ if( type != AV_HWDEVICE_TYPE_NONE ) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
+ frames_ctx->format = AV_PIX_FMT_VAAPI;
+ frames_ctx->sw_format = AV_PIX_FMT_NV12;
+ frames_ctx->width = width;
+ frames_ctx->height = height;
+ frames_ctx->initial_pool_size = 0; // 200;
+ int ret = av_hwframe_ctx_init(hw_frames_ref);
+ if( ret >= 0 ) {
+ avctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
+ if( !avctx->hw_frames_ctx ) ret = AVERROR(ENOMEM);
+ }
+ if( ret < 0 ) {
+ ff_err(ret, "Failed to initialize HW frame context.\n");
+ type = AV_HWDEVICE_TYPE_NONE;
+ }
+ av_buffer_unref(&hw_frames_ref);
+ }
+ return type;
+}
+
+int FFVideoStream::encode_hw_write(FFrame *picture)
+{
+ int ret = 0;
+ AVFrame *hw_frm = 0;
+ switch( avctx->pix_fmt ) {
+ case AV_PIX_FMT_VAAPI:
+ hw_frm = av_frame_alloc();
+ if( !hw_frm ) { ret = AVERROR(ENOMEM); break; }
+ ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, hw_frm, 0);
+ if( ret < 0 ) break;
+ ret = av_hwframe_transfer_data(hw_frm, *picture, 0);
+ if( ret < 0 ) break;
+ picture->set_hw_frame(hw_frm);
+ return 0;
+ default:
+ return 0;
+ }
+ av_frame_free(&hw_frm);
+ ff_err(ret, "Error while transferring frame data to GPU.\n");
+ return ret;
}
int FFVideoStream::decode_frame(AVFrame *frame)
// a seek suppresses the first decode error (decoder warming up)
int first_frame = seeked; seeked = 0;
int ret = avcodec_receive_frame(avctx, frame);
if( ret < 0 ) {
-	if( first_frame || ret == AVERROR(EAGAIN) ) return 0;
+ if( first_frame ) return 0;
if( ret == AVERROR(EAGAIN) ) return 0;
if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
// NOTE(review): stray ',' after \n inside the format string below
ff_err(ret, "FFVideoStream::decode_frame: Could not read video frame.\nfile:%s\n,",
// Size and allocate an output AVFrame for encoding.
int FFVideoStream::init_frame(AVFrame *picture)
{
- picture->format = avctx->pix_fmt;
+// when encoding via vaapi the cpu-side frame is NV12 (the hw frames
+// context sw_format); encode_hw_write uploads it to the gpu surface
+ switch( avctx->pix_fmt ) {
+ case AV_PIX_FMT_VAAPI:
+ picture->format = AV_PIX_FMT_NV12;
+ break;
+ default:
+ picture->format = avctx->pix_fmt;
+ break;
+ }
picture->width = avctx->width;
picture->height = avctx->height;
int ret = av_frame_get_buffer(picture, 32);
frame->pts = curr_pos;
ret = convert_pixfmt(vframe, frame);
}
+// upload to gpu surface when a hw frames context is active
+// NOTE(review): the return value of encode_hw_write is discarded here —
+// a failed upload queues the sw frame anyway; confirm this is intended
+ if( ret >= 0 && avctx->hw_frames_ctx )
+ encode_hw_write(picture);
if( ret >= 0 ) {
picture->queue(curr_pos);
++curr_pos;
frame->interlaced_frame = interlaced;
frame->top_field_first = top_field_first;
}
+// vaapi surfaces bypass the generic FFStream::encode_frame path:
+// send the frame, then drain all pending packets from the encoder
+ if( frame && frame->format == AV_PIX_FMT_VAAPI ) { // ugly
+ int ret = avcodec_send_frame(avctx, frame);
+ for( int retry=MAX_RETRY; !ret && --retry>=0; ) {
+ FFPacket pkt; av_init_packet(pkt);
+ pkt->data = NULL; pkt->size = 0;
+ if( (ret=avcodec_receive_packet(avctx, pkt)) < 0 ) {
+ if( ret == AVERROR(EAGAIN) ) ret = 0; // weird
+ break;
+ }
+ ret = write_packet(pkt);
+ pkt->stream_index = 0;
+ av_packet_unref(pkt);
+ }
+ if( ret < 0 ) {
+ ff_err(ret, "FFStream::encode_frame: vaapi encode failed.\nfile: %s\n",
+ ffmpeg->fmt_ctx->url);
+ return -1;
+ }
+ return 0;
+ }
return FFStream::encode_frame(frame);
}
AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
- if( pix_fmt == ((FFVideoStream *)this)->hw_pixfmt ) {
+ FFVideoStream *vid =(FFVideoStream *)this;
+ if( pix_fmt == vid->hw_pixfmt ) {
int ret = 0;
if( !sw_frame && !(sw_frame=av_frame_alloc()) )
ret = AVERROR(ENOMEM);
pix_fmt = (AVPixelFormat)ip->format;
}
if( ret < 0 ) {
- ff_err(ret, "Error retrieving data from GPU to CPU\n");
+ eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"),
+ vid->ffmpeg->fmt_ctx->url);
return -1;
}
}
int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
ipic->data, ipic->linesize);
if( ret < 0 ) {
- ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n");
+ ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\nfile: %s\n",
+ vid->ffmpeg->fmt_ctx->url);
return -1;
}
return 0;
opt_duration = -1;
opt_video_filter = 0;
opt_audio_filter = 0;
+// new ffmpeg.opts controls: hw device name and decoder-name overrides
+ opt_hw_dev = 0;
+ opt_video_decoder = 0;
+ opt_audio_decoder = 0;
fflags = 0;
char option_path[BCTEXTLEN];
set_option_path(option_path, "%s", "ffmpeg.opts");
av_dict_free(&opts);
delete [] opt_video_filter;
delete [] opt_audio_filter;
+// NOTE(review): opt_video_decoder/opt_audio_decoder are cstrdup'd in
+// read_options but no delete for them is visible in this hunk — verify
+// the full dtor frees them too
+ delete [] opt_hw_dev;
}
int FFMPEG::check_sample_rate(AVCodec *codec, int sample_rate)
return ret;
}
+FFCodecRemap::FFCodecRemap()
+{
+ old_codec = 0;
+ new_codec = 0;
+}
+FFCodecRemap::~FFCodecRemap()
+{
+ delete [] old_codec;
+ delete [] new_codec;
+}
+
+int FFCodecRemaps::add(const char *val)
+{
+ char old_codec[BCSTRLEN], new_codec[BCSTRLEN];
+ if( sscanf(val, " %63[a-zA-z0-9_-] = %63[a-z0-9_-]",
+ &old_codec[0], &new_codec[0]) != 2 ) return 1;
+ FFCodecRemap &remap = append();
+ remap.old_codec = cstrdup(old_codec);
+ remap.new_codec = cstrdup(new_codec);
+ return 0;
+}
+
+
+int FFCodecRemaps::update(AVCodecID &codec_id, AVCodec *&decoder)
+{
+ AVCodec *codec = avcodec_find_decoder(codec_id);
+ if( !codec ) return -1;
+ const char *name = codec->name;
+ FFCodecRemaps &map = *this;
+ int k = map.size();
+ while( --k >= 0 && strcmp(map[k].old_codec, name) );
+ if( k < 0 ) return 1;
+ const char *new_codec = map[k].new_codec;
+ codec = avcodec_find_decoder_by_name(new_codec);
+ if( !codec ) return -1;
+ decoder = codec;
+ return 0;
+}
+
// Parse ffmpeg.opts key/value lines into FFMPEG option members / av opts.
int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
{
int ret = 0, no = 0;
if( !ret ) {
if( !strcmp(key, "duration") )
opt_duration = strtod(val, 0);
+// force a named decoder for all video/audio streams
+ else if( !strcmp(key, "video_decoder") )
+ opt_video_decoder = cstrdup(val);
+ else if( !strcmp(key, "audio_decoder") )
+ opt_audio_decoder = cstrdup(val);
+// per-codec "old = new" decoder remaps (see FFCodecRemaps::add)
+ else if( !strcmp(key, "remap_video_decoder") )
+ video_codec_remaps.add(val);
+ else if( !strcmp(key, "remap_audio_decoder") )
+ audio_codec_remaps.add(val);
else if( !strcmp(key, "video_filter") )
opt_video_filter = cstrdup(val);
else if( !strcmp(key, "audio_filter") )
opt_audio_filter = cstrdup(val);
+// hw device name; overrides CIN_HW_DEV and the preferences setting
+ else if( !strcmp(key, "cin_hw_dev") )
+ opt_hw_dev = cstrdup(val);
else if( !strcmp(key, "loglevel") )
set_loglevel(val);
else
}
if( bad_time && !(fflags & FF_BAD_TIMES) ) {
fflags |= FF_BAD_TIMES;
-	printf("FFMPEG::open_decoder: some stream have bad times: %s\n",
+// message wrapped for translation
+ printf(_("FFMPEG::open_decoder: some stream have bad times: %s\n"),
fmt_ctx->url);
}
ff_unlock();
vid->frame_rate = asset->frame_rate;
AVPixelFormat pix_fmt = av_get_pix_fmt(asset->ff_pixel_format);
+// when a hw device was configured, try to activate vaapi encode;
+// on success the encoder context uses gpu surfaces
+ if( opt_hw_dev != 0 ) {
+ AVHWDeviceType hw_type = vid->encode_hw_activate(opt_hw_dev);
+ switch( hw_type ) {
+ case AV_HWDEVICE_TYPE_VAAPI:
+ pix_fmt = AV_PIX_FMT_VAAPI;
+ break;
+ case AV_HWDEVICE_TYPE_NONE:
+ default: break;
+ }
+ }
if( pix_fmt == AV_PIX_FMT_NONE )
pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
ctx->pix_fmt = pix_fmt;
+
// round frame size up to the chroma subsampling granularity
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int mask_w = (1<<desc->log2_chroma_w)-1;
ctx->width = (vid->width+mask_w) & ~mask_w;
return file_base->file->cpus;
}
+// hw device name from user preferences; may be "" or "none" (disabled).
+// NOTE(review): &...[0] suggests use_hw_dev is a char array — confirm
+const char *FFMPEG::ff_hw_dev()
+{
+ return &file_base->file->preferences->use_hw_dev[0];
+}
+
int FFVideoStream::create_filter(const char *filter_spec, AVCodecParameters *avpar)
{
avfilter_register_all();