#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
// work arounds (centos)
#include <lzma.h>
#ifndef INT64_MAX
#define INT64_MAX 9223372036854775807LL
#endif

#define MAX_RETRY 1000

#include "asset.h"
#include "bccmodels.h"
#include "bchash.h"
#include "fileffmpeg.h"
#include "file.h"
#include "ffmpeg.h"
#include "indexfile.h"
#include "libdv.h"
#include "libmjpeg.h"
#include "mainerror.h"
#include "mwindow.h"
#include "vframe.h"

#define VIDEO_INBUF_SIZE 0x10000
#define AUDIO_INBUF_SIZE 0x10000
#define VIDEO_REFILL_THRESH 0
#define AUDIO_REFILL_THRESH 0x1000

Mutex FFMPEG::fflock("FFMPEG::fflock");

// report an av error code along with the caller's message
static void ff_err(int ret, const char *fmt, ...)
{
	char msg[BCTEXTLEN];
	va_list ap;
	va_start(ap, fmt);
	vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	char errmsg[BCSTRLEN];
	av_strerror(ret, errmsg, sizeof(errmsg));
	fprintf(stderr,_("%s err: %s\n"),msg, errmsg);
}

void FFPacket::init()
{
	av_init_packet(&pkt);
	pkt.data = 0;  pkt.size = 0;
}

void FFPacket::finit()
{
	av_packet_unref(&pkt);
}

FFrame::FFrame(FFStream *fst)
{
	this->fst = fst;
	frm = av_frame_alloc();
	init = fst->init_frame(frm);
}

FFrame::~FFrame()
{
	av_frame_free(&frm);
}

void FFrame::queue(int64_t pos)
{
	position = pos;
	fst->queue(this);
}

void FFrame::dequeue()
{
	fst->dequeue(this);
}

// copy len interleaved samples out of the history ring buffer
int FFAudioStream::read(float *fp, long len)
{
	long n = len * nch;
	float *op = outp;
	while( n > 0 ) {
		int k = lmt - op;
		if( k > n ) k = n;
		n -= k;
		while( --k >= 0 ) *fp++ = *op++;
		if( op >= lmt ) op = bfr;
	}
	return len;
}

void FFAudioStream::realloc(long nsz, int nch, long len)
{
	long bsz = nsz * nch;
	float *np = new float[bsz];
	inp = np + read(np, len) * nch;
	outp = np;
	lmt = np + bsz;
	this->nch = nch;
	sz = nsz;
	delete [] bfr;
	bfr = np;
}

void FFAudioStream::realloc(long nsz, int nch)
{
	if( nsz > sz || this->nch != nch ) {
		long len = this->nch != nch ? 0 : hpos;
		if( len > sz ) len = sz;
		iseek(len);
		realloc(nsz, nch, len);
	}
}

void FFAudioStream::reserve(long nsz, int nch)
{
	long len = (inp - outp) / nch;
	nsz += len;
	if( nsz > sz || this->nch != nch ) {
		if( this->nch != nch ) len = 0;
		realloc(nsz, nch, len);
		return;
	}
	if( (len*=nch) > 0 && bfr != outp )
		memmove(bfr, outp, len*sizeof(*bfr));
	outp = bfr;
	inp = bfr + len;
}

long FFAudioStream::used()
{
	long len = inp>=outp ? inp-outp : inp-bfr + lmt-outp;
	return len / nch;
}

long FFAudioStream::avail()
{
	float *in1 = inp+1;
	if( in1 >= lmt ) in1 = bfr;
	long len = outp >= in1 ?
outp-in1 : outp-bfr + lmt-in1; return len / nch; } void FFAudioStream::reset_history() { inp = outp = bfr; hpos = 0; } void FFAudioStream::iseek(int64_t ofs) { outp = inp - ofs*nch; if( outp < bfr ) outp += sz*nch; } float *FFAudioStream::get_outp(int ofs) { float *ret = outp; outp += ofs*nch; return ret; } int64_t FFAudioStream::put_inp(int ofs) { inp += ofs*nch; return (inp-outp) / nch; } int FFAudioStream::write(const float *fp, long len) { long n = len * nch; float *ip = inp; while( n > 0 ) { int k = lmt - ip; if( k > n ) k = n; n -= k; while( --k >= 0 ) *ip++ = *fp++; if( ip >= lmt ) ip = bfr; } inp = ip; hpos += len; return len; } int FFAudioStream::zero(long len) { long n = len * nch; float *ip = inp; while( n > 0 ) { int k = lmt - ip; if( k > n ) k = n; n -= k; while( --k >= 0 ) *ip++ = 0; if( ip >= lmt ) ip = bfr; } inp = ip; hpos += len; return len; } // does not advance outp int FFAudioStream::read(double *dp, long len, int ch) { long n = len; float *op = outp + ch; float *lmt1 = lmt + nch-1; while( n > 0 ) { int k = (lmt1 - op) / nch; if( k > n ) k = n; n -= k; while( --k >= 0 ) { *dp++ = *op; op += nch; } if( op >= lmt ) op -= sz*nch; } return len; } // load linear buffer, no wrapping allowed, does not advance inp int FFAudioStream::write(const double *dp, long len, int ch) { long n = len; float *ip = inp + ch; while( --n >= 0 ) { *ip = *dp++; ip += nch; } return len; } FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx) { this->ffmpeg = ffmpeg; this->st = st; this->fidx = fidx; frm_lock = new Mutex("FFStream::frm_lock"); fmt_ctx = 0; filter_graph = 0; buffersrc_ctx = 0; buffersink_ctx = 0; frm_count = 0; nudge = AV_NOPTS_VALUE; seek_pos = curr_pos = 0; seeked = 1; eof = 0; index_markers = 0; reading = writing = 0; flushed = 0; need_packet = 1; frame = fframe = 0; } FFStream::~FFStream() { if( reading > 0 || writing > 0 ) avcodec_close(st->codec); if( fmt_ctx ) avformat_close_input(&fmt_ctx); while( frms.first ) frms.remove(frms.first); if( filter_graph ) avfilter_graph_free(&filter_graph); if( frame ) av_frame_free(&frame); if( fframe ) av_frame_free(&fframe); bsfilter.remove_all_objects(); delete frm_lock; } void FFStream::ff_lock(const char *cp) { FFMPEG::fflock.lock(cp); } void FFStream::ff_unlock() { FFMPEG::fflock.unlock(); } void FFStream::queue(FFrame *frm) { frm_lock->lock("FFStream::queue"); frms.append(frm); ++frm_count; frm_lock->unlock(); ffmpeg->mux_lock->unlock(); } void FFStream::dequeue(FFrame *frm) { frm_lock->lock("FFStream::dequeue"); --frm_count; frms.remove_pointer(frm); frm_lock->unlock(); } int FFStream::encode_activate() { if( writing < 0 ) writing = ffmpeg->encode_activate(); return writing; } int FFStream::decode_activate() { if( reading < 0 && (reading=ffmpeg->decode_activate()) > 0 ) { ff_lock("FFStream::decode_activate"); reading = 0; AVDictionary *copts = 0; av_dict_copy(&copts, ffmpeg->opts, 0); int ret = 0; // this should be avformat_copy_context(), but no copy avail ret = avformat_open_input(&fmt_ctx, ffmpeg->fmt_ctx->filename, NULL, &copts); if( ret >= 0 ) { ret = avformat_find_stream_info(fmt_ctx, 0); st = fmt_ctx->streams[fidx]; load_markers(); } if( ret >= 0 ) { AVCodecID codec_id = st->codec->codec_id; AVCodec *decoder = avcodec_find_decoder(codec_id); ret = avcodec_open2(st->codec, decoder, &copts); if( ret >= 0 ) reading = 1; else eprintf("FFStream::decode_activate: open decoder failed\n"); } else eprintf("FFStream::decode_activate: can't clone input file\n"); av_dict_free(&copts); ff_unlock(); } return reading; } int 
FFStream::read_packet() { av_packet_unref(ipkt); int ret = av_read_frame(fmt_ctx, ipkt); if( ret < 0 ) { st_eof(1); if( ret == AVERROR_EOF ) { ipkt->stream_index = st->index; return 0; } ff_err(ret, "FFStream::read_packet: av_read_frame failed\n"); flushed = 1; return -1; } return 1; } int FFStream::decode(AVFrame *frame) { int ret = 0; int retries = MAX_RETRY; int got_frame = 0; while( ret >= 0 && !flushed && --retries >= 0 && !got_frame ) { if( need_packet ) { need_packet = 0; if( (ret=read_packet()) < 0 ) break; } if( ipkt->stream_index == st->index ) { while( (ipkt->size > 0 || !ipkt->data) && !got_frame ) { ret = decode_frame(ipkt, frame, got_frame); if( ret < 0 ) need_packet = 1; if( ret <= 0 || !ipkt->data ) break; ipkt->data += ret; ipkt->size -= ret; } retries = MAX_RETRY; } if( !got_frame ) { need_packet = 1; flushed = st_eof(); } } if( retries < 0 ) fprintf(stderr, "FFStream::decode: Retry limit\n"); if( ret >= 0 ) ret = got_frame; else fprintf(stderr, "FFStream::decode: failed\n"); return ret; } int FFStream::load_filter(AVFrame *frame) { int ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF); if( ret < 0 ) { av_frame_unref(frame); eprintf("FFStream::load_filter: av_buffersrc_add_frame_flags failed\n"); } return ret; } int FFStream::read_filter(AVFrame *frame) { int ret = av_buffersink_get_frame(buffersink_ctx, frame); if( ret < 0 ) { if( ret == AVERROR(EAGAIN) ) return 0; if( ret == AVERROR_EOF ) { st_eof(1); return -1; } ff_err(ret, "FFStream::read_filter: av_buffersink_get_frame failed\n"); return ret; } return 1; } int FFStream::read_frame(AVFrame *frame) { if( !filter_graph || !buffersrc_ctx || !buffersink_ctx ) return decode(frame); if( !fframe && !(fframe=av_frame_alloc()) ) { fprintf(stderr, "FFStream::read_frame: av_frame_alloc failed\n"); return -1; } int ret = -1; while( !flushed && !(ret=read_filter(frame)) ) { if( (ret=decode(fframe)) < 0 ) break; if( ret > 0 && (ret=load_filter(fframe)) < 0 ) break; } return ret; } int FFStream::write_packet(FFPacket &pkt) { bs_filter(pkt); av_packet_rescale_ts(pkt, st->codec->time_base, st->time_base); pkt->stream_index = st->index; return av_interleaved_write_frame(ffmpeg->fmt_ctx, pkt); } int FFStream::flush() { int ret = 0; while( ret >= 0 ) { FFPacket pkt; int got_packet = 0; ret = encode_frame(pkt, 0, got_packet); if( ret < 0 || !got_packet ) break; ret = write_packet(pkt); } if( ret < 0 ) ff_err(ret, "FFStream::flush"); return ret >= 0 ? 0 : 1; } int FFStream::seek(int64_t no, double rate) { int64_t tstmp = -INT64_MAX+1; // default ffmpeg native seek int npkts = 1; int64_t pos = no, plmt = -1; if( index_markers && index_markers->size() > 1 ) { IndexMarks &marks = *index_markers; int i = marks.find(pos); int64_t n = i < 0 ? 
(i=0) : marks[i].no; // if indexed seek point not too far away (<30 secs), use index if( no-n < 30*rate ) { if( n < 0 ) n = 0; pos = n; if( i < marks.size() ) plmt = marks[i].pos; npkts = MAX_RETRY; } } if( pos > 0 ) { double secs = pos / rate; tstmp = secs * st->time_base.den / st->time_base.num; if( nudge != AV_NOPTS_VALUE ) tstmp += nudge; } int ret = avformat_seek_file(fmt_ctx, st->index, -INT64_MAX, tstmp, INT64_MAX, AVSEEK_FLAG_ANY); if( ret >= 0 ) { avcodec_flush_buffers(st->codec); ipkt.finit(); ipkt.init(); need_packet = 0; flushed = 0; seeked = 1; st_eof(0); // read up to retry packets, limited to npkts in stream, and not past pkt.pos plmt for(;;) { if( read_packet() <= 0 ) { ret = -1; break; } if( plmt >= 0 && ipkt->pos >= plmt ) break; if( ipkt->stream_index != st->index ) continue; if( --npkts <= 0 ) break; int64_t pkt_ts = ipkt->dts != AV_NOPTS_VALUE ? ipkt->dts : ipkt->pts; if( pkt_ts == AV_NOPTS_VALUE ) continue; if( pkt_ts >= tstmp ) break; } } if( ret < 0 ) { //printf("** seek fail %ld, %ld\n", pos, tstmp); seeked = need_packet = 0; st_eof(flushed=1); return -1; } //printf("seeked pos = %ld, %ld\n", pos, tstmp); seek_pos = curr_pos = pos; return 0; } FFAudioStream::FFAudioStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx) : FFStream(ffmpeg, strm, fidx) { this->idx = idx; channel0 = channels = 0; sample_rate = 0; mbsz = 0; length = 0; resample_context = 0; aud_bfr_sz = 0; aud_bfr = 0; // history buffer nch = 2; sz = 0x10000; long bsz = sz * nch; bfr = new float[bsz]; lmt = bfr + bsz; reset_history(); } FFAudioStream::~FFAudioStream() { if( resample_context ) swr_free(&resample_context); delete [] aud_bfr; delete [] bfr; } int FFAudioStream::get_samples(float *&samples, uint8_t **data, int len) { samples = *(float **)data; if( resample_context ) { if( len > aud_bfr_sz ) { delete [] aud_bfr; aud_bfr = 0; } if( !aud_bfr ) { aud_bfr_sz = len; aud_bfr = new float[aud_bfr_sz*channels]; } int ret = swr_convert(resample_context, (uint8_t**)&aud_bfr, aud_bfr_sz, (const uint8_t**)data, len); if( ret < 0 ) { ff_err(ret, "FFAudioStream::get_samples: swr_convert failed\n"); return -1; } samples = aud_bfr; len = ret; } return len; } int FFAudioStream::load_history(uint8_t **data, int len) { float *samples; len = get_samples(samples, data, len); if( len > 0 ) { // biggest user bfr since seek + frame realloc(mbsz + len + 1, channels); write(samples, len); } return len; } int FFAudioStream::decode_frame(AVPacket *pkt, AVFrame *frame, int &got_frame) { int first_frame = seeked; seeked = 0; int ret = avcodec_decode_audio4(st->codec, frame, &got_frame, pkt); if( ret < 0 ) { if( first_frame ) return 0; ff_err(ret, "FFAudioStream::decode_frame: Could not read audio frame\n"); return -1; } if( got_frame ) { int64_t pkt_ts = av_frame_get_best_effort_timestamp(frame); if( pkt_ts != AV_NOPTS_VALUE ) curr_pos = ffmpeg->to_secs(pkt_ts - nudge, st->time_base) * sample_rate + 0.5; } return ret; } int FFAudioStream::encode_activate() { if( writing >= 0 ) return writing; AVCodecContext *ctx = st->codec; frame_sz = ctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ? 10000 : ctx->frame_size; return FFStream::encode_activate(); } int FFAudioStream::nb_samples() { AVCodecContext *ctx = st->codec; return ctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ? 
		10000 : ctx->frame_size;
}

// write one buffer of per-channel double samples into the history
// ring buffer, and report how many samples are now queued
int64_t FFAudioStream::load_buffer(double ** const sp, int len)
{
	reserve(len+1, st->codec->channels);
	for( int ch=0; ch<nch; ++ch )
		write(sp[ch], len, ch);
	return put_inp(len);
}

int FFAudioStream::in_history(int64_t pos)
{
	if( pos > curr_pos ) return 0;
	int64_t len = hpos;
	if( len > sz ) len = sz;
	if( pos < curr_pos - len ) return 0;
	return 1;
}

int FFAudioStream::init_frame(AVFrame *frame)
{
	AVCodecContext *ctx = st->codec;
	frame->nb_samples = frame_sz;
	frame->format = ctx->sample_fmt;
	frame->channel_layout = ctx->channel_layout;
	frame->sample_rate = ctx->sample_rate;
	int ret = av_frame_get_buffer(frame, 0);
	if (ret < 0)
		ff_err(ret, "FFAudioStream::init_frame: av_frame_get_buffer failed\n");
	return ret;
}

int FFAudioStream::load(int64_t pos, int len)
{
	if( audio_seek(pos) < 0 ) return -1;
	if( !frame && !(frame=av_frame_alloc()) ) {
		fprintf(stderr, "FFAudioStream::load: av_frame_alloc failed\n");
		return -1;
	}
	if( mbsz < len ) mbsz = len;
	int64_t end_pos = pos + len;
	int ret = 0;
	for( int i=0; ret>=0 && !flushed && curr_pos<end_pos && i<MAX_RETRY; ++i ) {
		ret = read_frame(frame);
		if( ret > 0 ) {
			load_history(&frame->extended_data[0], frame->nb_samples);
			curr_pos += frame->nb_samples;
		}
	}
	if( end_pos > curr_pos ) {
		zero(end_pos - curr_pos);
		curr_pos = end_pos;
	}
	len = curr_pos - pos;
	iseek(len);
	return len;
}

int FFAudioStream::audio_seek(int64_t pos)
{
	if( decode_activate() < 0 ) return -1;
	if( !st->codec || !st->codec->codec ) return -1;
	if( in_history(pos) ) return 0;
	if( pos == curr_pos ) return 0;
	reset_history();
	mbsz = 0;
// guarantee preload > 1sec samples
	if( seek(pos-sample_rate, sample_rate) < 0 ) return -1;
	return 1;
}

int FFAudioStream::encode(double **samples, int len)
{
	if( encode_activate() <= 0 ) return -1;
	ffmpeg->flow_ctl();
	int ret = 0;
	int64_t count = load_buffer(samples, len);
	FFrame *frm = 0;
	while( ret >= 0 && count >= frame_sz ) {
		frm = new FFrame(this);
		if( (ret=frm->initted()) < 0 ) break;
		AVFrame *frame = *frm;
		float *bfrp = get_outp(frame_sz);
		ret = swr_convert(resample_context,
			(uint8_t **)frame->extended_data, frame_sz,
			(const uint8_t **)&bfrp, frame_sz);
		if( ret < 0 ) {
			ff_err(ret, "FFAudioStream::encode: swr_convert failed\n");
			break;
		}
		frm->queue(curr_pos);
		frm = 0;
		curr_pos += frame_sz;
		count -= frame_sz;
	}
	delete frm;
	return ret >= 0 ? 0 : 1;
}

int FFAudioStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet)
{
	int ret = avcodec_encode_audio2(st->codec, pkt, frame, &got_packet);
	if( ret < 0 ) {
		ff_err(ret, "FFAudioStream::encode_frame: encode audio failed\n");
		return -1;
	}
	return ret;
}

void FFAudioStream::load_markers()
{
	IndexState *index_state = ffmpeg->file_base->asset->index_state;
	if( !index_state || idx >= index_state->audio_markers.size() ) return;
	if( index_state->marker_status == MARKERS_NOTTESTED ) return;
	FFStream::load_markers(*index_state->audio_markers[idx], sample_rate);
}

FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
 : FFStream(ffmpeg, strm, fidx)
{
	this->idx = idx;
	width = height = 0;
	frame_rate = 0;
	aspect_ratio = 0;
	length = 0;
}

FFVideoStream::~FFVideoStream()
{
}

int FFVideoStream::decode_frame(AVPacket *pkt, AVFrame *frame, int &got_frame)
{
	int first_frame = seeked;  seeked = 0;
	int ret = avcodec_decode_video2(st->codec, frame, &got_frame, pkt);
	if( ret < 0 ) {
		if( first_frame ) return 0;
		ff_err(ret, "FFVideoStream::decode_frame: Could not read video frame\n");
		return -1;
	}
	else // this is right out of ffplay, looks questionable ???
ret = pkt->size; if( got_frame ) { int64_t pkt_ts = av_frame_get_best_effort_timestamp(frame); if( pkt_ts != AV_NOPTS_VALUE ) curr_pos = ffmpeg->to_secs(pkt_ts - nudge, st->time_base) * frame_rate + 0.5; } return ret; } int FFVideoStream::load(VFrame *vframe, int64_t pos) { int ret = video_seek(pos); if( ret < 0 ) return -1; if( !frame && !(frame=av_frame_alloc()) ) { fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n"); return -1; } for( int i=0; ret>=0 && !flushed && curr_pos<=pos && i 0 ) ++curr_pos; } if( ret >= 0 ) { AVCodecContext *ctx = st->codec; ret = convert_cmodel(vframe, frame, ctx->pix_fmt, ctx->width, ctx->height); } ret = ret > 0 ? 1 : ret < 0 ? -1 : 0; return ret; } int FFVideoStream::video_seek(int64_t pos) { if( decode_activate() < 0 ) return -1; if( !st->codec || !st->codec->codec ) return -1; if( pos == curr_pos-1 && !seeked ) return 0; // if close enough, just read up to current int gop = st->codec->gop_size; if( gop < 4 ) gop = 4; if( gop > 64 ) gop = 64; int read_limit = curr_pos + 3*gop; if( pos >= curr_pos && pos <= read_limit ) return 0; // guarentee preload more than 2*gop frames if( seek(pos - 3*gop, frame_rate) < 0 ) return -1; return 1; } int FFVideoStream::init_frame(AVFrame *picture) { AVCodecContext *ctx = st->codec; picture->format = ctx->pix_fmt; picture->width = ctx->width; picture->height = ctx->height; int ret = av_frame_get_buffer(picture, 32); return ret; } int FFVideoStream::encode(VFrame *vframe) { if( encode_activate() <= 0 ) return -1; ffmpeg->flow_ctl(); FFrame *picture = new FFrame(this); int ret = picture->initted(); if( ret >= 0 ) { AVFrame *frame = *picture; frame->pts = curr_pos; AVCodecContext *ctx = st->codec; ret = convert_pixfmt(vframe, frame, ctx->pix_fmt, ctx->width, ctx->height); } if( ret >= 0 ) { picture->queue(curr_pos); ++curr_pos; } else { fprintf(stderr, "FFVideoStream::encode: encode failed\n"); delete picture; } return ret >= 0 ? 
0 : 1; } int FFVideoStream::encode_frame(AVPacket *pkt, AVFrame *frame, int &got_packet) { int ret = avcodec_encode_video2(st->codec, pkt, frame, &got_packet); if( ret < 0 ) { ff_err(ret, "FFVideoStream::encode_frame: encode video failed\n"); return -1; } return ret; } AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model) { switch( color_model ) { case BC_YUV422: return AV_PIX_FMT_YUYV422; case BC_RGB888: return AV_PIX_FMT_RGB24; case BC_RGBA8888: return AV_PIX_FMT_RGBA; case BC_BGR8888: return AV_PIX_FMT_BGR0; case BC_BGR888: return AV_PIX_FMT_BGR24; case BC_ARGB8888: return AV_PIX_FMT_ARGB; case BC_ABGR8888: return AV_PIX_FMT_ABGR; case BC_RGB8: return AV_PIX_FMT_RGB8; case BC_YUV420P: return AV_PIX_FMT_YUV420P; case BC_YUV422P: return AV_PIX_FMT_YUV422P; case BC_YUV444P: return AV_PIX_FMT_YUV444P; case BC_YUV411P: return AV_PIX_FMT_YUV411P; case BC_RGB565: return AV_PIX_FMT_RGB565; case BC_RGB161616: return AV_PIX_FMT_RGB48LE; case BC_RGBA16161616: return AV_PIX_FMT_RGBA64LE; default: break; } return AV_PIX_FMT_NB; } int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt) { switch (pix_fmt) { case AV_PIX_FMT_YUYV422: return BC_YUV422; case AV_PIX_FMT_RGB24: return BC_RGB888; case AV_PIX_FMT_RGBA: return BC_RGBA8888; case AV_PIX_FMT_BGR0: return BC_BGR8888; case AV_PIX_FMT_BGR24: return BC_BGR888; case AV_PIX_FMT_ARGB: return BC_ARGB8888; case AV_PIX_FMT_ABGR: return BC_ABGR8888; case AV_PIX_FMT_RGB8: return BC_RGB8; case AV_PIX_FMT_YUV420P: return BC_YUV420P; case AV_PIX_FMT_YUV422P: return BC_YUV422P; case AV_PIX_FMT_YUV444P: return BC_YUV444P; case AV_PIX_FMT_YUV411P: return BC_YUV411P; case AV_PIX_FMT_RGB565: return BC_RGB565; case AV_PIX_FMT_RGB48LE: return BC_RGB161616; case AV_PIX_FMT_RGBA64LE: return BC_RGBA16161616; default: break; } return -1; } int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVPixelFormat ifmt, int iw, int ih) { // try bc_xfer methods int imodel = pix_fmt_to_color_model(ifmt); if( imodel >= 0 ) { long y_ofs = 0, u_ofs = 0, v_ofs = 0; uint8_t *data = ip->data[0]; if( BC_CModels::is_yuv(imodel) ) { u_ofs = ip->data[1] - data; v_ofs = ip->data[2] - data; } VFrame iframe(data, -1, y_ofs, u_ofs, v_ofs, iw, ih, imodel, ip->linesize[0]); frame->transfer_from(&iframe); return 0; } // try sws methods AVFrame opic; int cmodel = frame->get_color_model(); AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel); if( ofmt == AV_PIX_FMT_NB ) return -1; int size = av_image_fill_arrays(opic.data, opic.linesize, frame->get_data(), ofmt, frame->get_w(), frame->get_h(), 1); if( size < 0 ) return -1; // transfer line sizes must match also int planar = BC_CModels::is_planar(cmodel); int packed_width = !planar ? 
frame->get_bytes_per_line() : BC_CModels::calculate_pixelsize(cmodel) * frame->get_w(); if( packed_width != opic.linesize[0] ) return -1; if( planar ) { // override av_image_fill_arrays() for planar types opic.data[0] = frame->get_y(); opic.data[1] = frame->get_u(); opic.data[2] = frame->get_v(); } convert_ctx = sws_getCachedContext(convert_ctx, iw, ih, ifmt, frame->get_w(), frame->get_h(), ofmt, SWS_BICUBIC, NULL, NULL, NULL); if( !convert_ctx ) { fprintf(stderr, "FFVideoConvert::convert_picture_frame:" " sws_getCachedContext() failed\n"); return -1; } int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ih, opic.data, opic.linesize); if( ret < 0 ) { ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\n"); return -1; } return 0; } int FFVideoConvert::convert_cmodel(VFrame *frame, AVFrame *ip, AVPixelFormat ifmt, int iw, int ih) { // try direct transfer if( !convert_picture_vframe(frame, ip, ifmt, iw, ih) ) return 1; // use indirect transfer const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(ifmt); int max_bits = 0; for( int i = 0; i nb_components; ++i ) { int bits = desc->comp[i].depth; if( bits > max_bits ) max_bits = bits; } // from libavcodec/pixdesc.c #define pixdesc_has_alpha(pixdesc) ((pixdesc)->nb_components == 2 || \ (pixdesc)->nb_components == 4 || (pixdesc)->flags & AV_PIX_FMT_FLAG_PAL) int icolor_model = pixdesc_has_alpha(desc) ? (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) : (max_bits > 8 ? BC_RGB161616 : BC_RGB888) ; VFrame vframe(iw, ih, icolor_model); if( convert_picture_vframe(&vframe, ip, ifmt, iw, ih) ) return -1; frame->transfer_from(&vframe); return 1; } int FFVideoConvert::transfer_cmodel(VFrame *frame, AVFrame *ifp, AVPixelFormat ifmt, int iw, int ih) { int ret = convert_cmodel(frame, ifp, ifmt, iw, ih); if( ret > 0 ) { const AVDictionary *src = av_frame_get_metadata(ifp); AVDictionaryEntry *t = NULL; BC_Hash *hp = frame->get_params(); //hp->clear(); while( (t=av_dict_get(src, "", t, AV_DICT_IGNORE_SUFFIX)) ) hp->update(t->key, t->value); } return ret; } int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVPixelFormat ofmt, int ow, int oh) { AVFrame opic; int cmodel = frame->get_color_model(); AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel); if( ifmt == AV_PIX_FMT_NB ) return -1; int size = av_image_fill_arrays(opic.data, opic.linesize, frame->get_data(), ifmt, frame->get_w(), frame->get_h(), 1); if( size < 0 ) return -1; // transfer line sizes must match also int planar = BC_CModels::is_planar(cmodel); int packed_width = !planar ? 
frame->get_bytes_per_line() : BC_CModels::calculate_pixelsize(cmodel) * frame->get_w(); if( packed_width != opic.linesize[0] ) return -1; if( planar ) { // override av_image_fill_arrays() for planar types opic.data[0] = frame->get_y(); opic.data[1] = frame->get_u(); opic.data[2] = frame->get_v(); } convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(), ifmt, ow, oh, ofmt, SWS_BICUBIC, NULL, NULL, NULL); if( !convert_ctx ) { fprintf(stderr, "FFVideoConvert::convert_frame_picture:" " sws_getCachedContext() failed\n"); return -1; } int ret = sws_scale(convert_ctx, opic.data, opic.linesize, 0, frame->get_h(), op->data, op->linesize); if( ret < 0 ) { ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n"); return -1; } return 0; } int FFVideoConvert::convert_pixfmt(VFrame *frame, AVFrame *op, AVPixelFormat ofmt, int ow, int oh) { // try direct transfer if( !convert_vframe_picture(frame, op, ofmt, ow, oh) ) return 1; // use indirect transfer int colormodel = frame->get_color_model(); int bits = BC_CModels::calculate_pixelsize(colormodel) * 8; bits /= BC_CModels::components(colormodel); int icolor_model = BC_CModels::has_alpha(colormodel) ? (bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) : (bits > 8 ? BC_RGB161616: BC_RGB888) ; VFrame vframe(frame->get_w(), frame->get_h(), icolor_model); vframe.transfer_from(frame); if( !convert_vframe_picture(&vframe, op, ofmt, ow, oh) ) return 1; return -1; } int FFVideoConvert::transfer_pixfmt(VFrame *frame, AVFrame *ofp, AVPixelFormat ofmt, int ow, int oh) { int ret = convert_pixfmt(frame, ofp, ofmt, ow, oh); if( ret > 0 ) { BC_Hash *hp = frame->get_params(); AVDictionary **dict = avpriv_frame_get_metadatap(ofp); //av_dict_free(dict); for( int i=0; isize(); ++i ) { char *key = hp->get_key(i), *val = hp->get_value(i); av_dict_set(dict, key, val, 0); } } return ret; } void FFVideoStream::load_markers() { IndexState *index_state = ffmpeg->file_base->asset->index_state; if( idx >= index_state->video_markers.size() ) return; FFStream::load_markers(*index_state->video_markers[idx], frame_rate); } FFMPEG::FFMPEG(FileBase *file_base) { fmt_ctx = 0; this->file_base = file_base; memset(file_format,0,sizeof(file_format)); mux_lock = new Condition(0,"FFMPEG::mux_lock",0); flow_lock = new Condition(1,"FFStream::flow_lock",0); done = -1; flow = 1; decoding = encoding = 0; has_audio = has_video = 0; opts = 0; opt_duration = -1; opt_video_filter = 0; opt_audio_filter = 0; char option_path[BCTEXTLEN]; set_option_path(option_path, "%s", "ffmpeg.opts"); read_options(option_path, opts); } FFMPEG::~FFMPEG() { ff_lock("FFMPEG::~FFMPEG()"); close_encoder(); ffaudio.remove_all_objects(); ffvideo.remove_all_objects(); if( fmt_ctx ) avformat_close_input(&fmt_ctx); ff_unlock(); delete flow_lock; delete mux_lock; av_dict_free(&opts); delete [] opt_video_filter; delete [] opt_audio_filter; } int FFMPEG::check_sample_rate(AVCodec *codec, int sample_rate) { const int *p = codec->supported_samplerates; if( !p ) return sample_rate; while( *p != 0 ) { if( *p == sample_rate ) return *p; ++p; } return 0; } static inline AVRational std_frame_rate(int i) { static const int m1 = 1001*12, m2 = 1000*12; static const int freqs[] = { 40*m1, 48*m1, 50*m1, 60*m1, 80*m1,120*m1, 240*m1, 24*m2, 30*m2, 60*m2, 12*m2, 15*m2, 48*m2, 0, }; int freq = i<30*12 ? 
(i+1)*1001 : freqs[i-30*12]; return (AVRational) { freq, 1001*12 }; } AVRational FFMPEG::check_frame_rate(AVCodec *codec, double frame_rate) { const AVRational *p = codec->supported_framerates; AVRational rate, best_rate = (AVRational) { 0, 0 }; double max_err = 1.; int i = 0; while( ((p ? (rate=*p++) : (rate=std_frame_rate(i++))), rate.num) != 0 ) { double framerate = (double) rate.num / rate.den; double err = fabs(frame_rate/framerate - 1.); if( err >= max_err ) continue; max_err = err; best_rate = rate; } return max_err < 0.0001 ? best_rate : (AVRational) { 0, 0 }; } AVRational FFMPEG::to_sample_aspect_ratio(Asset *asset) { #if 1 double display_aspect = asset->width / (double)asset->height; double sample_aspect = asset->aspect_ratio / display_aspect; int width = 1000000, height = width * sample_aspect + 0.5; float w, h; MWindow::create_aspect_ratio(w, h, width, height); return (AVRational){(int)h, (int)w}; #else // square pixels return (AVRational){1, 1}; #endif } AVRational FFMPEG::to_time_base(int sample_rate) { return (AVRational){1, sample_rate}; } void FFMPEG::set_option_path(char *path, const char *fmt, ...) { char *ep = path + BCTEXTLEN-1; strncpy(path, File::get_cindat_path(), ep-path); strncat(path, "/ffmpeg/", ep-path); path += strlen(path); va_list ap; va_start(ap, fmt); path += vsnprintf(path, ep-path, fmt, ap); va_end(ap); *path = 0; } void FFMPEG::get_option_path(char *path, const char *type, const char *spec) { if( *spec == '/' ) strcpy(path, spec); else set_option_path(path, "%s/%s", type, spec); } int FFMPEG::get_format(char *format, const char *path, const char *spec) { char option_path[BCTEXTLEN], line[BCTEXTLEN], codec[BCTEXTLEN]; get_option_path(option_path, path, spec); FILE *fp = fopen(option_path,"r"); if( !fp ) return 1; int ret = 0; if( !fgets(line, sizeof(line), fp) ) ret = 1; if( !ret ) { line[sizeof(line)-1] = 0; ret = scan_option_line(line, format, codec); } fclose(fp); return ret; } int FFMPEG::get_codec(char *codec, const char *path, const char *spec) { char option_path[BCTEXTLEN], line[BCTEXTLEN], format[BCTEXTLEN]; get_option_path(option_path, path, spec); FILE *fp = fopen(option_path,"r"); if( !fp ) return 1; int ret = 0; if( !fgets(line, sizeof(line), fp) ) ret = 1; fclose(fp); if( !ret ) { line[sizeof(line)-1] = 0; ret = scan_option_line(line, format, codec); } if( !ret ) { char *vp = codec, *ep = vp+BCTEXTLEN-1; while( vp < ep && *vp && *vp != '|' ) ++vp; if( *vp == '|' ) --vp; while( vp > codec && (*vp==' ' || *vp=='\t') ) *vp-- = 0; } return ret; } int FFMPEG::get_file_format() { int ret = 0; char audio_format[BCSTRLEN], video_format[BCSTRLEN]; file_format[0] = audio_format[0] = video_format[0] = 0; Asset *asset = file_base->asset; if( !ret && asset->audio_data ) ret = get_format(audio_format, "audio", asset->acodec); if( !ret && asset->video_data ) ret = get_format(video_format, "video", asset->vcodec); if( !ret && !audio_format[0] && !video_format[0] ) ret = 1; if( !ret && audio_format[0] && video_format[0] && strcmp(audio_format, video_format) ) ret = -1; if( !ret ) strcpy(file_format, audio_format[0] ? 
audio_format : video_format); return ret; } int FFMPEG::scan_option_line(char *cp, char *tag, char *val) { while( *cp == ' ' || *cp == '\t' ) ++cp; char *bp = cp; while( *cp && *cp != ' ' && *cp != '\t' && *cp != '=' ) ++cp; int len = cp - bp; if( !len || len > BCSTRLEN-1 ) return 1; while( bp < cp ) *tag++ = *bp++; *tag = 0; while( *cp == ' ' || *cp == '\t' ) ++cp; if( *cp == '=' ) ++cp; while( *cp == ' ' || *cp == '\t' ) ++cp; bp = cp; while( *cp && *cp != '\n' ) ++cp; len = cp - bp; if( len > BCTEXTLEN-1 ) return 1; while( bp < cp ) *val++ = *bp++; *val = 0; return 0; } int FFMPEG::load_defaults(const char *path, const char *type, char *codec, char *codec_options, int len) { char default_file[BCTEXTLEN]; FFMPEG::set_option_path(default_file, "%s/%s.dfl", path, type); FILE *fp = fopen(default_file,"r"); if( !fp ) return 1; fgets(codec, BCSTRLEN, fp); char *cp = codec; while( *cp && *cp!='\n' ) ++cp; *cp = 0; while( len > 0 && fgets(codec_options, len, fp) ) { int n = strlen(codec_options); codec_options += n; len -= n; } fclose(fp); FFMPEG::set_option_path(default_file, "%s/%s", path, codec); return FFMPEG::load_options(default_file, codec_options, len); } void FFMPEG::set_asset_format(Asset *asset, const char *text) { if( asset->format != FILE_FFMPEG ) return; strcpy(asset->fformat, text); if( !asset->ff_audio_options[0] ) { asset->audio_data = !load_defaults("audio", text, asset->acodec, asset->ff_audio_options, sizeof(asset->ff_audio_options)); } if( !asset->ff_video_options[0] ) { asset->video_data = !load_defaults("video", text, asset->vcodec, asset->ff_video_options, sizeof(asset->ff_video_options)); } } int FFMPEG::get_encoder(const char *options, char *format, char *codec, char *bsfilter, char *bsargs) { FILE *fp = fopen(options,"r"); if( !fp ) { eprintf("FFMPEG::get_encoder: options open failed %s\n",options); return 1; } if( get_encoder(fp, format, codec, bsfilter, bsargs) ) eprintf(_("FFMPEG::get_encoder:" " err: format/codec not found %s\n"), options); fclose(fp); return 0; } int FFMPEG::get_encoder(FILE *fp, char *format, char *codec, char *bsfilter, char *bsargs) { format[0] = codec[0] = bsfilter[0] = bsargs[0] = 0; char line[BCTEXTLEN]; if( !fgets(line, sizeof(line), fp) ) return 1; line[sizeof(line)-1] = 0; if( scan_option_line(line, format, codec) ) return 1; char *cp = codec; while( *cp && *cp != '|' ) ++cp; if( !*cp ) return 0; if( scan_option_line(cp+1, bsfilter, bsargs) ) return 1; do { *cp-- = 0; } while( cp>=codec && (*cp==' ' || *cp == '\t' ) ); return 0; } int FFMPEG::read_options(const char *options, AVDictionary *&opts) { FILE *fp = fopen(options,"r"); if( !fp ) return 1; int ret = read_options(fp, options, opts); fclose(fp); return ret; } int FFMPEG::scan_options(const char *options, AVDictionary *&opts, AVStream *st) { FILE *fp = fmemopen((void *)options,strlen(options),"r"); if( !fp ) return 0; int ret = read_options(fp, options, opts); fclose(fp); AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0); if( tag ) st->id = strtol(tag->value,0,0); return ret; } int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts) { int ret = 0, no = 0; char line[BCTEXTLEN]; while( !ret && fgets(line, sizeof(line), fp) ) { line[sizeof(line)-1] = 0; ++no; if( line[0] == '#' ) continue; if( line[0] == '\n' ) continue; char key[BCSTRLEN], val[BCTEXTLEN]; if( scan_option_line(line, key, val) ) { eprintf(_("FFMPEG::read_options:" " err reading %s: line %d\n"), options, no); ret = 1; } if( !ret ) { if( !strcmp(key, "duration") ) opt_duration = strtod(val, 
0); else if( !strcmp(key, "video_filter") ) opt_video_filter = cstrdup(val); else if( !strcmp(key, "audio_filter") ) opt_audio_filter = cstrdup(val); else if( !strcmp(key, "loglevel") ) set_loglevel(val); else av_dict_set(&opts, key, val, 0); } } return ret; } int FFMPEG::load_options(const char *options, AVDictionary *&opts) { char option_path[BCTEXTLEN]; set_option_path(option_path, "%s", options); return read_options(option_path, opts); } int FFMPEG::load_options(const char *path, char *bfr, int len) { *bfr = 0; FILE *fp = fopen(path, "r"); if( !fp ) return 1; fgets(bfr, len, fp); // skip hdr len = fread(bfr, 1, len-1, fp); if( len < 0 ) len = 0; bfr[len] = 0; fclose(fp); return 0; } void FFMPEG::set_loglevel(const char *ap) { if( !ap || !*ap ) return; const struct { const char *name; int level; } log_levels[] = { { "quiet" , AV_LOG_QUIET }, { "panic" , AV_LOG_PANIC }, { "fatal" , AV_LOG_FATAL }, { "error" , AV_LOG_ERROR }, { "warning", AV_LOG_WARNING }, { "info" , AV_LOG_INFO }, { "verbose", AV_LOG_VERBOSE }, { "debug" , AV_LOG_DEBUG }, }; for( int i=0; i<(int)(sizeof(log_levels)/sizeof(log_levels[0])); ++i ) { if( !strcmp(log_levels[i].name, ap) ) { av_log_set_level(log_levels[i].level); return; } } av_log_set_level(atoi(ap)); } double FFMPEG::to_secs(int64_t time, AVRational time_base) { double base_time = time == AV_NOPTS_VALUE ? 0 : av_rescale_q(time, time_base, AV_TIME_BASE_Q); return base_time / AV_TIME_BASE; } int FFMPEG::info(char *text, int len) { if( len <= 0 ) return 0; decode_activate(); #define report(s...) do { int n = snprintf(cp,len,s); cp += n; len -= n; } while(0) char *cp = text; if( ffvideo.size() > 0 ) report("\n%d video stream%s\n",ffvideo.size(), ffvideo.size()!=1 ? "s" : ""); for( int vidx=0; vidxst; AVCodecContext *avctx = st->codec; report(_("vid%d (%d), id 0x%06x:\n"), vid->idx, vid->fidx, avctx->codec_id); const AVCodecDescriptor *desc = avcodec_descriptor_get(avctx->codec_id); report(" video%d %s", vidx+1, desc ? desc->name : " (unkn)"); report(" %dx%d %5.2f", vid->width, vid->height, vid->frame_rate); const char *pfn = av_get_pix_fmt_name(avctx->pix_fmt); report(" pix %s\n", pfn ? pfn : "(unkn)"); double secs = to_secs(st->duration, st->time_base); int64_t length = secs * vid->frame_rate + 0.5; double ofs = to_secs((vid->nudge - st->start_time), st->time_base); int64_t nudge = ofs * vid->frame_rate; int ch = nudge >= 0 ? '+' : (nudge=-nudge, '-'); report(" %jd%c%jd frms %0.2f secs", length,ch,nudge, secs); int hrs = secs/3600; secs -= hrs*3600; int mins = secs/60; secs -= mins*60; report(" %d:%02d:%05.2f\n", hrs, mins, secs); } if( ffaudio.size() > 0 ) report("\n%d audio stream%s\n",ffaudio.size(), ffaudio.size()!=1 ? "s" : ""); for( int aidx=0; aidxst; AVCodecContext *avctx = st->codec; report(_("aud%d (%d), id 0x%06x:\n"), aud->idx, aud->fidx, avctx->codec_id); const AVCodecDescriptor *desc = avcodec_descriptor_get(avctx->codec_id); int nch = aud->channels, ch0 = aud->channel0+1; report(" audio%d-%d %s", ch0, ch0+nch-1, desc ? desc->name : " (unkn)"); const char *fmt = av_get_sample_fmt_name(avctx->sample_fmt); report(" %s %d", fmt, aud->sample_rate); int sample_bits = av_get_bits_per_sample(avctx->codec_id); report(" %dbits\n", sample_bits); double secs = to_secs(st->duration, st->time_base); int64_t length = secs * aud->sample_rate + 0.5; double ofs = to_secs((aud->nudge - st->start_time), st->time_base); int64_t nudge = ofs * aud->sample_rate; int ch = nudge >= 0 ? 
'+' : (nudge=-nudge, '-'); report(" %jd%c%jd smpl %0.2f secs", length,ch,nudge, secs); int hrs = secs/3600; secs -= hrs*3600; int mins = secs/60; secs -= mins*60; report(" %d:%02d:%05.2f\n", hrs, mins, secs); } if( fmt_ctx->nb_programs > 0 ) report("\n%d program%s\n",fmt_ctx->nb_programs, fmt_ctx->nb_programs!=1 ? "s" : ""); for( int i=0; i<(int)fmt_ctx->nb_programs; ++i ) { report("program %d", i+1); AVProgram *pgrm = fmt_ctx->programs[i]; for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) { int idx = pgrm->stream_index[j]; int vidx = ffvideo.size(); while( --vidx>=0 && ffvideo[vidx]->fidx != idx ); if( vidx >= 0 ) { report(", vid%d", vidx); continue; } int aidx = ffaudio.size(); while( --aidx>=0 && ffaudio[aidx]->fidx != idx ); if( aidx >= 0 ) { report(", aud%d", aidx); continue; } report(", (%d)", pgrm->stream_index[j]); } report("\n"); } report("\n"); AVDictionaryEntry *tag = 0; while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) report("%s=%s\n", tag->key, tag->value); if( !len ) --cp; *cp = 0; return cp - text; #undef report } int FFMPEG::init_decoder(const char *filename) { ff_lock("FFMPEG::init_decoder"); av_register_all(); char file_opts[BCTEXTLEN]; char *bp = strrchr(strcpy(file_opts, filename), '/'); char *sp = strrchr(!bp ? file_opts : bp, '.'); FILE *fp = 0; if( sp ) { strcpy(sp, ".opts"); fp = fopen(file_opts, "r"); } if( fp ) { read_options(fp, file_opts, opts); fclose(fp); } else load_options("decode.opts", opts); AVDictionary *fopts = 0; av_dict_copy(&fopts, opts, 0); int ret = avformat_open_input(&fmt_ctx, filename, NULL, &fopts); av_dict_free(&fopts); if( ret >= 0 ) ret = avformat_find_stream_info(fmt_ctx, NULL); if( !ret ) { decoding = -1; } ff_unlock(); return !ret ? 0 : 1; } int FFMPEG::open_decoder() { struct stat st; if( stat(fmt_ctx->filename, &st) < 0 ) { eprintf("FFMPEG::open_decoder: can't stat file: %s\n", fmt_ctx->filename); return 1; } int64_t file_bits = 8 * st.st_size; if( !fmt_ctx->bit_rate && opt_duration > 0 ) fmt_ctx->bit_rate = file_bits / opt_duration; int estimated = 0; if( fmt_ctx->bit_rate > 0 ) { for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) { AVStream *st = fmt_ctx->streams[i]; if( st->duration != AV_NOPTS_VALUE ) continue; if( st->time_base.num > INT64_MAX / fmt_ctx->bit_rate ) continue; st->duration = av_rescale(file_bits, st->time_base.den, fmt_ctx->bit_rate * (int64_t) st->time_base.num); estimated = 1; } } if( estimated ) printf("FFMPEG::open_decoder: some stream times estimated\n"); ff_lock("FFMPEG::open_decoder"); int bad_time = 0; for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) { AVStream *st = fmt_ctx->streams[i]; if( st->duration == AV_NOPTS_VALUE ) bad_time = 1; AVCodecContext *avctx = st->codec; const AVCodecDescriptor *codec_desc = avcodec_descriptor_get(avctx->codec_id); if( !codec_desc ) continue; if( avctx->codec_type == AVMEDIA_TYPE_VIDEO ) { if( avctx->width < 1 ) continue; if( avctx->height < 1 ) continue; AVRational framerate = av_guess_frame_rate(fmt_ctx, st, 0); if( framerate.num < 1 ) continue; has_video = 1; int vidx = ffvideo.size(); FFVideoStream *vid = new FFVideoStream(this, st, vidx, i); vstrm_index.append(ffidx(vidx, 0)); ffvideo.append(vid); vid->width = avctx->width; vid->height = avctx->height; vid->frame_rate = !framerate.den ? 
0 : (double)framerate.num / framerate.den; double secs = to_secs(st->duration, st->time_base); vid->length = secs * vid->frame_rate; vid->aspect_ratio = (double)st->sample_aspect_ratio.num / st->sample_aspect_ratio.den; vid->nudge = st->start_time; vid->reading = -1; if( opt_video_filter ) vid->create_filter(opt_video_filter, avctx,avctx); } else if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) { if( avctx->channels < 1 ) continue; if( avctx->sample_rate < 1 ) continue; has_audio = 1; int aidx = ffaudio.size(); FFAudioStream *aud = new FFAudioStream(this, st, aidx, i); ffaudio.append(aud); aud->channel0 = astrm_index.size(); aud->channels = avctx->channels; for( int ch=0; chchannels; ++ch ) astrm_index.append(ffidx(aidx, ch)); aud->sample_rate = avctx->sample_rate; double secs = to_secs(st->duration, st->time_base); aud->length = secs * aud->sample_rate; if( avctx->sample_fmt != AV_SAMPLE_FMT_FLT ) { uint64_t layout = av_get_default_channel_layout(avctx->channels); if( !layout ) layout = ((uint64_t)1<channels) - 1; aud->resample_context = swr_alloc_set_opts(NULL, layout, AV_SAMPLE_FMT_FLT, avctx->sample_rate, layout, avctx->sample_fmt, avctx->sample_rate, 0, NULL); swr_init(aud->resample_context); } aud->nudge = st->start_time; aud->reading = -1; if( opt_audio_filter ) aud->create_filter(opt_audio_filter, avctx,avctx); } } if( bad_time ) printf("FFMPEG::open_decoder: some stream have bad times\n"); ff_unlock(); return 0; } int FFMPEG::init_encoder(const char *filename) { int fd = ::open(filename,O_WRONLY); if( fd < 0 ) fd = open(filename,O_WRONLY+O_CREAT,0666); if( fd < 0 ) { eprintf("FFMPEG::init_encoder: bad file path: %s\n", filename); return 1; } ::close(fd); int ret = get_file_format(); if( ret > 0 ) { eprintf("FFMPEG::init_encoder: bad file format: %s\n", filename); return 1; } if( ret < 0 ) { eprintf("FFMPEG::init_encoder: mismatch audio/video file format: %s\n", filename); return 1; } ff_lock("FFMPEG::init_encoder"); av_register_all(); avformat_alloc_output_context2(&fmt_ctx, 0, file_format, filename); if( !fmt_ctx ) { eprintf("FFMPEG::init_encoder: failed: %s\n", filename); ret = 1; } if( !ret ) { encoding = -1; load_options("encode.opts", opts); } ff_unlock(); return ret; } int FFMPEG::open_encoder(const char *type, const char *spec) { Asset *asset = file_base->asset; char *filename = asset->path; AVDictionary *sopts = 0; av_dict_copy(&sopts, opts, 0); char option_path[BCTEXTLEN]; set_option_path(option_path, "%s/%s.opts", type, type); read_options(option_path, sopts); get_option_path(option_path, type, spec); char format_name[BCSTRLEN], codec_name[BCTEXTLEN]; char bsfilter[BCSTRLEN], bsargs[BCTEXTLEN]; if( get_encoder(option_path, format_name, codec_name, bsfilter, bsargs) ) { eprintf("FFMPEG::open_encoder: get_encoder failed %s:%s\n", option_path, filename); return 1; } if( !strcmp(codec_name, CODEC_TAG_DVSD) ) strcpy(codec_name, "dv"); else if( !strcmp(codec_name, CODEC_TAG_MJPEG) ) strcpy(codec_name, "mjpeg"); else if( !strcmp(codec_name, CODEC_TAG_JPEG) ) strcpy(codec_name, "jpeg"); int ret = 0; ff_lock("FFMPEG::open_encoder"); FFStream *fst = 0; AVStream *st = 0; const AVCodecDescriptor *codec_desc = 0; AVCodec *codec = avcodec_find_encoder_by_name(codec_name); if( !codec ) { eprintf("FFMPEG::open_encoder: cant find codec %s:%s\n", codec_name, filename); ret = 1; } if( !ret ) { codec_desc = avcodec_descriptor_get(codec->id); if( !codec_desc ) { eprintf("FFMPEG::open_encoder: unknown codec %s:%s\n", codec_name, filename); ret = 1; } } if( !ret ) { st = 
avformat_new_stream(fmt_ctx, 0); if( !st ) { eprintf("FFMPEG::open_encoder: cant create stream %s:%s\n", codec_name, filename); ret = 1; } } if( !ret ) { AVCodecContext *ctx = st->codec; switch( codec_desc->type ) { case AVMEDIA_TYPE_AUDIO: { if( has_audio ) { eprintf("FFMPEG::open_encoder: duplicate audio %s:%s\n", codec_name, filename); ret = 1; break; } has_audio = 1; if( scan_options(asset->ff_audio_options, sopts, st) ) { eprintf("FFMPEG::open_encoder: bad audio options %s:%s\n", codec_name, filename); ret = 1; break; } if( asset->ff_audio_bitrate > 0 ) { ctx->bit_rate = asset->ff_audio_bitrate; char arg[BCSTRLEN]; sprintf(arg, "%d", asset->ff_audio_bitrate); av_dict_set(&sopts, "b", arg, 0); } int aidx = ffaudio.size(); int fidx = aidx + ffvideo.size(); FFAudioStream *aud = new FFAudioStream(this, st, aidx, fidx); ffaudio.append(aud); fst = aud; aud->sample_rate = asset->sample_rate; ctx->channels = aud->channels = asset->channels; for( int ch=0; chchannels; ++ch ) astrm_index.append(ffidx(aidx, ch)); ctx->channel_layout = av_get_default_channel_layout(ctx->channels); ctx->sample_rate = check_sample_rate(codec, asset->sample_rate); if( !ctx->sample_rate ) { eprintf("FFMPEG::open_encoder:" " check_sample_rate failed %s\n", filename); ret = 1; break; } ctx->time_base = st->time_base = (AVRational){1, aud->sample_rate}; ctx->sample_fmt = codec->sample_fmts[0]; uint64_t layout = av_get_default_channel_layout(ctx->channels); aud->resample_context = swr_alloc_set_opts(NULL, layout, ctx->sample_fmt, aud->sample_rate, layout, AV_SAMPLE_FMT_FLT, ctx->sample_rate, 0, NULL); swr_init(aud->resample_context); aud->writing = -1; break; } case AVMEDIA_TYPE_VIDEO: { if( has_video ) { eprintf("FFMPEG::open_encoder: duplicate video %s:%s\n", codec_name, filename); ret = 1; break; } has_video = 1; if( scan_options(asset->ff_video_options, sopts, st) ) { eprintf("FFMPEG::open_encoder: bad video options %s:%s\n", codec_name, filename); ret = 1; break; } if( asset->ff_video_bitrate > 0 ) { ctx->bit_rate = asset->ff_video_bitrate; char arg[BCSTRLEN]; sprintf(arg, "%d", asset->ff_video_bitrate); av_dict_set(&sopts, "b", arg, 0); } else if( asset->ff_video_quality > 0 ) { ctx->global_quality = asset->ff_video_quality * FF_QP2LAMBDA; ctx->qmin = ctx->qmax = asset->ff_video_quality; ctx->mb_lmin = ctx->qmin * FF_QP2LAMBDA; ctx->mb_lmax = ctx->qmax * FF_QP2LAMBDA; ctx->flags |= CODEC_FLAG_QSCALE; char arg[BCSTRLEN]; av_dict_set(&sopts, "flags", "+qscale", 0); sprintf(arg, "%d", asset->ff_video_quality); av_dict_set(&sopts, "qscale", arg, 0); sprintf(arg, "%d", ctx->global_quality); av_dict_set(&sopts, "global_quality", arg, 0); } int vidx = ffvideo.size(); int fidx = vidx + ffaudio.size(); FFVideoStream *vid = new FFVideoStream(this, st, vidx, fidx); vstrm_index.append(ffidx(vidx, 0)); ffvideo.append(vid); fst = vid; vid->width = asset->width; ctx->width = (vid->width+3) & ~3; vid->height = asset->height; ctx->height = (vid->height+3) & ~3; vid->frame_rate = asset->frame_rate; ctx->sample_aspect_ratio = to_sample_aspect_ratio(asset); ctx->pix_fmt = codec->pix_fmts ? 
			codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
		AVRational frame_rate = check_frame_rate(codec, vid->frame_rate);
		if( !frame_rate.num || !frame_rate.den ) {
			eprintf("FFMPEG::open_encoder:"
				" check_frame_rate failed %s\n", filename);
			ret = 1;
			break;
		}
		ctx->time_base = (AVRational) { frame_rate.den, frame_rate.num };
		st->time_base = ctx->time_base;
		vid->writing = -1;
		break; }
	default:
		eprintf("FFMPEG::open_encoder: not audio/video, %s:%s\n",
			codec_name, filename);
		ret = 1;
	} }
	if( !ret ) {
		if( fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER )
			st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
		ret = avcodec_open2(st->codec, codec, &sopts);
		if( ret < 0 ) {
			ff_err(ret,"FFMPEG::open_encoder");
			eprintf("FFMPEG::open_encoder: open failed %s:%s\n",
				codec_name, filename);
			ret = 1;
		}
		else
			ret = 0;
	}
	if( !ret ) {
		if( fst && bsfilter[0] )
			fst->add_bsfilter(bsfilter, !bsargs[0] ? 0 : bsargs);
	}
	ff_unlock();
	if( !ret ) start_muxer();
	av_dict_free(&sopts);
	return ret;
}

int FFMPEG::close_encoder()
{
	stop_muxer();
	if( encoding > 0 ) {
		av_write_trailer(fmt_ctx);
		if( !(fmt_ctx->flags & AVFMT_NOFILE) )
			avio_closep(&fmt_ctx->pb);
	}
	encoding = 0;
	return 0;
}

int FFMPEG::decode_activate()
{
	if( decoding < 0 ) {
		decoding = 0;
		for( int vidx=0; vidx<ffvideo.size(); ++vidx )
			ffvideo[vidx]->nudge = AV_NOPTS_VALUE;
		for( int aidx=0; aidx<ffaudio.size(); ++aidx )
			ffaudio[aidx]->nudge = AV_NOPTS_VALUE;
// set nudges for each program stream set
		int npgrms = fmt_ctx->nb_programs;
		for( int i=0; i<npgrms; ++i ) {
			AVProgram *pgrm = fmt_ctx->programs[i];
// latest start time of the program's video and audio streams
			int64_t vstart_time = -1, astart_time = -1;
			for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
				int fidx = pgrm->stream_index[j];
				AVStream *st = fmt_ctx->streams[fidx];
				AVCodecContext *avctx = st->codec;
				if( avctx->codec_type == AVMEDIA_TYPE_VIDEO ) {
					if( st->start_time == AV_NOPTS_VALUE ) continue;
					if( vstart_time > st->start_time ) continue;
					vstart_time = st->start_time;
					continue;
				}
				if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) {
					if( st->start_time == AV_NOPTS_VALUE ) continue;
					if( astart_time > st->start_time ) continue;
					astart_time = st->start_time;
					continue;
				}
			}
// match program streams to max start_time
			int64_t nudge = vstart_time > astart_time ? vstart_time : astart_time;
			for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
				int fidx = pgrm->stream_index[j];
				AVStream *st = fmt_ctx->streams[fidx];
				AVCodecContext *avctx = st->codec;
				if( avctx->codec_type == AVMEDIA_TYPE_VIDEO ) {
					for( int k=0; k<ffvideo.size(); ++k ) {
						if( ffvideo[k]->fidx != fidx ) continue;
						ffvideo[k]->nudge = nudge;
					}
					continue;
				}
				if( avctx->codec_type == AVMEDIA_TYPE_AUDIO ) {
					for( int k=0; k<ffaudio.size(); ++k ) {
						if( ffaudio[k]->fidx != fidx ) continue;
						ffaudio[k]->nudge = nudge;
					}
					continue;
				}
			}
		}
// set nudges for any streams not yet set
		int64_t vstart_time = 0, astart_time = 0;
		int nstreams = fmt_ctx->nb_streams;
		for( int i=0; i<nstreams; ++i ) {
			AVStream *st = fmt_ctx->streams[i];
			AVCodecContext *avctx = st->codec;
			switch( avctx->codec_type ) {
			case AVMEDIA_TYPE_VIDEO: {
				if( st->start_time == AV_NOPTS_VALUE ) continue;
				int vidx = ffvideo.size();
				while( --vidx >= 0 && ffvideo[vidx]->fidx != i );
				if( vidx >= 0 && ffvideo[vidx]->nudge != AV_NOPTS_VALUE ) continue;
				if( vstart_time >= st->start_time ) continue;
				vstart_time = st->start_time;
				break; }
			case AVMEDIA_TYPE_AUDIO: {
				if( st->start_time == AV_NOPTS_VALUE ) continue;
				int aidx = ffaudio.size();
				while( --aidx >= 0 && ffaudio[aidx]->fidx != i );
				if( aidx >= 0 && ffaudio[aidx]->nudge != AV_NOPTS_VALUE ) continue;
				if( astart_time >= st->start_time ) continue;
				astart_time = st->start_time;
				break; }
			default: break;
			}
		}
		int64_t nudge = vstart_time > astart_time ?
vstart_time : astart_time; for( int vidx=0; vidxnudge != AV_NOPTS_VALUE ) continue; ffvideo[vidx]->nudge = nudge; } for( int aidx=0; aidxnudge != AV_NOPTS_VALUE ) continue; ffaudio[aidx]->nudge = nudge; } decoding = 1; } return decoding; } int FFMPEG::encode_activate() { int ret = 0; if( encoding < 0 ) { encoding = 0; if( !(fmt_ctx->flags & AVFMT_NOFILE) && (ret=avio_open(&fmt_ctx->pb, fmt_ctx->filename, AVIO_FLAG_WRITE)) < 0 ) { ff_err(ret, "FFMPEG::encode_activate: err opening : %s\n", fmt_ctx->filename); return 1; } AVDictionary *fopts = 0; char option_path[BCTEXTLEN]; set_option_path(option_path, "format/%s", file_format); read_options(option_path, fopts); ret = avformat_write_header(fmt_ctx, &fopts); av_dict_free(&fopts); if( ret < 0 ) { ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n", fmt_ctx->filename); return 1; } encoding = 1; } return encoding; } int FFMPEG::audio_seek(int stream, int64_t pos) { int aidx = astrm_index[stream].st_idx; FFAudioStream *aud = ffaudio[aidx]; aud->audio_seek(pos); return 0; } int FFMPEG::video_seek(int stream, int64_t pos) { int vidx = vstrm_index[stream].st_idx; FFVideoStream *vid = ffvideo[vidx]; vid->video_seek(pos); return 0; } int FFMPEG::decode(int chn, int64_t pos, double *samples, int len) { if( !has_audio || chn >= astrm_index.size() ) return -1; int aidx = astrm_index[chn].st_idx; FFAudioStream *aud = ffaudio[aidx]; if( aud->load(pos, len) < len ) return -1; int ch = astrm_index[chn].st_ch; int ret = aud->read(samples,len,ch); return ret; } int FFMPEG::decode(int layer, int64_t pos, VFrame *vframe) { if( !has_video || layer >= vstrm_index.size() ) return -1; int vidx = vstrm_index[layer].st_idx; FFVideoStream *vid = ffvideo[vidx]; return vid->load(vframe, pos); } int FFMPEG::encode(int stream, double **samples, int len) { FFAudioStream *aud = ffaudio[stream]; return aud->encode(samples, len); } int FFMPEG::encode(int stream, VFrame *frame) { FFVideoStream *vid = ffvideo[stream]; return vid->encode(frame); } void FFMPEG::start_muxer() { if( !running() ) { done = 0; start(); } } void FFMPEG::stop_muxer() { if( running() ) { done = 1; mux_lock->unlock(); } join(); } void FFMPEG::flow_off() { if( !flow ) return; flow_lock->lock("FFMPEG::flow_off"); flow = 0; } void FFMPEG::flow_on() { if( flow ) return; flow = 1; flow_lock->unlock(); } void FFMPEG::flow_ctl() { while( !flow ) { flow_lock->lock("FFMPEG::flow_ctl"); flow_lock->unlock(); } } int FFMPEG::mux_audio(FFrame *frm) { FFPacket pkt; FFStream *fst = frm->fst; AVCodecContext *ctx = fst->st->codec; AVFrame *frame = *frm; AVRational tick_rate = {1, ctx->sample_rate}; frame->pts = av_rescale_q(frm->position, tick_rate, ctx->time_base); int got_packet = 0; int ret = fst->encode_frame(pkt, frame, got_packet); if( ret >= 0 && got_packet ) ret = fst->write_packet(pkt); if( ret < 0 ) ff_err(ret, "FFMPEG::mux_audio"); return ret >= 0 ? 0 : 1; } int FFMPEG::mux_video(FFrame *frm) { FFPacket pkt; FFStream *fst = frm->fst; AVFrame *frame = *frm; frame->pts = frm->position; int got_packet = 0; int ret = fst->encode_frame(pkt, frame, got_packet); if( ret >= 0 && got_packet ) ret = fst->write_packet(pkt); if( ret < 0 ) ff_err(ret, "FFMPEG::mux_video"); return ret >= 0 ? 
		0 : 1;
}

// interleave queued audio/video frames in time order and write them out
void FFMPEG::mux()
{
	for(;;) {
		double atm = -1, vtm = -1;
		FFrame *afrm = 0, *vfrm = 0;
		int demand = 0;
		for( int i=0; i<ffaudio.size(); ++i ) {
			FFStream *fst = ffaudio[i];
			if( fst->frm_count < 3 ) { demand = 1; flow_on(); }
			FFrame *frm = fst->frms.first;
			if( !frm ) { if( !done ) return; continue; }
			double tm = to_secs(frm->position, fst->st->codec->time_base);
			if( atm < 0 || tm < atm ) { atm = tm;  afrm = frm; }
		}
		for( int i=0; i<ffvideo.size(); ++i ) {
			FFStream *fst = ffvideo[i];
			if( fst->frm_count < 2 ) { demand = 1; flow_on(); }
			FFrame *frm = fst->frms.first;
			if( !frm ) { if( !done ) return; continue; }
			double tm = to_secs(frm->position, fst->st->codec->time_base);
			if( vtm < 0 || tm < vtm ) { vtm = tm;  vfrm = frm; }
		}
		if( !demand ) flow_off();
		if( !afrm && !vfrm ) break;
		int v = !afrm ? -1 : !vfrm ? 1 : av_compare_ts(
			vfrm->position, vfrm->fst->st->codec->time_base,
			afrm->position, afrm->fst->st->codec->time_base);
		FFrame *frm = v <= 0 ? vfrm : afrm;
		if( frm == afrm ) mux_audio(frm);
		if( frm == vfrm ) mux_video(frm);
		frm->dequeue();
		delete frm;
	}
}

void FFMPEG::run()
{
	while( !done ) {
		mux_lock->lock("FFMPEG::run");
		if( !done ) mux();
	}
	mux();
	for( int i=0; i<ffaudio.size(); ++i )
		ffaudio[i]->flush();
	for( int i=0; i<ffvideo.size(); ++i )
		ffvideo[i]->flush();
}

int FFMPEG::ff_total_audio_channels()
{
	return astrm_index.size();
}

int FFMPEG::ff_total_astreams()
{
	return ffaudio.size();
}

int FFMPEG::ff_audio_channels(int stream)
{
	return ffaudio[stream]->channels;
}

int FFMPEG::ff_sample_rate(int stream)
{
	return ffaudio[stream]->sample_rate;
}

const char* FFMPEG::ff_audio_format(int stream)
{
	AVStream *st = ffaudio[stream]->st;
	AVCodecID id = st->codec->codec_id;
	const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
	return desc ? desc->name : "Unknown";
}

int FFMPEG::ff_audio_pid(int stream)
{
	return ffaudio[stream]->st->id;
}

int64_t FFMPEG::ff_audio_samples(int stream)
{
	return ffaudio[stream]->length;
}

// find audio astream/channels with this program,
//   or all program audio channels (astream=-1)
int FFMPEG::ff_audio_for_video(int vstream, int astream, int64_t &channel_mask)
{
	channel_mask = 0;
	int pidx = -1;
	int vidx = ffvideo[vstream]->fidx;
// find first program with this video stream
	for( int i=0; pidx<0 && i<(int)fmt_ctx->nb_programs; ++i ) {
		AVProgram *pgrm = fmt_ctx->programs[i];
		for( int j=0; pidx<0 && j<(int)pgrm->nb_stream_indexes; ++j ) {
			int st_idx = pgrm->stream_index[j];
			AVStream *st = fmt_ctx->streams[st_idx];
			if( st->codec->codec_type != AVMEDIA_TYPE_VIDEO ) continue;
			if( st_idx == vidx ) pidx = i;
		}
	}
	if( pidx < 0 ) return -1;
	int ret = -1;
	int64_t channels = 0;
	AVProgram *pgrm = fmt_ctx->programs[pidx];
	for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
		int aidx = pgrm->stream_index[j];
		AVStream *st = fmt_ctx->streams[aidx];
		if( st->codec->codec_type != AVMEDIA_TYPE_AUDIO ) continue;
		if( astream > 0 ) { --astream;  continue; }
		int astrm = -1;
		for( int i=0; astrm<0 && i<ffaudio.size(); ++i )
			if( ffaudio[i]->fidx == aidx ) astrm = i;
		if( astrm >= 0 ) {
			if( ret < 0 ) ret = astrm;
			int64_t mask = (1 << ffaudio[astrm]->channels) - 1;
			channels |= mask << ffaudio[astrm]->channel0;
		}
		if( !astream ) break;
	}
	channel_mask = channels;
	return ret;
}

int FFMPEG::ff_total_video_layers()
{
	return vstrm_index.size();
}

int FFMPEG::ff_total_vstreams()
{
	return ffvideo.size();
}

int FFMPEG::ff_video_width(int stream)
{
	return ffvideo[stream]->width;
}

int FFMPEG::ff_video_height(int stream)
{
	return ffvideo[stream]->height;
}

int FFMPEG::ff_set_video_width(int stream, int width)
{
	int w = ffvideo[stream]->width;
	ffvideo[stream]->width = width;
	return w;
}

int FFMPEG::ff_set_video_height(int stream, int height)
{
	int h = ffvideo[stream]->height;
	ffvideo[stream]->height = height;
	return h;
}

int
FFMPEG::ff_coded_width(int stream) { AVStream *st = ffvideo[stream]->st; return st->codec->coded_width; } int FFMPEG::ff_coded_height(int stream) { AVStream *st = ffvideo[stream]->st; return st->codec->coded_height; } float FFMPEG::ff_aspect_ratio(int stream) { return ffvideo[stream]->aspect_ratio; } const char* FFMPEG::ff_video_format(int stream) { AVStream *st = ffvideo[stream]->st; AVCodecID id = st->codec->codec_id; const AVCodecDescriptor *desc = avcodec_descriptor_get(id); return desc ? desc->name : "Unknown"; } double FFMPEG::ff_frame_rate(int stream) { return ffvideo[stream]->frame_rate; } int64_t FFMPEG::ff_video_frames(int stream) { return ffvideo[stream]->length; } int FFMPEG::ff_video_pid(int stream) { return ffvideo[stream]->st->id; } int FFMPEG::ff_cpus() { return file_base->file->cpus; } int FFVideoStream::create_filter(const char *filter_spec, AVCodecContext *src_ctx, AVCodecContext *sink_ctx) { avfilter_register_all(); AVFilter *filter = avfilter_get_by_name(filter_spec); if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) { ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec); return -1; } filter_graph = avfilter_graph_alloc(); AVFilter *buffersrc = avfilter_get_by_name("buffer"); AVFilter *buffersink = avfilter_get_by_name("buffersink"); int ret = 0; char args[BCTEXTLEN]; snprintf(args, sizeof(args), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", src_ctx->width, src_ctx->height, src_ctx->pix_fmt, src_ctx->time_base.num, src_ctx->time_base.den, src_ctx->sample_aspect_ratio.num, src_ctx->sample_aspect_ratio.den); if( ret >= 0 ) ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); if( ret >= 0 ) ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); if( ret >= 0 ) ret = av_opt_set_bin(buffersink_ctx, "pix_fmts", (uint8_t*)&sink_ctx->pix_fmt, sizeof(sink_ctx->pix_fmt), AV_OPT_SEARCH_CHILDREN); if( ret < 0 ) ff_err(ret, "FFVideoStream::create_filter"); else ret = FFStream::create_filter(filter_spec); return ret >= 0 ? 
int FFAudioStream::create_filter(const char *filter_spec,
		AVCodecContext *src_ctx, AVCodecContext *sink_ctx)
{
	avfilter_register_all();
	AVFilter *filter = avfilter_get_by_name(filter_spec);
	if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
		ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
		return -1;
	}
	filter_graph = avfilter_graph_alloc();
	AVFilter *buffersrc = avfilter_get_by_name("abuffer");
	AVFilter *buffersink = avfilter_get_by_name("abuffersink");

	int ret = 0;  char args[BCTEXTLEN];
	snprintf(args, sizeof(args),
		"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
		src_ctx->time_base.num, src_ctx->time_base.den, src_ctx->sample_rate,
		av_get_sample_fmt_name(src_ctx->sample_fmt), src_ctx->channel_layout);
	if( ret >= 0 )
		ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc,
			"in", args, NULL, filter_graph);
	if( ret >= 0 )
		ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink,
			"out", NULL, NULL, filter_graph);
	if( ret >= 0 )
		ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
			(uint8_t*)&sink_ctx->sample_fmt, sizeof(sink_ctx->sample_fmt),
			AV_OPT_SEARCH_CHILDREN);
	if( ret >= 0 )
		ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
			(uint8_t*)&sink_ctx->channel_layout, sizeof(sink_ctx->channel_layout),
			AV_OPT_SEARCH_CHILDREN);
	if( ret >= 0 )
		ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
			(uint8_t*)&sink_ctx->sample_rate, sizeof(sink_ctx->sample_rate),
			AV_OPT_SEARCH_CHILDREN);
	if( ret < 0 )
		ff_err(ret, "FFAudioStream::create_filter");
	else
		ret = FFStream::create_filter(filter_spec);
	return ret >= 0 ? 0 : 1;
}
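// FFStream::create_filter below wires the graph endpoints: the caller-created
// "in" (buffer/abuffer) context is handed to the parser as an open output pad
// and the "out" (buffersink/abuffersink) context as an open input pad, so the
// user-supplied spec is parsed in between.  Assuming, purely as an example, a
// video spec of "scale=1280:720", the resulting chain would be:
//
//   buffer ("in") -> scale=1280:720 -> buffersink ("out")
//
// The example filter string is an illustration, not a value used by this file.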
int FFStream::create_filter(const char *filter_spec)
{
	/* Endpoints for the filter graph. */
	AVFilterInOut *outputs = avfilter_inout_alloc();
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = 0;
	AVFilterInOut *inputs = avfilter_inout_alloc();
	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = 0;
	int ret = !outputs->name || !inputs->name ? -1 : 0;
	if( ret >= 0 )
		ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
			&inputs, &outputs, NULL);
	if( ret >= 0 )
		ret = avfilter_graph_config(filter_graph, NULL);
	if( ret < 0 )
		ff_err(ret, "FFStream::create_filter");
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);
	return ret;
}

void FFStream::add_bsfilter(const char *bsf, const char *ap)
{
	bsfilter.append(new BSFilter(bsf,ap));
}

int FFStream::bs_filter(AVPacket *pkt)
{
	if( !bsfilter.size() ) return 0;
	av_packet_split_side_data(pkt);

	int ret = 0;
	for( int i=0; i<bsfilter.size(); ++i ) {
		AVPacket bspkt = *pkt;
		ret = av_bitstream_filter_filter(bsfilter[i]->bsfc, st->codec,
			bsfilter[i]->args, &bspkt.data, &bspkt.size,
			pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY);
		if( ret < 0 ) break;
		int size = bspkt.size;
		uint8_t *data = bspkt.data;
		if( !ret && bspkt.data != pkt->data ) {
			size = bspkt.size;
			data = (uint8_t *)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
			if( !data ) { ret = AVERROR(ENOMEM);  break; }
			memcpy(data, bspkt.data, size);
			memset(data+size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
			ret = 1;
		}
		if( ret > 0 ) {
			pkt->side_data = 0;  pkt->side_data_elems = 0;
			av_packet_unref(pkt);
			ret = av_packet_from_data(&bspkt, data, size);
			if( ret < 0 ) break;
		}
		*pkt = bspkt;
	}
	if( ret < 0 )
		ff_err(ret,"FFStream::bs_filter");
	return ret;
}
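// FFMPEG::scan below is the table-of-contents pass: it walks every packet once,
// records the byte offset of each keyframe as a video/audio index mark, and
// decodes the audio so IndexState can accumulate waveform data.  A sketch of
// the caller's side, kept out of the build; the helper name "scan_index" and
// the surrounding cancel handling are assumptions, not this file's API:
#if 0
static void scan_index(FFMPEG *ffmpeg, IndexState *index_state)
{
	int64_t scan_position = 0;
	int canceled = 0;   // a progress dialog would normally be able to set this
	if( ffmpeg->scan(index_state, &scan_position, &canceled) < 0 )
		eprintf("index scan failed\n");
}
#endif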
int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
{
	AVPacket pkt;
	av_init_packet(&pkt);
	AVFrame *frame = av_frame_alloc();
	if( !frame ) {
		fprintf(stderr, "FFMPEG::scan: av_frame_alloc failed\n");
		return -1;
	}

	index_state->add_video_markers(ffvideo.size());
	index_state->add_audio_markers(ffaudio.size());

	for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
		AVDictionary *copts = 0;
		av_dict_copy(&copts, opts, 0);
		AVStream *st = fmt_ctx->streams[i];
		AVCodecID codec_id = st->codec->codec_id;
		AVCodec *decoder = avcodec_find_decoder(codec_id);
		if( avcodec_open2(st->codec, decoder, &copts) < 0 )
			fprintf(stderr, "FFMPEG::scan: codec open failed\n");
		av_dict_free(&copts);
	}

	int errs = 0;
	for( int64_t count=0; !*canceled; ++count ) {
		av_packet_unref(&pkt);
		pkt.data = 0;  pkt.size = 0;
		int ret = av_read_frame(fmt_ctx, &pkt);
		if( ret < 0 ) {
			if( ret == AVERROR_EOF ) break;
			if( ++errs > 100 ) {
				ff_err(ret, "over 100 read_frame errs\n");
				break;
			}
			continue;
		}
		if( !pkt.data ) continue;
		int i = pkt.stream_index;
		if( i < 0 || i >= (int)fmt_ctx->nb_streams ) continue;
		AVStream *st = fmt_ctx->streams[i];
		AVCodecContext *avctx = st->codec;
		if( pkt.pos > *scan_position ) *scan_position = pkt.pos;

		switch( avctx->codec_type ) {
		case AVMEDIA_TYPE_VIDEO: {
			int vidx = ffvideo.size();
			while( --vidx>=0 && ffvideo[vidx]->fidx != i );
			if( vidx < 0 ) break;
			FFVideoStream *vid = ffvideo[vidx];
			int64_t tstmp = pkt.dts;
			if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.pts;
			if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
				if( vid->nudge != AV_NOPTS_VALUE ) tstmp -= vid->nudge;
				double secs = to_secs(tstmp, st->time_base);
				int64_t frm = secs * vid->frame_rate + 0.5;
				if( frm < 0 ) frm = 0;
				index_state->put_video_mark(vidx, frm, pkt.pos);
			}
#if 0
			while( pkt.size > 0 ) {
				av_frame_unref(frame);
				int got_frame = 0;
				int ret = vid->decode_frame(&pkt, frame, got_frame);
				if( ret <= 0 ) break;
//				if( got_frame ) {}
				pkt.data += ret;
				pkt.size -= ret;
			}
#endif
			break; }
		case AVMEDIA_TYPE_AUDIO: {
			int aidx = ffaudio.size();
			while( --aidx>=0 && ffaudio[aidx]->fidx != i );
			if( aidx < 0 ) break;
			FFAudioStream *aud = ffaudio[aidx];
			int64_t tstmp = pkt.pts;
			if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.dts;
			if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
				if( aud->nudge != AV_NOPTS_VALUE ) tstmp -= aud->nudge;
				double secs = to_secs(tstmp, st->time_base);
				int64_t sample = secs * aud->sample_rate + 0.5;
				if( sample < 0 ) sample = 0;
				index_state->put_audio_mark(aidx, sample, pkt.pos);
			}
			while( pkt.size > 0 ) {
				int ch = aud->channel0,  nch = aud->channels;
				int64_t pos = index_state->pos(ch);
				if( pos != aud->curr_pos ) {
					if( abs(pos-aud->curr_pos) > 1 )
						printf("audio%d pad %ld %ld (%ld)\n", aud->idx,
							pos, aud->curr_pos, pos-aud->curr_pos);
					index_state->pad_data(ch, nch, aud->curr_pos);
				}
				av_frame_unref(frame);
				int got_frame = 0;
				int ret = aud->decode_frame(&pkt, frame, got_frame);
				if( ret <= 0 ) break;
				if( got_frame && frame->channels == nch ) {
					float *samples;
					int len = aud->get_samples(samples,
						&frame->extended_data[0], frame->nb_samples);
					for( int i=0; i<nch; ++i )
						index_state->put_data(ch+i, nch, samples+i, len);
					aud->curr_pos += len;
				}
				pkt.data += ret;
				pkt.size -= ret;
			}
			break; }
		default: break;
		}
	}
	av_frame_free(&frame);
	return 0;
}

void FFStream::load_markers(IndexMarks &marks, double rate)
{
	index_markers = &marks;
	int in = 0;
	int64_t sz = marks.size();
	int max_entries = fmt_ctx->max_index_size / sizeof(AVIndexEntry) - 1;
	int nb_ent = st->nb_index_entries;
// some formats already have an index
	if( nb_ent > 0 ) {
		AVIndexEntry *ep = &st->index_entries[nb_ent-1];
		int64_t tstmp = ep->timestamp;
		if( nudge != AV_NOPTS_VALUE ) tstmp -= nudge;
		double secs = ffmpeg->to_secs(tstmp, st->time_base);
		int64_t no = secs * rate;
		while( in < sz && marks[in].no <= no ) ++in;
	}
	int64_t len = sz - in;
	int64_t count = max_entries - nb_ent;
	if( count > len ) count = len;
	for( int i=0; i<count; ++i ) {
		int64_t no = marks[in+i].no, pos = marks[in+i].pos;
		double secs = no / rate;
		int64_t tstmp = secs * st->time_base.den / st->time_base.num;
		if( nudge != AV_NOPTS_VALUE ) tstmp += nudge;
		av_add_index_entry(st, pos, tstmp, 0, 0, AVINDEX_KEYFRAME);
	}
}
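// load_markers feeds the marks gathered by FFMPEG::scan back into libavformat's
// own seek table: each mark's frame/sample number is converted to a stream
// timestamp (re-applying the nudge that scan subtracted) and registered with
// av_add_index_entry as a keyframe, so later seeks can reuse the byte positions
// recorded in the Cinelerra index instead of re-parsing the file.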