12 // work arounds (centos)
15 #define INT64_MAX 9223372036854775807LL
17 #define MAX_RETRY 1000
18 // max pts/curr_pos drift allowed before correction (in seconds)
19 #define AUDIO_PTS_TOLERANCE 0.04
22 #include "bccmodels.h"
25 #include "edlsession.h"
27 #include "fileffmpeg.h"
28 #include "filesystem.h"
30 #include "indexfile.h"
31 #include "interlacemodes.h"
36 #include "mainerror.h"
38 #include "preferences.h"
44 #define av_register_all(s)
45 #define avfilter_register_all(s)
48 #define VIDEO_INBUF_SIZE 0x10000
49 #define AUDIO_INBUF_SIZE 0x10000
50 #define VIDEO_REFILL_THRESH 0
51 #define AUDIO_REFILL_THRESH 0x1000
52 #define AUDIO_MIN_FRAME_SZ 128
54 #define FF_ESTM_TIMES 0x0001
55 #define FF_BAD_TIMES 0x0002
// Global serialization lock used by FFStream::ff_lock()/ff_unlock().
57 Mutex FFMPEG::fflock("FFMPEG::fflock");
// ff_err: report an ffmpeg failure on stderr — formats the caller's
// printf-style message, then appends av_strerror() text for code `ret`.
// (Listing elided: msg declaration and va_start/va_end lines not shown.)
59 static void ff_err(int ret, const char *fmt, ...)
64 vsnprintf(msg, sizeof(msg), fmt, ap);
66 char errmsg[BCSTRLEN];
67 av_strerror(ret, errmsg, sizeof(errmsg));
68 fprintf(stderr,_("%s err: %s\n"),msg, errmsg);
// FFPacket: init fragment zeroes the packet data/size fields;
// finit releases any buffer still referenced by the packet.
74 pkt.data = 0; pkt.size = 0;
76 void FFPacket::finit()
78 av_packet_unref(&pkt);
// FFrame: wrapper around an AVFrame owned by an FFStream.
// Ctor allocates the frame and records the stream's init_frame()
// result (presumably checked later via initted() — confirm).
81 FFrame::FFrame(FFStream *fst)
84 frm = av_frame_alloc();
85 init = fst->init_frame(frm);
// queue/dequeue: link/unlink this frame on the owning stream's list.
93 void FFrame::queue(int64_t pos)
99 void FFrame::dequeue()
// set_hw_frame: attach a GPU-side frame (used by the vaapi encode path).
104 void FFrame::set_hw_frame(AVFrame *frame)
// FFAudioStream sample history: bfr..lmt is a circular buffer of
// interleaved floats (nch channels); inp = write ptr, outp = read ptr,
// pointers wrap at lmt.
// read(): copy len frames out, wrapping the read cursor at lmt.
110 int FFAudioStream::read(float *fp, long len)
118 while( --k >= 0 ) *fp++ = *op++;
119 if( op >= lmt ) op = bfr;
// realloc(nsz,nch,len): allocate nsz frames of nch channels, preserving
// the most recent `len` frames by reading them into the new buffer.
124 void FFAudioStream::realloc(long nsz, int nch, long len)
126 long bsz = nsz * nch;
127 float *np = new float[bsz];
128 inp = np + read(np, len) * nch;
133 delete [] bfr; bfr = np;
// realloc(nsz,nch): grow only when needed; history is discarded when
// the channel count changes.
136 void FFAudioStream::realloc(long nsz, int nch)
138 if( nsz > sz || this->nch != nch ) {
139 long len = this->nch != nch ? 0 : hpos;
140 if( len > sz ) len = sz;
142 realloc(nsz, nch, len);
// reserve(): ensure capacity for nsz frames, compacting unread data to
// the start of the buffer.
146 void FFAudioStream::reserve(long nsz, int nch)
148 long len = (inp - outp) / nch;
150 if( nsz > sz || this->nch != nch ) {
151 if( this->nch != nch ) len = 0;
152 realloc(nsz, nch, len);
155 if( (len*=nch) > 0 && bfr != outp )
156 memmove(bfr, outp, len*sizeof(*bfr));
// used(): frames queued between outp and inp (wrap-aware).
161 long FFAudioStream::used()
163 long len = inp>=outp ? inp-outp : inp-bfr + lmt-outp;
// avail(): free frames remaining before the writer would reach outp.
166 long FFAudioStream::avail()
169 if( in1 >= lmt ) in1 = bfr;
170 long len = outp >= in1 ? outp-in1 : outp-bfr + lmt-in1;
// reset_history(): drop all buffered samples.
// NOTE(review): memset length is lmt-bfr float *elements* but memset
// counts bytes — looks like it should be (lmt-bfr)*sizeof(*bfr); confirm.
173 void FFAudioStream::reset_history()
177 memset(bfr, 0, lmt-bfr);
// iseek(): position outp `ofs` frames behind inp, clamped to history.
180 void FFAudioStream::iseek(int64_t ofs)
182 if( ofs > hpos ) ofs = hpos;
183 if( ofs > sz ) ofs = sz;
184 outp = inp - ofs*nch;
185 if( outp < bfr ) outp += sz*nch;
// get_outp/put_inp: expose raw cursor pointers; put_inp returns the
// buffered frame count after advancing.
188 float *FFAudioStream::get_outp(int ofs)
195 int64_t FFAudioStream::put_inp(int ofs)
198 return (inp-outp) / nch;
// write(): append len frames of interleaved floats, wrapping at lmt.
201 int FFAudioStream::write(const float *fp, long len)
209 while( --k >= 0 ) *ip++ = *fp++;
210 if( ip >= lmt ) ip = bfr;
// zero(): append len frames of silence.
217 int FFAudioStream::zero(long len)
225 while( --k >= 0 ) *ip++ = 0;
226 if( ip >= lmt ) ip = bfr;
233 // does not advance outp
// read(dp,len,ch): deinterleave one channel into a double buffer,
// wrapping the strided cursor at lmt.
234 int FFAudioStream::read(double *dp, long len, int ch)
237 float *op = outp + ch;
238 float *lmt1 = lmt + nch-1;
240 int k = (lmt1 - op) / nch;
243 while( --k >= 0 ) { *dp++ = *op; op += nch; }
244 if( op >= lmt ) op -= sz*nch;
249 // load linear buffer, no wrapping allowed, does not advance inp
// write(dp,len,ch): interleave one channel of doubles into the buffer.
250 int FFAudioStream::write(const double *dp, long len, int ch)
253 float *ip = inp + ch;
254 while( --n >= 0 ) { *ip = *dp++; ip += nch; }
// FFStream: per-stream decode/encode state shared by audio and video.
// Ctor initializes bookkeeping; nudge (pts offset) starts as a sentinel.
259 FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx)
261 this->ffmpeg = ffmpeg;
264 frm_lock = new Mutex("FFStream::frm_lock");
273 nudge = AV_NOPTS_VALUE;
274 seek_pos = curr_pos = 0;
275 seeking = 0; seeked = 1;
277 reading = writing = 0;
278 hw_pixfmt = AV_PIX_FMT_NONE;
// Dtor: close the codec if it was actually activated, then free every
// owned ffmpeg object (codec ctx, private demuxer, hw device, bitstream
// filter, frame list, filter graph, frames, stats state) under frm_lock.
291 FFStream::~FFStream()
293 frm_lock->lock("FFStream::~FFStream");
294 if( reading > 0 || writing > 0 ) avcodec_close(avctx);
295 if( avctx ) avcodec_free_context(&avctx);
296 if( fmt_ctx ) avformat_close_input(&fmt_ctx);
297 if( hw_device_ctx ) av_buffer_unref(&hw_device_ctx);
298 if( bsfc ) av_bsf_free(&bsfc);
299 while( frms.first ) frms.remove(frms.first);
300 if( filter_graph ) avfilter_graph_free(&filter_graph);
301 if( frame ) av_frame_free(&frame);
302 if( fframe ) av_frame_free(&fframe);
303 if( probe_frame ) av_frame_free(&probe_frame);
306 if( stats_fp ) fclose(stats_fp);
307 if( stats_in ) av_freep(&stats_in);
308 delete [] stats_filename;
// ff_lock/ff_unlock: serialize on the single global FFMPEG::fflock.
311 void FFStream::ff_lock(const char *cp)
313 FFMPEG::fflock.lock(cp);
316 void FFStream::ff_unlock()
318 FFMPEG::fflock.unlock();
// queue(): hand an encoded frame to the muxer and wake it via mux_lock;
// dequeue(): unlink a frame from the pending list.
321 void FFStream::queue(FFrame *frm)
323 frm_lock->lock("FFStream::queue");
327 ffmpeg->mux_lock->unlock();
330 void FFStream::dequeue(FFrame *frm)
332 frm_lock->lock("FFStream::dequeue");
334 frms.remove_pointer(frm);
// encode_activate(): writing state is delegated to the container.
338 int FFStream::encode_activate()
341 writing = ffmpeg->encode_activate();
345 // this is a global parameter that really should be in the context
346 static AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE; // protected by ff_lock
348 // goofy maneuver to attach a hw_format to an av_context
// GET_HW_PIXFMT expands to a get_format callback that always reports
// the given hardware pixel format for a codec context.
349 #define GET_HW_PIXFMT(fn, fmt) \
350 static AVPixelFormat get_hw_##fn(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) { \
353 GET_HW_PIXFMT(vaapi, AV_PIX_FMT_VAAPI)
354 GET_HW_PIXFMT(vdpau, AV_PIX_FMT_VDPAU)
355 GET_HW_PIXFMT(cuda, AV_PIX_FMT_CUDA)
356 GET_HW_PIXFMT(nv12, AV_PIX_FMT_NV12)
// get_hw_format: first-time get_format callback — scans the codec's
// offered formats for the globally requested hw_pix_fmt, then installs
// the matching per-format callback so later calls avoid the global.
358 static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
359 const enum AVPixelFormat *pix_fmts)
361 for( const enum AVPixelFormat *p=pix_fmts; *p!=AV_PIX_FMT_NONE; ++p ) {
362 if( *p != hw_pix_fmt ) continue;
364 case AV_PIX_FMT_VAAPI: ctx->get_format = get_hw_vaapi; return *p;
365 case AV_PIX_FMT_VDPAU: ctx->get_format = get_hw_vdpau; return *p;
366 case AV_PIX_FMT_CUDA: ctx->get_format = get_hw_cuda; return *p;
367 case AV_PIX_FMT_NV12: ctx->get_format = get_hw_nv12; return *p;
369 fprintf(stderr, "Unknown HW surface format: %s\n",
370 av_get_pix_fmt_name(*p));
374 fprintf(stderr, "Failed to get HW surface format.\n");
375 return hw_pix_fmt = AV_PIX_FMT_NONE;
// Base implementation: no hardware decode for this stream type.
379 AVHWDeviceType FFStream::decode_hw_activate()
381 return AV_HWDEVICE_TYPE_NONE;
// decode_hw_format signature is version-guarded: const AVCodec* since
// libavcodec 59.16.
383 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
384 int FFStream::decode_hw_format(const AVCodec *decoder, AVHWDeviceType type)
386 int FFStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
// decode_activate(): first-use initialization of the decode side.
// Opens a private demuxer on the same url, selects this stream, picks a
// decoder (honoring per-codec remaps and user-forced decoder names),
// optionally prepares hardware decode, opens the codec, and hw-probes
// one frame — falling back to software decode if the hw path fails.
392 int FFStream::decode_activate()
394 if( reading < 0 && (reading=ffmpeg->decode_activate()) > 0 ) {
395 ff_lock("FFStream::decode_activate");
397 AVDictionary *copts = 0;
398 av_dict_copy(&copts, ffmpeg->opts, 0);
400 AVHWDeviceType hw_type = decode_hw_activate();
402 // this should be avformat_copy_context(), but no copy avail
403 ret = avformat_open_input(&fmt_ctx,
404 ffmpeg->fmt_ctx->url, ffmpeg->fmt_ctx->iformat, &copts);
406 ret = avformat_find_stream_info(fmt_ctx, 0);
407 st = fmt_ctx->streams[fidx];
410 while( ret >= 0 && st != 0 && !reading ) {
411 AVCodecID codec_id = st->codecpar->codec_id;
412 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
413 const AVCodec *decoder = 0;
415 AVCodec *decoder = 0;
// user-selected decoder overrides, remembered per codec id
418 if( ffmpeg->opt_video_decoder )
419 decoder = avcodec_find_decoder_by_name(ffmpeg->opt_video_decoder);
421 ffmpeg->video_codec_remaps.update(codec_id, decoder);
423 else if( is_audio() ) {
424 if( ffmpeg->opt_audio_decoder )
425 decoder = avcodec_find_decoder_by_name(ffmpeg->opt_audio_decoder);
427 ffmpeg->audio_codec_remaps.update(codec_id, decoder);
430 decoder = avcodec_find_decoder(codec_id);
431 avctx = avcodec_alloc_context3(decoder);
433 eprintf(_("cant allocate codec context\n"));
434 ret = AVERROR(ENOMEM);
436 if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
437 ret = decode_hw_format(decoder, hw_type);
440 avcodec_parameters_to_context(avctx, st->codecpar);
441 if( !av_dict_get(copts, "threads", NULL, 0) )
442 avctx->thread_count = ffmpeg->ff_cpus();
443 ret = avcodec_open2(avctx, decoder, &copts);
// hw decode is verified by decoding one probe frame up front
445 AVFrame *hw_frame = 0;
446 if( ret >= 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
447 if( !(hw_frame=av_frame_alloc()) ) {
448 fprintf(stderr, "FFStream::decode_activate: av_frame_alloc failed\n");
449 ret = AVERROR(ENOMEM);
452 ret = decode(hw_frame);
// on hw failure: tear down hw state, rewind to start, retry in software
454 if( ret < 0 && hw_type != AV_HWDEVICE_TYPE_NONE ) {
455 ff_err(ret, "HW device init failed, using SW decode.\nfile:%s\n",
456 ffmpeg->fmt_ctx->url);
457 avcodec_close(avctx);
458 avcodec_free_context(&avctx);
459 av_buffer_unref(&hw_device_ctx);
461 av_frame_free(&hw_frame);
462 hw_type = AV_HWDEVICE_TYPE_NONE;
463 int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
465 av_seek_frame(fmt_ctx, idx, 0, flags);
466 need_packet = 1; flushed = 0;
467 seeked = 1; st_eof(0);
// keep the successfully probed frame for the first read request
471 probe_frame = hw_frame;
475 eprintf(_("open decoder failed\n"));
478 eprintf(_("can't open input file: %s\n"), ffmpeg->fmt_ctx->url);
479 av_dict_free(&copts);
// read_packet(): pull the next demuxed packet into ipkt;
// returns 0 at EOF, <0 on error (logged).
485 int FFStream::read_packet()
487 av_packet_unref(ipkt);
488 int ret = av_read_frame(fmt_ctx, ipkt);
491 if( ret == AVERROR_EOF ) return 0;
492 ff_err(ret, "FFStream::read_packet: av_read_frame failed\n");
// decode(): produce one decoded frame via the send_packet /
// receive_frame API, bounded by MAX_RETRY packets, under frm_lock.
// A pending hw probe_frame satisfies the first request directly.
499 int FFStream::decode(AVFrame *frame)
501 if( probe_frame ) { // hw probe reads first frame
502 av_frame_ref(frame, probe_frame);
503 av_frame_free(&probe_frame);
507 int retries = MAX_RETRY;
508 frm_lock->lock("FFStream::decode");
509 while( ret >= 0 && !flushed && --retries >= 0 ) {
511 if( (ret=read_packet()) < 0 ) break;
512 AVPacket *pkt = ret > 0 ? (AVPacket*)ipkt : 0;
// skip packets for other streams and empty packets
514 if( pkt->stream_index != st->index ) continue;
515 if( !pkt->data || !pkt->size ) continue;
517 if( (ret=avcodec_send_packet(avctx, pkt)) < 0 ) {
518 ff_err(ret, "FFStream::decode: avcodec_send_packet failed.\nfile:%s\n",
519 ffmpeg->fmt_ctx->url);
525 if( (ret=decode_frame(frame)) > 0 ) break;
534 fprintf(stderr, "FFStream::decode: Retry limit\n");
538 fprintf(stderr, "FFStream::decode: failed\n");
// load_filter/read_filter: push a frame into the filter graph source /
// pull a filtered frame from the sink (0 = try again, -1 = EOF).
542 int FFStream::load_filter(AVFrame *frame)
544 int ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0);
546 eprintf(_("av_buffersrc_add_frame_flags failed\n"));
550 int FFStream::read_filter(AVFrame *frame)
552 int ret = av_buffersink_get_frame(buffersink_ctx, frame);
554 if( ret == AVERROR(EAGAIN) ) return 0;
555 if( ret == AVERROR_EOF ) { st_eof(1); return -1; }
556 ff_err(ret, "FFStream::read_filter: av_buffersink_get_frame failed\n");
// read_frame(): decode directly when no filter graph is configured,
// otherwise pump decoded frames through the graph until one emerges.
562 int FFStream::read_frame(AVFrame *frame)
564 av_frame_unref(frame);
565 if( !filter_graph || !buffersrc_ctx || !buffersink_ctx )
566 return decode(frame);
567 if( !fframe && !(fframe=av_frame_alloc()) ) {
568 fprintf(stderr, "FFStream::read_frame: av_frame_alloc failed\n");
572 while( !flushed && !(ret=read_filter(frame)) ) {
573 if( (ret=decode(fframe)) < 0 ) break;
574 if( ret > 0 && (ret=load_filter(fframe)) < 0 ) break;
// write_packet(): rescale timestamps to the stream timebase and write,
// either directly or routed through the bitstream filter (bsfc).
579 int FFStream::write_packet(FFPacket &pkt)
583 av_packet_rescale_ts(pkt, avctx->time_base, st->time_base);
584 pkt->stream_index = st->index;
585 ret = av_interleaved_write_frame(ffmpeg->fmt_ctx, pkt);
// bitstream-filter path: prime bsfc parameters, then send/receive
588 bsfc->time_base_in = st->time_base;
589 avcodec_parameters_copy(bsfc->par_in, st->codecpar);
592 ret = av_bsf_send_packet(bsfc, pkt);
595 if( (ret=av_bsf_receive_packet(bsfc, bs)) < 0 ) {
596 if( ret == AVERROR(EAGAIN) ) return 0;
597 if( ret == AVERROR_EOF ) return -1;
600 //printf(" filter name %s \n", bsfc->filter[0].name);
601 //avcodec_parameters_copy(ffmpeg->fmt_ctx->streams[0]->codecpar, bsfc->par_out);
602 //avcodec_parameters_copy(st->codecpar, bsfc->par_out);
603 av_packet_rescale_ts(bs, avctx->time_base, st->time_base);
604 bs->stream_index = st->index;
605 ret = av_interleaved_write_frame(ffmpeg->fmt_ctx, bs);
609 ff_err(ret, "FFStream::write_packet: write packet failed.\nfile:%s\n",
610 ffmpeg->fmt_ctx->url);
// encode_frame(): avcodec_send_frame / avcodec_receive_packet loop;
// frame==0 drains (flushes) the encoder. Returns packet count `pkts`.
614 int FFStream::encode_frame(AVFrame *frame)
616 int pkts = 0, ret = 0;
617 for( int retry=MAX_RETRY; --retry>=0; ) {
619 ret = avcodec_send_frame(avctx, frame);
620 if( !ret && frame ) return pkts;
621 if( ret < 0 && ret != AVERROR(EAGAIN) ) break;
622 if ( ret == AVERROR(EAGAIN) && !frame ) continue;
624 ret = avcodec_receive_packet(avctx, opkt);
625 if( !frame && ret == AVERROR_EOF ) return pkts;
627 ret = write_packet(opkt);
// 2-pass encoding: persist rate-control stats as they are produced
630 if( frame && stats_fp ) {
631 ret = write_stats_file();
635 ff_err(ret, "FFStream::encode_frame: encode failed.\nfile: %s\n",
636 ffmpeg->fmt_ctx->url);
// flush(): drain the encoder and final stats; returns 0 ok, 1 error.
640 int FFStream::flush()
644 int ret = encode_frame(0);
645 if( ret >= 0 && stats_fp ) {
646 ret = write_stats_file();
650 ff_err(ret, "FFStream::flush failed\n:file:%s\n",
651 ffmpeg->fmt_ctx->url);
652 return ret >= 0 ? 0 : 1;
// 2-pass rate-control stats helpers (avctx->stats_in / stats_out).
656 int FFStream::open_stats_file()
658 stats_fp = fopen(stats_filename,"w");
659 return stats_fp ? 0 : AVERROR(errno);
662 int FFStream::close_stats_file()
665 fclose(stats_fp); stats_fp = 0;
// read_stats_file(): slurp the pass-1 stats into an av_malloc'd,
// NUL-terminated buffer and attach it to the codec context.
670 int FFStream::read_stats_file()
672 int64_t len = 0; struct stat stats_st;
673 int fd = open(stats_filename, O_RDONLY);
674 int ret = fd >= 0 ? 0: ENOENT;
675 if( !ret && fstat(fd, &stats_st) )
678 len = stats_st.st_size;
679 stats_in = (char *)av_malloc(len+1);
// read len+1 so a result != len detects both short reads and extra data
683 if( !ret && read(fd, stats_in, len+1) != len )
687 avctx->stats_in = stats_in;
691 return !ret ? 0 : AVERROR(ret);
// write_stats_file(): append the codec's stats_out text to stats_fp.
// NOTE(review): "%file:%s" contains a stray '%' — "%f" would consume a
// missing double argument in ff_err's vsnprintf; likely meant "file:%s".
694 int FFStream::write_stats_file()
697 if( avctx->stats_out && (ret=strlen(avctx->stats_out)) > 0 ) {
698 int len = fwrite(avctx->stats_out, 1, ret, stats_fp);
700 ff_err(ret = AVERROR(errno), "FFStream::write_stats_file.\n%file:%s\n",
701 ffmpeg->fmt_ctx->url);
// init_stats_file(): pass 2 reads prior stats, pass 1 opens for write.
706 int FFStream::init_stats_file()
709 if( (pass & 2) && (ret = read_stats_file()) < 0 )
710 ff_err(ret, "stat file read: %s", stats_filename);
711 if( (pass & 1) && (ret=open_stats_file()) < 0 )
712 ff_err(ret, "stat file open: %s", stats_filename);
713 return ret >= 0 ? 0 : ret;
// seek(): position the stream near media unit `no` (samples or frames,
// converted to seconds via `rate`). Prefers an index-marker seek point
// when one is within 30 seconds, converts the target to a timestamp,
// seeks the demuxer (byte seek when an indexed packet position is
// known), then scans packets up to the target and resyncs the codec.
716 int FFStream::seek(int64_t no, double rate)
718 // default ffmpeg native seek
720 int64_t pos = no, pkt_pos = -1;
721 IndexMarks *index_markers = get_markers();
722 if( index_markers && index_markers->size() > 1 ) {
723 IndexMarks &marks = *index_markers;
724 int i = marks.find(pos);
725 int64_t n = i < 0 ? (i=0) : marks[i].no;
726 // if indexed seek point not too far away (<30 secs), use index
727 if( no-n < 30*rate ) {
730 if( i < marks.size() ) pkt_pos = marks[i].pos;
734 if( pos == curr_pos ) return 0;
736 double secs = pos < 0 ? 0. : pos / rate;
737 AVRational time_base = st->time_base;
738 int64_t tstmp = time_base.num > 0 ? secs * time_base.den/time_base.num : 0;
// earliest usable timestamp differs across ffmpeg versions
739 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
740 int nb_index_entries = avformat_index_get_entries_count(st);
743 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
744 if( nb_index_entries > 0 ) tstmp = (avformat_index_get_entry(st, 0))->timestamp;
746 if( st->nb_index_entries > 0 ) tstmp = st->index_entries[0].timestamp;
748 else if( st->start_time != AV_NOPTS_VALUE ) tstmp = st->start_time;
749 #if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(58,134,100)
750 else if( st->first_dts != AV_NOPTS_VALUE ) tstmp = st->first_dts;
752 else tstmp = INT64_MIN+1;
754 else if( nudge != AV_NOPTS_VALUE ) tstmp += nudge;
757 // seek all streams using the default timebase.
758 // this is how ffmpeg and ffplay work. stream seeks are less tested.
759 tstmp = av_rescale_q(tstmp, time_base, AV_TIME_BASE_Q);
762 frm_lock->lock("FFStream::seek");
763 av_frame_free(&probe_frame);
764 avcodec_flush_buffers(avctx);
765 avformat_flush(fmt_ctx);
// use a byte seek when an indexed packet position is known
767 int64_t seek = tstmp;
768 int flags = AVSEEK_FLAG_ANY;
769 if( !(fmt_ctx->iformat->flags & AVFMT_NO_BYTE_SEEK) && pkt_pos >= 0 ) {
771 flags = AVSEEK_FLAG_BYTE;
773 int ret = avformat_seek_file(fmt_ctx, st->index, -INT64_MAX, seek, INT64_MAX, flags);
775 // finds the first index frame below the target time
776 int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
777 int ret = av_seek_frame(fmt_ctx, idx, tstmp, flags);
779 int retry = MAX_RETRY;
781 need_packet = 0; flushed = 0;
782 seeked = 1; st_eof(0);
783 // read up to retry packets, limited to npkts in stream, and not pkt.pos past pkt_pos
784 while( --retry >= 0 ) {
785 if( read_packet() <= 0 ) { ret = -1; break; }
786 if( ipkt->stream_index != st->index ) continue;
787 if( !ipkt->data || !ipkt->size ) continue;
788 if( pkt_pos >= 0 && ipkt->pos >= pkt_pos ) break;
789 if( --npkts <= 0 ) break;
790 int64_t pkt_ts = ipkt->dts != AV_NOPTS_VALUE ? ipkt->dts : ipkt->pts;
791 if( pkt_ts == AV_NOPTS_VALUE ) continue;
792 if( pkt_ts >= tstmp ) break;
795 ff_err(AVERROR(EIO), "FFStream::seek: %s\n"
796 " retry limit, pos=%jd tstmp=%jd, ",
797 ffmpeg->fmt_ctx->url, pos, tstmp);
// resync the codec on the packet found at/after the seek target
801 ret = avcodec_send_packet(avctx, ipkt);
803 //some codecs need more than one pkt to resync
804 if( ret == AVERROR_INVALIDDATA ) ret = 0;
806 ff_err(ret, "FFStream::avcodec_send_packet failed.\nseek:%s\n",
807 ffmpeg->fmt_ctx->url);
813 printf("** seek fail %jd, %jd\n", pos, tstmp);
814 seeked = need_packet = 0;
818 //printf("seeked pos = %ld, %ld\n", pos, tstmp);
819 seek_pos = curr_pos = pos;
// FFAudioStream: audio stream with a resampler and a sample-history
// ring buffer (see the buffer methods above).
823 FFAudioStream::FFAudioStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
824 : FFStream(ffmpeg, strm, fidx)
827 channel0 = channels = 0;
830 frame_sz = AUDIO_MIN_FRAME_SZ;
832 resample_context = 0;
833 swr_ichs = swr_ifmt = swr_irate = 0;
842 bfr = new float[bsz];
847 FFAudioStream::~FFAudioStream()
849 if( resample_context ) swr_free(&resample_context);
// init_swr(): (re)build the swresample context when the decoded input
// layout/format/rate changes; matching float input needs no resampler.
854 void FFAudioStream::init_swr(int ichs, int ifmt, int irate)
856 if( resample_context ) {
857 if( swr_ichs == ichs && swr_ifmt == ifmt && swr_irate == irate )
859 swr_free(&resample_context);
861 swr_ichs = ichs; swr_ifmt = ifmt; swr_irate = irate;
862 if( ichs == channels && ifmt == AV_SAMPLE_FMT_FLT && irate == sample_rate )
// fall back to a dense channel mask when no default layout exists
864 uint64_t ilayout = av_get_default_channel_layout(ichs);
865 if( !ilayout ) ilayout = ((uint64_t)1<<ichs) - 1;
866 uint64_t olayout = av_get_default_channel_layout(channels);
867 if( !olayout ) olayout = ((uint64_t)1<<channels) - 1;
868 resample_context = swr_alloc_set_opts(NULL,
869 olayout, AV_SAMPLE_FMT_FLT, sample_rate,
870 ilayout, (AVSampleFormat)ifmt, irate,
872 if( resample_context )
873 swr_init(resample_context);
// get_samples(): return decoded samples as interleaved float, using
// swr_convert into the aud_bfr scratch buffer when resampling.
876 int FFAudioStream::get_samples(float *&samples, uint8_t **data, int len)
878 samples = *(float **)data;
879 if( resample_context ) {
880 if( len > aud_bfr_sz ) {
886 aud_bfr = new float[aud_bfr_sz*channels];
888 int ret = swr_convert(resample_context,
889 (uint8_t**)&aud_bfr, aud_bfr_sz, (const uint8_t**)data, len);
891 ff_err(ret, "FFAudioStream::get_samples: swr_convert failed\n");
// load_history(): convert a decoded frame and append it to the ring.
900 int FFAudioStream::load_history(uint8_t **data, int len)
903 len = get_samples(samples, data, len);
905 // biggest user bfr since seek + frame
906 realloc(mbsz + len + 1, channels);
// decode_frame(): receive one audio frame; resynchronize curr_pos from
// best_effort_timestamp when drift exceeds AUDIO_PTS_TOLERANCE.
912 int FFAudioStream::decode_frame(AVFrame *frame)
914 int first_frame = seeked; seeked = 0;
915 frame->best_effort_timestamp = AV_NOPTS_VALUE;
916 int ret = avcodec_receive_frame(avctx, frame);
918 if( first_frame ) return 0;
919 if( ret == AVERROR(EAGAIN) ) return 0;
920 if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
921 ff_err(ret, "FFAudioStream::decode_frame: Could not read audio frame.\nfile:%s\n",
922 ffmpeg->fmt_ctx->url);
925 int64_t pkt_ts = frame->best_effort_timestamp;
926 if( pkt_ts != AV_NOPTS_VALUE ) {
927 double ts = ffmpeg->to_secs(pkt_ts - nudge, st->time_base);
928 double t = (double)curr_pos / sample_rate;
929 // some time_base clocks are very grainy, too grainy for audio (clicks, pops)
930 if( fabs(ts - t) > AUDIO_PTS_TOLERANCE )
931 curr_pos = ts * sample_rate + 0.5;
// encode_activate(): pick the encoder frame size (codec frame_size, or
// a large default for variable-frame-size codecs), then activate.
936 int FFAudioStream::encode_activate()
938 if( writing >= 0 ) return writing;
939 if( !avctx->codec ) return writing = 0;
940 frame_sz = avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE ?
941 10000 : avctx->frame_size;
942 return FFStream::encode_activate();
// load_buffer(): stage per-channel double samples into the ring buffer.
945 int64_t FFAudioStream::load_buffer(double ** const sp, int len)
947 reserve(len+1, st->codecpar->channels);
948 for( int ch=0; ch<nch; ++ch )
949 write(sp[ch], len, ch);
// in_history(): true when `pos` is still inside buffered history.
953 int FFAudioStream::in_history(int64_t pos)
955 if( pos > curr_pos ) return 0;
957 if( len > sz ) len = sz;
958 if( pos < curr_pos - len ) return 0;
// init_frame(): size an output AVFrame to the encoder's parameters.
963 int FFAudioStream::init_frame(AVFrame *frame)
965 frame->nb_samples = frame_sz;
966 frame->format = avctx->sample_fmt;
967 frame->channel_layout = avctx->channel_layout;
968 frame->sample_rate = avctx->sample_rate;
969 int ret = av_frame_get_buffer(frame, 0);
971 ff_err(ret, "FFAudioStream::init_frame: av_frame_get_buffer failed\n");
// load(): decode until `pos+len` samples are in history, zero-padding
// on a short read; bounded by len/frame_sz + MAX_RETRY iterations.
975 int FFAudioStream::load(int64_t pos, int len)
977 if( audio_seek(pos) < 0 ) return -1;
978 if( !frame && !(frame=av_frame_alloc()) ) {
979 fprintf(stderr, "FFAudioStream::load: av_frame_alloc failed\n");
982 if( mbsz < len ) mbsz = len;
983 int64_t end_pos = pos + len;
984 int ret = 0, i = len / frame_sz + MAX_RETRY;
985 while( ret>=0 && !flushed && curr_pos<end_pos && --i>=0 ) {
986 ret = read_frame(frame);
987 if( ret > 0 && frame->nb_samples > 0 ) {
988 init_swr(frame->channels, frame->format, frame->sample_rate);
989 load_history(&frame->extended_data[0], frame->nb_samples);
990 curr_pos += frame->nb_samples;
993 if( end_pos > curr_pos ) {
994 zero(end_pos - curr_pos);
997 len = curr_pos - pos;
// audio_seek(): reuse history when possible; otherwise reset and seek
// about one second early so the decoder can resync before `pos`.
1002 int FFAudioStream::audio_seek(int64_t pos)
1004 if( decode_activate() <= 0 ) return -1;
1005 if( !st->codecpar ) return -1;
1006 if( in_history(pos) ) return 0;
1007 if( pos == curr_pos ) return 0;
1008 reset_history(); mbsz = 0;
1009 // guarentee preload > 1sec samples
1010 if( (pos-=sample_rate) < 0 ) pos = 0;
1011 if( seek(pos, sample_rate) < 0 ) return -1;
// encode(): buffer incoming per-channel samples and emit
// frame_sz-sample FFrames, converting the float history to the codec's
// sample format via swr_convert. samples==0 drains the remainder.
// Returns 0 ok, 1 error.
1015 int FFAudioStream::encode(double **samples, int len)
1017 if( encode_activate() <= 0 ) return -1;
1020 int64_t count = samples ? load_buffer(samples, len) : used();
1021 int frame_sz1 = samples ? frame_sz-1 : 0;
1024 while( ret >= 0 && count > frame_sz1 ) {
1025 frm = new FFrame(this);
1026 if( (ret=frm->initted()) < 0 ) break;
1027 AVFrame *frame = *frm;
1028 len = count >= frame_sz ? frame_sz : count;
1029 float *bfrp = get_outp(len);
1030 ret = swr_convert(resample_context,
1031 (uint8_t **)frame->extended_data, len,
1032 (const uint8_t **)&bfrp, len);
1034 ff_err(ret, "FFAudioStream::encode: swr_convert failed\n");
1037 frame->nb_samples = len;
1038 frm->queue(curr_pos);
1045 return ret >= 0 ? 0 : 1;
1048 int FFAudioStream::drain()
// thin virtual-dispatch wrappers over the FFStream implementations
1053 int FFAudioStream::encode_frame(AVFrame *frame)
1055 return FFStream::encode_frame(frame);
1058 int FFAudioStream::write_packet(FFPacket &pkt)
1060 return FFStream::write_packet(pkt);
// index markers: map sample positions to packet positions for seeking;
// skipped while the index has not been verified yet.
1063 void FFAudioStream::load_markers()
1065 IndexState *index_state = ffmpeg->file_base->asset->index_state;
1066 if( !index_state || idx >= index_state->audio_markers.size() ) return;
1067 if( index_state->marker_status == MARKERS_NOTTESTED ) return;
1068 FFStream::load_markers(*index_state->audio_markers[idx], sample_rate);
1071 IndexMarks *FFAudioStream::get_markers()
1073 IndexState *index_state = ffmpeg->file_base->asset->index_state;
1074 if( !index_state || idx >= index_state->audio_markers.size() ) return 0;
1075 return index_state->audio_markers[idx];
// FFVideoStream: video stream with colormodel conversion support.
1078 FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx)
1079 : FFStream(ffmpeg, strm, fidx),
1080 FFVideoConvert(ffmpeg->ff_prefs())
1089 top_field_first = 0;
1095 FFVideoStream::~FFVideoStream()
1097 if( fconvert_ctx ) sws_freeContext(fconvert_ctx);
// decode_hw_activate(): resolve the hw decode device from (in order)
// the per-file option, $CIN_HW_DEV, then preferences; "none" disables.
// Unknown device names print the list of available device types.
1100 AVHWDeviceType FFVideoStream::decode_hw_activate()
1102 AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
1103 const char *hw_dev = ffmpeg->opt_hw_dev;
1104 if( !hw_dev ) hw_dev = getenv("CIN_HW_DEV");
1105 if( !hw_dev ) hw_dev = ffmpeg->ff_hw_dev();
1106 if( hw_dev && *hw_dev &&
1107 strcmp("none", hw_dev) && strcmp(_("none"), hw_dev) ) {
1108 type = av_hwdevice_find_type_by_name(hw_dev);
1109 if( type == AV_HWDEVICE_TYPE_NONE ) {
1110 fprintf(stderr, "Device type %s is not supported.\n", hw_dev);
1111 fprintf(stderr, "Available device types:");
1112 while( (type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE )
1113 fprintf(stderr, " %s", av_hwdevice_get_type_name(type));
1114 fprintf(stderr, "\n");
// decode_hw_format(): scan the decoder's hw configs for one matching
// `type`, record its pixel format, install get_hw_format, and create
// the hw device context. Signature version-guarded (const since 59.16).
1119 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
1120 int FFVideoStream::decode_hw_format(const AVCodec *decoder, AVHWDeviceType type)
1122 int FFVideoStream::decode_hw_format(AVCodec *decoder, AVHWDeviceType type)
1126 hw_pix_fmt = AV_PIX_FMT_NONE;
1127 for( int i=0; ; ++i ) {
1128 const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
1130 fprintf(stderr, "Decoder %s does not support device type %s.\n",
1131 decoder->name, av_hwdevice_get_type_name(type));
1135 if( (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) != 0 &&
1136 config->device_type == type ) {
1137 hw_pix_fmt = config->pix_fmt;
1141 if( hw_pix_fmt >= 0 ) {
1142 hw_pixfmt = hw_pix_fmt;
1143 avctx->get_format = get_hw_format;
1144 ret = av_hwdevice_ctx_create(&hw_device_ctx, type, 0, 0, 0);
1146 avctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
1150 ff_err(ret, "Failed HW device create.\ndev:%s\n",
1151 av_hwdevice_get_type_name(type));
// encode_hw_activate(): set up hardware encode — vaapi only. Creates
// the device context, then an NV12 hw frames context sized to the
// stream; any failure falls back to AV_HWDEVICE_TYPE_NONE (sw encode).
1158 AVHWDeviceType FFVideoStream::encode_hw_activate(const char *hw_dev)
1160 AVBufferRef *hw_device_ctx = 0;
1161 AVBufferRef *hw_frames_ref = 0;
1162 AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
1163 if( strcmp(_("none"), hw_dev) ) {
1164 type = av_hwdevice_find_type_by_name(hw_dev);
1165 if( type != AV_HWDEVICE_TYPE_VAAPI ) {
1166 fprintf(stderr, "currently, only vaapi hw encode is supported\n");
1167 type = AV_HWDEVICE_TYPE_NONE;
1170 if( type != AV_HWDEVICE_TYPE_NONE ) {
1171 int ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, 0, 0, 0);
1173 ff_err(ret, "Failed to create a HW device.\n");
1174 type = AV_HWDEVICE_TYPE_NONE;
1177 if( type != AV_HWDEVICE_TYPE_NONE ) {
1178 hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
1179 if( !hw_frames_ref ) {
1180 fprintf(stderr, "Failed to create HW frame context.\n");
1181 type = AV_HWDEVICE_TYPE_NONE;
1184 if( type != AV_HWDEVICE_TYPE_NONE ) {
1185 AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
1186 frames_ctx->format = AV_PIX_FMT_VAAPI;
1187 frames_ctx->sw_format = AV_PIX_FMT_NV12;
1188 frames_ctx->width = width;
1189 frames_ctx->height = height;
1190 frames_ctx->initial_pool_size = 0; // 200;
1191 int ret = av_hwframe_ctx_init(hw_frames_ref);
1193 avctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
1194 if( !avctx->hw_frames_ctx ) ret = AVERROR(ENOMEM);
1197 ff_err(ret, "Failed to initialize HW frame context.\n");
1198 type = AV_HWDEVICE_TYPE_NONE;
1200 av_buffer_unref(&hw_frames_ref);
// encode_hw_write(): upload the picture into a GPU surface and swap
// it into the FFrame for the vaapi encoder.
1205 int FFVideoStream::encode_hw_write(FFrame *picture)
1208 AVFrame *hw_frm = 0;
1209 switch( avctx->pix_fmt ) {
1210 case AV_PIX_FMT_VAAPI:
1211 hw_frm = av_frame_alloc();
1212 if( !hw_frm ) { ret = AVERROR(ENOMEM); break; }
1213 ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, hw_frm, 0);
1214 if( ret < 0 ) break;
1215 ret = av_hwframe_transfer_data(hw_frm, *picture, 0);
1216 if( ret < 0 ) break;
1217 picture->set_hw_frame(hw_frm);
1222 av_frame_free(&hw_frm);
1223 ff_err(ret, "Error while transferring frame data to GPU.\n");
// decode_frame(): receive one video frame; derive curr_pos from the
// best-effort timestamp (scaled by frame_rate) when one is present.
1227 int FFVideoStream::decode_frame(AVFrame *frame)
1229 int first_frame = seeked; seeked = 0;
1230 int ret = avcodec_receive_frame(avctx, frame);
1232 if( first_frame ) return 0;
1233 if( ret == AVERROR(EAGAIN) ) return 0;
1234 if( ret == AVERROR_EOF ) { st_eof(1); return 0; }
1235 ff_err(ret, "FFVideoStream::decode_frame: Could not read video frame.\nfile:%s\n,",
1236 ffmpeg->fmt_ctx->url);
1239 int64_t pkt_ts = frame->best_effort_timestamp;
1240 if( pkt_ts != AV_NOPTS_VALUE )
1241 curr_pos = ffmpeg->to_secs(pkt_ts - nudge, st->time_base) * frame_rate + 0.5;
// probe(): decode one frame at `pos` to detect the interlace mode
// (progressive / TFF / BFF), cached in ffmpeg->interlace_from_codec.
1245 int FFVideoStream::probe(int64_t pos)
1247 int ret = video_seek(pos);
1248 if( ret < 0 ) return -1;
1249 if( !frame && !(frame=av_frame_alloc()) ) {
1250 fprintf(stderr, "FFVideoStream::probe: av_frame_alloc failed\n");
1254 if (ffmpeg->interlace_from_codec) return 1;
1256 ret = read_frame(frame);
1258 //printf("codec interlace: %i \n",frame->interlaced_frame);
1259 //printf("codec tff: %i \n",frame->top_field_first);
1261 if (!frame->interlaced_frame)
1262 ffmpeg->interlace_from_codec = AV_FIELD_PROGRESSIVE;
1263 if ((frame->interlaced_frame) && (frame->top_field_first))
1264 ffmpeg->interlace_from_codec = AV_FIELD_TT;
1265 if ((frame->interlaced_frame) && (!frame->top_field_first))
1266 ffmpeg->interlace_from_codec = AV_FIELD_BB;
1267 //printf("Interlace mode from codec: %i\n", ffmpeg->interlace_from_codec);
1271 if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
1274 ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
1275 av_frame_free(&frame);
// load(): decode forward until curr_pos passes `pos`, optionally
// populating the file frame cache on reverse reads, then convert the
// last decoded frame into `vframe`'s colormodel.
1279 int FFVideoStream::load(VFrame *vframe, int64_t pos)
1281 int ret = video_seek(pos);
1282 if( ret < 0 ) return -1;
1283 if( !frame && !(frame=av_frame_alloc()) ) {
1284 fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n");
1289 int i = MAX_RETRY + pos - curr_pos;
1290 int64_t cache_start = 0;
1291 while( ret>=0 && !flushed && curr_pos<=pos && --i>=0 ) {
1292 ret = read_frame(frame);
1294 if( frame->key_frame && seeking < 0 ) {
1295 int use_cache = ffmpeg->get_use_cache();
1296 if( use_cache < 0 ) {
1297 // for reverse read, reload file frame_cache from keyframe to pos
1298 ffmpeg->purge_cache();
1299 int count = preferences->cache_size /
1300 vframe->get_data_size() / 2; // try to burn only 1/2 of cache
1301 cache_start = pos - count + 1;
1307 if( seeking > 0 && curr_pos >= cache_start && curr_pos < pos ) {
1308 int vw =vframe->get_w(), vh = vframe->get_h();
1309 int vcolor_model = vframe->get_color_model();
1310 // do not use shm here, puts too much pressure on 32bit systems
1311 VFrame *cache_frame = new VFrame(vw, vh, vcolor_model, 0);
1312 ret = convert_cmodel(cache_frame, frame);
1314 ffmpeg->put_cache_frame(cache_frame, curr_pos);
1320 if( frame->format == AV_PIX_FMT_NONE || frame->width <= 0 || frame->height <= 0 )
1323 ret = convert_cmodel(vframe, frame);
1325 ret = ret > 0 ? 1 : ret < 0 ? -1 : 0;
// video_seek(): just read forward when the target is within ~3 GOPs
// ahead; otherwise seek about 3 GOPs early so decode can resync from
// a keyframe before reaching `pos`.
1329 int FFVideoStream::video_seek(int64_t pos)
1331 if( decode_activate() <= 0 ) return -1;
1332 if( !st->codecpar ) return -1;
1333 if( pos == curr_pos-1 && !seeked ) return 0;
1334 // if close enough, just read up to current
1335 int gop = avctx->gop_size;
1336 if( gop < 4 ) gop = 4;
1337 if( gop > 64 ) gop = 64;
1338 int read_limit = curr_pos + 3*gop;
1339 if( pos >= curr_pos && pos <= read_limit ) return 0;
1340 // guarentee preload more than 2*gop frames
1341 if( seek(pos - 3*gop, frame_rate) < 0 ) return -1;
// init_frame(): size an output picture; vaapi encodes from NV12.
1345 int FFVideoStream::init_frame(AVFrame *picture)
1347 switch( avctx->pix_fmt ) {
1348 case AV_PIX_FMT_VAAPI:
1349 picture->format = AV_PIX_FMT_NV12;
1352 picture->format = avctx->pix_fmt;
1355 picture->width = avctx->width;
1356 picture->height = avctx->height;
1357 int ret = av_frame_get_buffer(picture, 32);
// convert_hw_frame(): sws-convert a frame downloaded from the GPU to
// the stream's pixel format, keeping the scaler's colorspace details
// in sync with the codec's color range/space.
1361 int FFVideoStream::convert_hw_frame(AVFrame *ifrm, AVFrame *ofrm)
1363 AVPixelFormat ifmt = (AVPixelFormat)ifrm->format;
1364 AVPixelFormat ofmt = (AVPixelFormat)st->codecpar->format;
1365 ofrm->width = ifrm->width;
1366 ofrm->height = ifrm->height;
1367 ofrm->format = ofmt;
1368 int ret = av_frame_get_buffer(ofrm, 32);
1370 ff_err(ret, "FFVideoStream::convert_hw_frame:"
1371 " av_frame_get_buffer failed\n");
1374 fconvert_ctx = sws_getCachedContext(fconvert_ctx,
1375 ifrm->width, ifrm->height, ifmt,
1376 ofrm->width, ofrm->height, ofmt,
1377 SWS_POINT, NULL, NULL, NULL);
1378 if( !fconvert_ctx ) {
1379 ff_err(AVERROR(EINVAL), "FFVideoStream::convert_hw_frame:"
1380 " sws_getCachedContext() failed\n");
1383 int codec_range = st->codecpar->color_range;
1384 int codec_space = st->codecpar->color_space;
1385 const int *codec_table = sws_getCoefficients(codec_space);
1386 int *inv_table, *table, src_range, dst_range;
1387 int brightness, contrast, saturation;
1388 if( !sws_getColorspaceDetails(fconvert_ctx,
1389 &inv_table, &src_range, &table, &dst_range,
1390 &brightness, &contrast, &saturation) ) {
// only touch the scaler when its settings differ from the codec's
1391 if( src_range != codec_range || dst_range != codec_range ||
1392 inv_table != codec_table || table != codec_table )
1393 sws_setColorspaceDetails(fconvert_ctx,
1394 codec_table, codec_range, codec_table, codec_range,
1395 brightness, contrast, saturation);
1397 ret = sws_scale(fconvert_ctx,
1398 ifrm->data, ifrm->linesize, 0, ifrm->height,
1399 ofrm->data, ofrm->linesize);
1401 ff_err(ret, "FFVideoStream::convert_hw_frame:"
1402 " sws_scale() failed\nfile: %s\n",
1403 ffmpeg->fmt_ctx->url);
// Feed a decoded frame into the filter graph. If the frame is still a GPU
// surface (hw_pixfmt), transfer it to system memory and convert it to the
// stream's codec format first, then delegate to the base-class filter path.
1409 int FFVideoStream::load_filter(AVFrame *frame)
1411 AVPixelFormat pix_fmt = (AVPixelFormat)frame->format;
1412 if( pix_fmt == hw_pixfmt ) {
// reuse the stream's scratch frame as the CPU-side landing buffer
1413 AVFrame *hw_frame = this->frame;
1414 av_frame_unref(hw_frame);
1415 int ret = av_hwframe_transfer_data(hw_frame, frame, 0);
1417 eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"),
1418 ffmpeg->fmt_ctx->url);
// original frame is recycled in place to hold the converted CPU image
1421 av_frame_unref(frame);
1422 ret = convert_hw_frame(hw_frame, frame);
1424 eprintf(_("Error converting data from GPU to CPU\nfile: %s\n"),
1425 ffmpeg->fmt_ctx->url);
1428 av_frame_unref(hw_frame);
1430 return FFStream::load_filter(frame);
// Encode one VFrame: convert it into an AVFrame at the current position and
// queue it for the muxer thread. Returns 0 on success, 1 on failure.
1433 int FFVideoStream::encode(VFrame *vframe)
1435 if( encode_activate() <= 0 ) return -1;
// NOTE(review): picture is heap-allocated here; ownership appears to pass to
// the queue (dequeue presumably frees it) -- confirm against full source.
1437 FFrame *picture = new FFrame(this);
1438 int ret = picture->initted();
1440 AVFrame *frame = *picture;
1441 frame->pts = curr_pos;
1442 ret = convert_pixfmt(vframe, frame);
// hardware encoders need the frame uploaded into a hw frames context
1444 if( ret >= 0 && avctx->hw_frames_ctx )
1445 encode_hw_write(picture);
1447 picture->queue(curr_pos);
1451 fprintf(stderr, "FFVideoStream::encode: encode failed\n");
1454 return ret >= 0 ? 0 : 1;
1457 int FFVideoStream::drain()
// Encode a single AVFrame. VAAPI frames take a dedicated send/receive loop
// (the generic FFStream path does not handle hw surfaces); everything else
// falls through to FFStream::encode_frame().
1463 int FFVideoStream::encode_frame(AVFrame *frame)
// NOTE(review): 1466-1467 dereference frame, yet 1469 re-tests frame for NULL.
// Lines 1464-1465/1468 are elided here -- a null guard may exist in the full
// source; confirm a flush call (frame == NULL) cannot crash on these stores.
1466 frame->interlaced_frame = interlaced;
1467 frame->top_field_first = top_field_first;
1469 if( frame && frame->format == AV_PIX_FMT_VAAPI ) { // ugly
1470 int ret = avcodec_send_frame(avctx, frame);
1471 for( int retry=MAX_RETRY; !ret && --retry>=0; ) {
1472 FFPacket pkt; av_init_packet(pkt);
1473 pkt->data = NULL; pkt->size = 0;
1474 if( (ret=avcodec_receive_packet(avctx, pkt)) < 0 ) {
// EAGAIN just means the encoder wants more input; not an error
1475 if( ret == AVERROR(EAGAIN) ) ret = 0; // weird
1478 ret = write_packet(pkt);
1479 pkt->stream_index = 0;
1480 av_packet_unref(pkt);
1483 ff_err(ret, "FFStream::encode_frame: vaapi encode failed.\nfile: %s\n",
1484 ffmpeg->fmt_ctx->url);
1489 return FFStream::encode_frame(frame);
// Write one packet to the muxer; fixed-fps containers get extra handling
// (elided here) before delegating to the generic FFStream writer.
1492 int FFVideoStream::write_packet(FFPacket &pkt)
1494 if( !(ffmpeg->fmt_ctx->oformat->flags & AVFMT_VARIABLE_FPS) )
1496 return FFStream::write_packet(pkt);
// Map a Cinelerra BC_* color model to the equivalent FFmpeg pixel format.
// Returns AV_PIX_FMT_NB (not a real format) for models with no direct match,
// which callers treat as "needs indirect conversion".
1499 AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
1501 switch( color_model ) {
1502 case BC_YUV422: return AV_PIX_FMT_YUYV422;
1503 case BC_RGB888: return AV_PIX_FMT_RGB24;
1504 case BC_RGBA8888: return AV_PIX_FMT_RGBA;
1505 case BC_BGR8888: return AV_PIX_FMT_BGR0;
1506 case BC_BGR888: return AV_PIX_FMT_BGR24;
1507 case BC_ARGB8888: return AV_PIX_FMT_ARGB;
1508 case BC_ABGR8888: return AV_PIX_FMT_ABGR;
1509 case BC_RGB8: return AV_PIX_FMT_RGB8;
1510 case BC_YUV420P: return AV_PIX_FMT_YUV420P;
1511 case BC_YUV422P: return AV_PIX_FMT_YUV422P;
1512 case BC_YUV444P: return AV_PIX_FMT_YUV444P;
1513 case BC_YUV411P: return AV_PIX_FMT_YUV411P;
1514 case BC_RGB565: return AV_PIX_FMT_RGB565;
1515 case BC_RGB161616: return AV_PIX_FMT_RGB48LE;
1516 case BC_RGBA16161616: return AV_PIX_FMT_RGBA64LE;
1517 case BC_AYUV16161616: return AV_PIX_FMT_AYUV64LE;
1518 case BC_GBRP: return AV_PIX_FMT_GBRP;
1522 return AV_PIX_FMT_NB;
// Inverse of color_model_to_pix_fmt(): map an FFmpeg pixel format back to the
// BC_* color model. Formats without a direct match fall through to a default
// (elided in this view) that callers test with "imodel < 0".
1525 int FFVideoConvert::pix_fmt_to_color_model(AVPixelFormat pix_fmt)
1528 case AV_PIX_FMT_YUYV422: return BC_YUV422;
1529 case AV_PIX_FMT_RGB24: return BC_RGB888;
1530 case AV_PIX_FMT_RGBA: return BC_RGBA8888;
1531 case AV_PIX_FMT_BGR0: return BC_BGR8888;
1532 case AV_PIX_FMT_BGR24: return BC_BGR888;
1533 case AV_PIX_FMT_ARGB: return BC_ARGB8888;
1534 case AV_PIX_FMT_ABGR: return BC_ABGR8888;
1535 case AV_PIX_FMT_RGB8: return BC_RGB8;
1536 case AV_PIX_FMT_YUV420P: return BC_YUV420P;
1537 case AV_PIX_FMT_YUV422P: return BC_YUV422P;
1538 case AV_PIX_FMT_YUV444P: return BC_YUV444P;
1539 case AV_PIX_FMT_YUV411P: return BC_YUV411P;
1540 case AV_PIX_FMT_RGB565: return BC_RGB565;
1541 case AV_PIX_FMT_RGB48LE: return BC_RGB161616;
1542 case AV_PIX_FMT_RGBA64LE: return BC_RGBA16161616;
1543 case AV_PIX_FMT_AYUV64LE: return BC_AYUV16161616;
1544 case AV_PIX_FMT_GBRP: return BC_GBRP;
// Convenience wrapper: allocates a scratch AVFrame for the 3-arg overload
// and frees it afterwards. Direction: AVFrame (ip) -> VFrame (frame).
1551 int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip)
1553 AVFrame *ipic = av_frame_alloc();
1554 int ret = convert_picture_vframe(frame, ip, ipic);
1555 av_frame_free(&ipic);
// Core AVFrame -> VFrame conversion. ipic is a borrowed scratch frame whose
// data/linesize pointers are aimed directly at the VFrame's memory, so
// sws_scale() writes straight into the VFrame. Returns -1 if the VFrame's
// color model has no direct FFmpeg pixel-format equivalent.
1559 int FFVideoConvert::convert_picture_vframe(VFrame *frame, AVFrame *ip, AVFrame *ipic)
1560 { // picture = vframe
1561 int cmodel = frame->get_color_model();
1562 AVPixelFormat ofmt = color_model_to_pix_fmt(cmodel);
1563 if( ofmt == AV_PIX_FMT_NB ) return -1;
1564 int size = av_image_fill_arrays(ipic->data, ipic->linesize,
1565 frame->get_data(), ofmt, frame->get_w(), frame->get_h(), 1);
1566 if( size < 0 ) return -1;
1568 int bpp = BC_CModels::calculate_pixelsize(cmodel);
1569 int ysz = bpp * frame->get_w(), usz = ysz;
// planar models keep separate y/u/v planes, so the generic fill above is
// replaced with the VFrame's own plane pointers
1579 // override av_image_fill_arrays() for planar types
1580 ipic->data[0] = frame->get_y(); ipic->linesize[0] = ysz;
1581 ipic->data[1] = frame->get_u(); ipic->linesize[1] = usz;
1582 ipic->data[2] = frame->get_v(); ipic->linesize[2] = usz;
1585 ipic->data[0] = frame->get_data();
1586 ipic->linesize[0] = frame->get_bytes_per_line();
1590 AVPixelFormat pix_fmt = (AVPixelFormat)ip->format;
1591 FFVideoStream *vid =(FFVideoStream *)this;
// GPU surfaces must be downloaded to sw_frame before swscale can read them
1592 if( pix_fmt == vid->hw_pixfmt ) {
1594 if( !sw_frame && !(sw_frame=av_frame_alloc()) )
1595 ret = AVERROR(ENOMEM);
1597 ret = av_hwframe_transfer_data(sw_frame, ip, 0);
1599 pix_fmt = (AVPixelFormat)ip->format;
1602 eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"),
1603 vid->ffmpeg->fmt_ctx->url);
1607 convert_ctx = sws_getCachedContext(convert_ctx, ip->width, ip->height, pix_fmt,
1608 frame->get_w(), frame->get_h(), ofmt, SWS_POINT, NULL, NULL, NULL);
1609 if( !convert_ctx ) {
1610 fprintf(stderr, "FFVideoConvert::convert_picture_frame:"
1611 " sws_getCachedContext() failed\n");
// map the user's preference settings onto swscale range/space constants
1615 int color_range = 0;
1616 switch( preferences->yuv_color_range ) {
1617 case BC_COLORS_JPEG: color_range = 1; break;
1618 case BC_COLORS_MPEG: color_range = 0; break;
1620 int color_space = SWS_CS_ITU601;
1621 switch( preferences->yuv_color_space ) {
1622 case BC_COLORS_BT601_PAL: color_space = SWS_CS_ITU601; break;
1623 case BC_COLORS_BT601_NTSC: color_space = SWS_CS_SMPTE170M; break;
1624 case BC_COLORS_BT709: color_space = SWS_CS_ITU709; break;
1625 case BC_COLORS_BT2020_NCL:
1626 case BC_COLORS_BT2020_CL: color_space = SWS_CS_BT2020; break;
1628 const int *color_table = sws_getCoefficients(color_space);
1630 int *inv_table, *table, src_range, dst_range;
1631 int brightness, contrast, saturation;
// pointer comparison is deliberate: sws_getCoefficients returns shared tables
1632 if( !sws_getColorspaceDetails(convert_ctx,
1633 &inv_table, &src_range, &table, &dst_range,
1634 &brightness, &contrast, &saturation) ) {
1635 if( src_range != color_range || dst_range != color_range ||
1636 inv_table != color_table || table != color_table )
1637 sws_setColorspaceDetails(convert_ctx,
1638 color_table, color_range, color_table, color_range,
1639 brightness, contrast, saturation);
1642 int ret = sws_scale(convert_ctx, ip->data, ip->linesize, 0, ip->height,
1643 ipic->data, ipic->linesize);
1645 ff_err(ret, "FFVideoConvert::convert_picture_frame: sws_scale() failed\nfile: %s\n",
1646 vid->ffmpeg->fmt_ctx->url);
// Convert a decoded AVFrame into the given VFrame. Tries a direct swscale
// transfer first; if the VFrame's model has no FFmpeg equivalent, converts via
// an intermediate VFrame whose model matches the source's yuv/rgb family,
// alpha, and bit depth, then lets VFrame::transfer_from finish the job.
1652 int FFVideoConvert::convert_cmodel(VFrame *frame, AVFrame *ip)
1654 // try direct transfer
1655 if( !convert_picture_vframe(frame, ip) ) return 1;
1656 // use indirect transfer
1657 AVPixelFormat ifmt = (AVPixelFormat)ip->format;
1658 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(ifmt);
// deepest component of the source format decides 8- vs 16-bit intermediate
1660 for( int i = 0; i <desc->nb_components; ++i ) {
1661 int bits = desc->comp[i].depth;
1662 if( bits > max_bits ) max_bits = bits;
1664 int imodel = pix_fmt_to_color_model(ifmt);
1665 int imodel_is_yuv = BC_CModels::is_yuv(imodel);
1666 int cmodel = frame->get_color_model();
1667 int cmodel_is_yuv = BC_CModels::is_yuv(cmodel);
// pick an intermediate that stays in the target's colorspace family
1668 if( imodel < 0 || imodel_is_yuv != cmodel_is_yuv ) {
1669 imodel = cmodel_is_yuv ?
1670 (BC_CModels::has_alpha(cmodel) ?
1672 (max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P)) :
1673 (BC_CModels::has_alpha(cmodel) ?
1674 (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
1675 (max_bits > 8 ? BC_RGB161616 : BC_RGB888)) ;
1677 VFrame vframe(ip->width, ip->height, imodel);
1678 if( convert_picture_vframe(&vframe, ip) ) return -1;
1679 frame->transfer_from(&vframe);
// convert_cmodel() plus metadata: copies every entry of the source frame's
// AVDictionary into the VFrame's parameter hash.
1683 int FFVideoConvert::transfer_cmodel(VFrame *frame, AVFrame *ifp)
1685 int ret = convert_cmodel(frame, ifp);
1687 const AVDictionary *src = ifp->metadata;
1688 AVDictionaryEntry *t = NULL;
1689 BC_Hash *hp = frame->get_params();
// empty key + IGNORE_SUFFIX iterates all dictionary entries
1691 while( (t=av_dict_get(src, "", t, AV_DICT_IGNORE_SUFFIX)) )
1692 hp->update(t->key, t->value);
// Convenience wrapper for the encode direction (VFrame -> AVFrame):
// allocates the scratch frame for the 3-arg overload and frees it.
1697 int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op)
1699 AVFrame *opic = av_frame_alloc();
1700 int ret = convert_vframe_picture(frame, op, opic);
1701 av_frame_free(&opic);
// Core VFrame -> AVFrame conversion (encode direction). opic is a borrowed
// scratch frame pointed at the VFrame's memory so swscale reads directly from
// it. Returns -1 if the VFrame's color model has no FFmpeg equivalent.
1705 int FFVideoConvert::convert_vframe_picture(VFrame *frame, AVFrame *op, AVFrame *opic)
1706 { // vframe = picture
1707 int cmodel = frame->get_color_model();
1708 AVPixelFormat ifmt = color_model_to_pix_fmt(cmodel);
1709 if( ifmt == AV_PIX_FMT_NB ) return -1;
1710 int size = av_image_fill_arrays(opic->data, opic->linesize,
1711 frame->get_data(), ifmt, frame->get_w(), frame->get_h(), 1);
1712 if( size < 0 ) return -1;
1714 int bpp = BC_CModels::calculate_pixelsize(cmodel);
1715 int ysz = bpp * frame->get_w(), usz = ysz;
// planar models: point swscale at the VFrame's separate y/u/v planes
1725 // override av_image_fill_arrays() for planar types
1726 opic->data[0] = frame->get_y(); opic->linesize[0] = ysz;
1727 opic->data[1] = frame->get_u(); opic->linesize[1] = usz;
1728 opic->data[2] = frame->get_v(); opic->linesize[2] = usz;
1731 opic->data[0] = frame->get_data();
1732 opic->linesize[0] = frame->get_bytes_per_line();
1736 AVPixelFormat ofmt = (AVPixelFormat)op->format;
1737 convert_ctx = sws_getCachedContext(convert_ctx, frame->get_w(), frame->get_h(),
1738 ifmt, op->width, op->height, ofmt, SWS_POINT, NULL, NULL, NULL);
1739 if( !convert_ctx ) {
1740 fprintf(stderr, "FFVideoConvert::convert_frame_picture:"
1741 " sws_getCachedContext() failed\n");
// same preference mapping as convert_picture_vframe()
1746 int color_range = 0;
1747 switch( preferences->yuv_color_range ) {
1748 case BC_COLORS_JPEG: color_range = 1; break;
1749 case BC_COLORS_MPEG: color_range = 0; break;
1751 int color_space = SWS_CS_ITU601;
1752 switch( preferences->yuv_color_space ) {
1753 case BC_COLORS_BT601_PAL: color_space = SWS_CS_ITU601; break;
1754 case BC_COLORS_BT601_NTSC: color_space = SWS_CS_SMPTE170M; break;
1755 case BC_COLORS_BT709: color_space = SWS_CS_ITU709; break;
1756 case BC_COLORS_BT2020_NCL:
1757 case BC_COLORS_BT2020_CL: color_space = SWS_CS_BT2020; break;
1759 const int *color_table = sws_getCoefficients(color_space);
1761 int *inv_table, *table, src_range, dst_range;
1762 int brightness, contrast, saturation;
// only the destination side is forced here; the source table/range of the
// context is preserved (this is the encode direction)
1763 if( !sws_getColorspaceDetails(convert_ctx,
1764 &inv_table, &src_range, &table, &dst_range,
1765 &brightness, &contrast, &saturation) ) {
1766 if( dst_range != color_range || table != color_table )
1767 sws_setColorspaceDetails(convert_ctx,
1768 inv_table, src_range, color_table, color_range,
1769 brightness, contrast, saturation);
1772 int ret = sws_scale(convert_ctx, opic->data, opic->linesize, 0, frame->get_h(),
1773 op->data, op->linesize);
1775 ff_err(ret, "FFVideoConvert::convert_frame_picture: sws_scale() failed\n");
// Convert a VFrame into the encoder's AVFrame. Direct transfer first; on
// failure, bounce through an intermediate VFrame in a compatible model
// (same yuv/rgb family and alpha as the source, depth chosen from its
// pixel size), mirroring convert_cmodel() on the decode side.
1781 int FFVideoConvert::convert_pixfmt(VFrame *frame, AVFrame *op)
1783 // try direct transfer
1784 if( !convert_vframe_picture(frame, op) ) return 1;
1785 // use indirect transfer
1786 int cmodel = frame->get_color_model();
// per-component depth derived from total pixel size / component count
1787 int max_bits = BC_CModels::calculate_pixelsize(cmodel) * 8;
1788 max_bits /= BC_CModels::components(cmodel);
1789 AVPixelFormat ofmt = (AVPixelFormat)op->format;
1790 int imodel = pix_fmt_to_color_model(ofmt);
1791 int imodel_is_yuv = BC_CModels::is_yuv(imodel);
1792 int cmodel_is_yuv = BC_CModels::is_yuv(cmodel);
1793 if( imodel < 0 || imodel_is_yuv != cmodel_is_yuv ) {
1794 imodel = cmodel_is_yuv ?
1795 (BC_CModels::has_alpha(cmodel) ?
1797 (max_bits > 8 ? BC_AYUV16161616 : BC_YUV444P)) :
1798 (BC_CModels::has_alpha(cmodel) ?
1799 (max_bits > 8 ? BC_RGBA16161616 : BC_RGBA8888) :
1800 (max_bits > 8 ? BC_RGB161616 : BC_RGB888)) ;
1802 VFrame vframe(frame->get_w(), frame->get_h(), imodel);
1803 vframe.transfer_from(frame);
1804 if( !convert_vframe_picture(&vframe, op) ) return 1;
// convert_pixfmt() plus metadata: copies every VFrame parameter into the
// output frame's AVDictionary (existing entries are not cleared first).
1808 int FFVideoConvert::transfer_pixfmt(VFrame *frame, AVFrame *ofp)
1810 int ret = convert_pixfmt(frame, ofp);
1812 BC_Hash *hp = frame->get_params();
1813 AVDictionary **dict = &ofp->metadata;
1814 //av_dict_free(dict);
1815 for( int i=0; i<hp->size(); ++i ) {
1816 char *key = hp->get_key(i), *val = hp->get_value(i);
1817 av_dict_set(dict, key, val, 0);
// Load seek markers for this video stream from the asset's index state,
// scaled by the stream's frame rate.
1823 void FFVideoStream::load_markers()
1825 IndexState *index_state = ffmpeg->file_base->asset->index_state;
1826 if( !index_state || idx >= index_state->video_markers.size() ) return;
1827 FFStream::load_markers(*index_state->video_markers[idx], frame_rate);
// Return the marker list for this stream, or NULL when unavailable.
1830 IndexMarks *FFVideoStream::get_markers()
1832 IndexState *index_state = ffmpeg->file_base->asset->index_state;
1833 if( !index_state || idx >= index_state->video_markers.size() ) return 0;
// NOTE(review): the "!index_state ?" below is redundant -- index_state was
// already null-checked on the previous line.
1834 return !index_state ? 0 : index_state->video_markers[idx];
// Construct the FFMPEG wrapper around a FileBase: zero all option/state
// fields, create the mux/flow synchronization primitives, and load the
// global ffmpeg.opts option file.
1838 FFMPEG::FFMPEG(FileBase *file_base)
1841 this->file_base = file_base;
1842 memset(file_format,0,sizeof(file_format));
// mux_lock starts locked (0), flow_lock starts open (1)
1843 mux_lock = new Condition(0,"FFMPEG::mux_lock",0);
1844 flow_lock = new Condition(1,"FFStream::flow_lock",0);
1847 decoding = encoding = 0;
1848 has_audio = has_video = 0;
1849 interlace_from_codec = 0;
1852 opt_video_filter = 0;
1853 opt_audio_filter = 0;
1855 opt_video_decoder = 0;
1856 opt_audio_decoder = 0;
1858 char option_path[BCTEXTLEN];
1859 set_option_path(option_path, "%s", "ffmpeg.opts");
1860 read_options(option_path, opts);
// Destructor body (signature elided in this view): tear down under ff_lock,
// release streams, close the format context, and free option storage.
1865 ff_lock("FFMPEG::~FFMPEG()");
1867 ffaudio.remove_all_objects();
1868 ffvideo.remove_all_objects();
1869 if( fmt_ctx ) avformat_close_input(&fmt_ctx);
1873 av_dict_free(&opts);
1874 delete [] opt_video_filter;
1875 delete [] opt_audio_filter;
1876 delete [] opt_hw_dev;
// Validate a sample rate against the codec's supported list. A codec with no
// list accepts anything; otherwise the rate must appear in the list.
// The #if selects the const-correct AVCodec signature for newer libavcodec.
1879 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
1880 int FFMPEG::check_sample_rate(const AVCodec *codec, int sample_rate)
1882 int FFMPEG::check_sample_rate(AVCodec *codec, int sample_rate)
1885 const int *p = codec->supported_samplerates;
1886 if( !p ) return sample_rate;
1888 if( *p == sample_rate ) return *p;
1894 // check_frame_rate and std_frame_rate needed for 23.976
1895 // and 59.94 fps mpeg2
// Enumerate standard frame rates as AVRationals over a common denominator of
// 1001*12: indices below 30*12 sweep (i+1)*1001 (covers NTSC-style rates),
// higher indices walk the fixed freqs[] table; a 0 entry terminates the scan.
1896 static inline AVRational std_frame_rate(int i)
1898 static const int m1 = 1001*12, m2 = 1000*12;
1899 static const int freqs[] = {
1900 40*m1, 48*m1, 50*m1, 60*m1, 80*m1,120*m1, 240*m1,
1901 24*m2, 30*m2, 60*m2, 12*m2, 15*m2, 48*m2, 90*m2,
1902 100*m2, 120*m2, 144*m2, 72*m2, 0,
1904 int freq = i<30*12 ? (i+1)*1001 : freqs[i-30*12];
1905 return (AVRational) { freq, 1001*12 };
// Find the supported rate (from the codec's list p, or the standard-rate
// enumeration when p is NULL) closest to frame_rate. Only matches within a
// 0.01% relative error are accepted; otherwise {0,0} signals "no match".
1908 AVRational FFMPEG::check_frame_rate(const AVRational *p, double frame_rate)
1910 AVRational rate, best_rate = (AVRational) { 0, 0 };
1911 double max_err = 1.; int i = 0;
1912 while( ((p ? (rate=*p++) : (rate=std_frame_rate(i++))), rate.num) != 0 ) {
1913 double framerate = (double) rate.num / rate.den;
1914 double err = fabs(frame_rate/framerate - 1.);
1915 if( err >= max_err ) continue;
// (elided lines presumably record the new best rate/err -- confirm)
1919 return max_err < 0.0001 ? best_rate : (AVRational) { 0, 0 };
// Derive the pixel (sample) aspect ratio from the asset's frame geometry and
// display aspect, reduced to a small integer ratio via create_aspect_ratio.
// Falls back to square pixels ({1,1}).
1922 AVRational FFMPEG::to_sample_aspect_ratio(Asset *asset)
1925 double display_aspect = asset->width / (double)asset->height;
1926 double sample_aspect = display_aspect / asset->aspect_ratio;
1927 int width = 1000000, height = width * sample_aspect + 0.5;
1929 MWindow::create_aspect_ratio(w, h, width, height);
1930 return (AVRational){(int)w, (int)h};
1933 return (AVRational){1, 1};
// Audio time base: one tick per sample.
1937 AVRational FFMPEG::to_time_base(int sample_rate)
1939 return (AVRational){1, sample_rate};
// Score how badly dst_fmt fits src_fmt for resampling; LOWER is better.
// Penalties: planar/packed mismatch (+1), narrowing (+100/byte) vs widening
// (+10/byte), and float<->s32 conversions (lossy direction costs more).
1942 int FFMPEG::get_fmt_score(AVSampleFormat dst_fmt, AVSampleFormat src_fmt)
1945 int dst_planar = av_sample_fmt_is_planar(dst_fmt);
1946 int src_planar = av_sample_fmt_is_planar(src_fmt);
1947 if( dst_planar != src_planar ) ++score;
1948 int dst_bytes = av_get_bytes_per_sample(dst_fmt);
1949 int src_bytes = av_get_bytes_per_sample(src_fmt);
// narrowing loses data, so it is penalized 10x harder than widening
1950 score += (src_bytes > dst_bytes ? 100 : -10) * (src_bytes - dst_bytes);
1951 int src_packed = av_get_packed_sample_fmt(src_fmt);
1952 int dst_packed = av_get_packed_sample_fmt(dst_fmt);
1953 if( dst_packed == AV_SAMPLE_FMT_S32 && src_packed == AV_SAMPLE_FMT_FLT ) score += 20;
1954 if( dst_packed == AV_SAMPLE_FMT_FLT && src_packed == AV_SAMPLE_FMT_S32 ) score += 2;
// Pick the sample format from the codec's (negative-terminated) list with the
// lowest get_fmt_score() against src_fmt. Note scores are "lower is better",
// hence the "continue" on score >= best_score.
1958 AVSampleFormat FFMPEG::find_best_sample_fmt_of_list(
1959 const AVSampleFormat *sample_fmts, AVSampleFormat src_fmt)
1961 AVSampleFormat best = AV_SAMPLE_FMT_NONE;
1962 int best_score = get_fmt_score(best, src_fmt);
1963 for( int i=0; sample_fmts[i] >= 0; ++i ) {
1964 AVSampleFormat sample_fmt = sample_fmts[i];
1965 int score = get_fmt_score(sample_fmt, src_fmt);
1966 if( score >= best_score ) continue;
1967 best = sample_fmt; best_score = score;
// Build "<cindat>/ffmpeg/<fmt...>" into path (caller supplies a BCTEXTLEN
// buffer).
1973 void FFMPEG::set_option_path(char *path, const char *fmt, ...)
1975 char *ep = path + BCTEXTLEN-1;
// NOTE(review): strncat's size argument should be the REMAINING space, not
// ep-path (the full buffer) -- as written a long cindat path could overflow
// by up to strlen(path) bytes; confirm and consider snprintf instead.
1976 strncpy(path, File::get_cindat_path(), ep-path);
1977 strncat(path, "/ffmpeg/", ep-path);
1978 path += strlen(path);
1981 path += vsnprintf(path, ep-path, fmt, ap);
// Compose an option-file path as "<type>/<spec>" under the ffmpeg option dir
// (intervening lines elided in this view).
1986 void FFMPEG::get_option_path(char *path, const char *type, const char *spec)
1991 set_option_path(path, "%s/%s", type, spec);
// Read the first line of the option file for <path>/<spec> and parse it as
// "format codec"; returns nonzero on open/read/parse failure.
1994 int FFMPEG::get_format(char *format, const char *path, const char *spec)
1996 char option_path[BCTEXTLEN], line[BCTEXTLEN], codec[BCTEXTLEN];
1997 get_option_path(option_path, path, spec);
1998 FILE *fp = fopen(option_path,"r");
2001 if( !fgets(line, sizeof(line), fp) ) ret = 1;
2003 line[sizeof(line)-1] = 0;
2004 ret = scan_option_line(line, format, codec);
// Like get_format() but returns the codec token; any " | bitstream-filter"
// suffix is stripped, along with trailing whitespace before the '|'.
2010 int FFMPEG::get_codec(char *codec, const char *path, const char *spec)
2012 char option_path[BCTEXTLEN], line[BCTEXTLEN], format[BCTEXTLEN];
2013 get_option_path(option_path, path, spec);
2014 FILE *fp = fopen(option_path,"r");
2017 if( !fgets(line, sizeof(line), fp) ) ret = 1;
2020 line[sizeof(line)-1] = 0;
2021 ret = scan_option_line(line, format, codec);
// truncate codec at '|' and trim the whitespace preceding it
2024 char *vp = codec, *ep = vp+BCTEXTLEN-1;
2025 while( vp < ep && *vp && *vp != '|' ) ++vp;
2026 if( *vp == '|' ) --vp;
2027 while( vp > codec && (*vp==' ' || *vp=='\t') ) *vp-- = 0;
// Resolve the container (muxer) name for the asset by combining the audio and
// video codec option files. Each codec maps to a format; a format may map
// onward to a muxer via a "format/<fmt>" file. Audio and video must agree;
// -1 signals a muxer or format conflict.
2032 int FFMPEG::get_file_format()
2034 char audio_muxer[BCSTRLEN], video_muxer[BCSTRLEN];
2035 char audio_format[BCSTRLEN], video_format[BCSTRLEN];
2036 audio_muxer[0] = audio_format[0] = 0;
2037 video_muxer[0] = video_format[0] = 0;
2038 Asset *asset = file_base->asset;
2039 int ret = asset ? 0 : 1;
2040 if( !ret && asset->audio_data ) {
2041 if( !(ret=get_format(audio_format, "audio", asset->acodec)) ) {
// no indirection file: the codec's format IS the muxer name
2042 if( get_format(audio_muxer, "format", audio_format) ) {
2043 strcpy(audio_muxer, audio_format);
2044 audio_format[0] = 0;
2048 if( !ret && asset->video_data ) {
2049 if( !(ret=get_format(video_format, "video", asset->vcodec)) ) {
2050 if( get_format(video_muxer, "format", video_format) ) {
2051 strcpy(video_muxer, video_format);
2052 video_format[0] = 0;
2056 if( !ret && !audio_muxer[0] && !video_muxer[0] )
// audio and video sides must name the same muxer/format
2058 if( !ret && audio_muxer[0] && video_muxer[0] &&
2059 strcmp(audio_muxer, video_muxer) ) ret = -1;
2060 if( !ret && audio_format[0] && video_format[0] &&
2061 strcmp(audio_format, video_format) ) ret = -1;
2063 strcpy(file_format, !audio_format[0] && !video_format[0] ?
2064 (audio_muxer[0] ? audio_muxer : video_muxer) :
2065 (audio_format[0] ? audio_format : video_format));
// Parse one "tag [= ] value" option line: tag is the first whitespace/'='
// delimited token (max BCSTRLEN-1), value is the rest of the line up to the
// newline (max BCTEXTLEN-1). Returns 1 on an empty or oversized token.
2069 int FFMPEG::scan_option_line(const char *cp, char *tag, char *val)
2071 while( *cp == ' ' || *cp == '\t' ) ++cp;
2072 const char *bp = cp;
2073 while( *cp && *cp != ' ' && *cp != '\t' && *cp != '=' && *cp != '\n' ) ++cp;
2075 if( !len || len > BCSTRLEN-1 ) return 1;
2076 while( bp < cp ) *tag++ = *bp++;
// the '=' separator is optional; surrounding whitespace is skipped
2078 while( *cp == ' ' || *cp == '\t' ) ++cp;
2079 if( *cp == '=' ) ++cp;
2080 while( *cp == ' ' || *cp == '\t' ) ++cp;
2082 while( *cp && *cp != '\n' ) ++cp;
2084 if( len > BCTEXTLEN-1 ) return 1;
2085 while( bp < cp ) *val++ = *bp++;
// Return 1 if any option file under the <type> directory has an extension
// matching fformat -- i.e. this format has at least one render preset.
2090 int FFMPEG::can_render(const char *fformat, const char *type)
2093 char option_path[BCTEXTLEN];
2094 FFMPEG::set_option_path(option_path, type);
2095 fs.update(option_path);
2096 int total_files = fs.total_files();
2097 for( int i=0; i<total_files; ++i ) {
2098 const char *name = fs.get_entry(i)->get_name();
2099 const char *ext = strrchr(name,'.');
2100 if( !ext ) continue;
2101 if( !strcmp(fformat, ++ext) ) return 1;
// Search a newline-separated options string for key nm; on a match copy its
// value into value and (in elided code) return success. '#' and ';' lines
// are comments.
2106 int FFMPEG::get_ff_option(const char *nm, const char *options, char *value)
2108 for( const char *cp=options; *cp!=0; ) {
2109 char line[BCTEXTLEN], *bp = line, *ep = bp+sizeof(line)-1;
2110 while( bp < ep && *cp && *cp!='\n' ) *bp++ = *cp++;
2113 if( !line[0] || line[0] == '#' || line[0] == ';' ) continue;
2114 char key[BCSTRLEN], val[BCTEXTLEN];
2115 if( FFMPEG::scan_option_line(line, key, val) ) continue;
2116 if( !strcmp(key, nm) ) {
// NOTE(review): val can be up to BCTEXTLEN-1 chars but only BCSTRLEN bytes are
// copied, and strncpy leaves value UNTERMINATED when val is that long --
// callers must pass a BCSTRLEN buffer and tolerate truncation; confirm.
2117 strncpy(value, val, BCSTRLEN);
// Digest the asset's ff_audio_options: resolve the sample format (explicit
// cin_sample_fmt, else the encoder's best match for float input, else S16)
// plus optional cin_bitrate/cin_quality overrides, storing results on asset.
2124 void FFMPEG::scan_audio_options(Asset *asset, EDL *edl)
2126 char cin_sample_fmt[BCSTRLEN];
2127 int cin_fmt = AV_SAMPLE_FMT_NONE;
2128 const char *options = asset->ff_audio_options;
2129 if( !get_ff_option("cin_sample_fmt", options, cin_sample_fmt) )
2130 cin_fmt = (int)av_get_sample_fmt(cin_sample_fmt);
2132 char audio_codec[BCSTRLEN]; audio_codec[0] = 0;
// const-correct AVCodec on newer libavcodec
2133 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,18,100)
2134 const AVCodec *av_codec = !FFMPEG::get_codec(audio_codec, "audio", asset->acodec) ?
2136 AVCodec *av_codec = !FFMPEG::get_codec(audio_codec, "audio", asset->acodec) ?
2138 avcodec_find_encoder_by_name(audio_codec) : 0;
2139 if( av_codec && av_codec->sample_fmts )
2140 cin_fmt = find_best_sample_fmt_of_list(av_codec->sample_fmts, AV_SAMPLE_FMT_FLT);
2142 if( cin_fmt < 0 ) cin_fmt = AV_SAMPLE_FMT_S16;
2143 const char *name = av_get_sample_fmt_name((AVSampleFormat)cin_fmt);
2144 if( !name ) name = _("None");
2145 strcpy(asset->ff_sample_format, name);
2147 char value[BCSTRLEN];
2148 if( !get_ff_option("cin_bitrate", options, value) )
2149 asset->ff_audio_bitrate = atoi(value);
2150 if( !get_ff_option("cin_quality", options, value) )
2151 asset->ff_audio_quality = atoi(value);
// Load the audio codec's option file into the asset and re-scan the derived
// settings.
2154 void FFMPEG::load_audio_options(Asset *asset, EDL *edl)
2156 char options_path[BCTEXTLEN];
2157 set_option_path(options_path, "audio/%s", asset->acodec);
2158 if( !load_options(options_path,
2159 asset->ff_audio_options,
2160 sizeof(asset->ff_audio_options)) )
2161 scan_audio_options(asset, edl);
// Digest the asset's ff_video_options: resolve the pixel format (explicit
// cin_pix_fmt, else the encoder's list -- the EDL-based best-match heuristic
// is compiled out as unreliable -- else YUV420P) and optional bitrate/quality.
2164 void FFMPEG::scan_video_options(Asset *asset, EDL *edl)
2166 char cin_pix_fmt[BCSTRLEN];
2167 int cin_fmt = AV_PIX_FMT_NONE;
2168 const char *options = asset->ff_video_options;
2169 if( !get_ff_option("cin_pix_fmt", options, cin_pix_fmt) )
2170 cin_fmt = (int)av_get_pix_fmt(cin_pix_fmt);
2172 char video_codec[BCSTRLEN]; video_codec[0] = 0;
2173 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,18,100)
2174 const AVCodec *av_codec = !get_codec(video_codec, "video", asset->vcodec) ?
2176 AVCodec *av_codec = !get_codec(video_codec, "video", asset->vcodec) ?
2178 avcodec_find_encoder_by_name(video_codec) : 0;
2179 if( av_codec && av_codec->pix_fmts ) {
// deliberately disabled: matching against the EDL color model
// "frequently picks a bad answer" per the original author
2180 if( 0 && edl ) { // frequently picks a bad answer
2181 int color_model = edl->session->color_model;
2182 int max_bits = BC_CModels::calculate_pixelsize(color_model) * 8;
2183 max_bits /= BC_CModels::components(color_model);
2184 cin_fmt = avcodec_find_best_pix_fmt_of_list(av_codec->pix_fmts,
2185 (BC_CModels::is_yuv(color_model) ?
2186 (max_bits > 8 ? AV_PIX_FMT_AYUV64LE : AV_PIX_FMT_YUV444P) :
2187 (max_bits > 8 ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB24)), 0, 0);
// fall back to the encoder's first (preferred) pixel format
2190 cin_fmt = av_codec->pix_fmts[0];
2193 if( cin_fmt < 0 ) cin_fmt = AV_PIX_FMT_YUV420P;
2194 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get((AVPixelFormat)cin_fmt);
2195 const char *name = desc ? desc->name : _("None");
2196 strcpy(asset->ff_pixel_format, name);
2198 char value[BCSTRLEN];
2199 if( !get_ff_option("cin_bitrate", options, value) )
2200 asset->ff_video_bitrate = atoi(value);
2201 if( !get_ff_option("cin_quality", options, value) )
2202 asset->ff_video_quality = atoi(value);
// Load the video codec's option file into the asset and re-scan the derived
// settings.
2205 void FFMPEG::load_video_options(Asset *asset, EDL *edl)
2207 char options_path[BCTEXTLEN];
2208 set_option_path(options_path, "video/%s", asset->vcodec);
2209 if( !load_options(options_path,
2210 asset->ff_video_options,
2211 sizeof(asset->ff_video_options)) )
2212 scan_video_options(asset, edl);
2215 void FFMPEG::scan_format_options(Asset *asset, EDL *edl)
// Load the container format's option file into the asset, then scan it.
2219 void FFMPEG::load_format_options(Asset *asset, EDL *edl)
2221 char options_path[BCTEXTLEN];
2222 set_option_path(options_path, "format/%s", asset->fformat);
2223 if( !load_options(options_path,
2224 asset->ff_format_options,
2225 sizeof(asset->ff_format_options)) )
2226 scan_format_options(asset, edl);
// Read "<path>/<type>.dfl": first line names the default codec, remaining
// lines are codec options; then append the codec's own option file. Returns
// nonzero on failure.
2229 int FFMPEG::load_defaults(const char *path, const char *type,
2230 char *codec, char *codec_options, int len)
2232 char default_file[BCTEXTLEN];
2233 set_option_path(default_file, "%s/%s.dfl", path, type);
2234 FILE *fp = fopen(default_file,"r");
2236 fgets(codec, BCSTRLEN, fp);
// strip the trailing newline from the codec name (elided line nulls *cp)
2238 while( *cp && *cp!='\n' ) ++cp;
2240 while( len > 0 && fgets(codec_options, len, fp) ) {
2241 int n = strlen(codec_options);
2242 codec_options += n; len -= n;
2245 set_option_path(default_file, "%s/%s", path, codec);
2246 return load_options(default_file, codec_options, len);
// Apply a container format name to an FFMPEG asset: store the name, then
// lazily populate format/audio/video options from the default files. A
// stream whose defaults cannot be loaded is disabled on the asset.
2249 void FFMPEG::set_asset_format(Asset *asset, EDL *edl, const char *text)
2251 if( asset->format != FILE_FFMPEG ) return;
// guard against self-copy when caller passes asset->fformat itself
2252 if( text != asset->fformat )
2253 strcpy(asset->fformat, text);
2254 if( !asset->ff_format_options[0] )
2255 load_format_options(asset, edl);
2256 if( asset->audio_data && !asset->ff_audio_options[0] ) {
2257 if( !load_defaults("audio", text, asset->acodec,
2258 asset->ff_audio_options, sizeof(asset->ff_audio_options)) )
2259 scan_audio_options(asset, edl);
2261 asset->audio_data = 0;
2263 if( asset->video_data && !asset->ff_video_options[0] ) {
2264 if( !load_defaults("video", text, asset->vcodec,
2265 asset->ff_video_options, sizeof(asset->ff_video_options)) )
2266 scan_video_options(asset, edl);
2268 asset->video_data = 0;
// Open an encoder preset file and parse its first line into format, codec,
// and optional bitstream filter, reporting errors to the user.
2272 int FFMPEG::get_encoder(const char *options,
2273 char *format, char *codec, char *bsfilter)
2275 FILE *fp = fopen(options,"r");
2277 eprintf(_("options open failed %s\n"),options);
2280 char line[BCTEXTLEN];
2281 if( !fgets(line, sizeof(line), fp) ||
2282 scan_encoder(line, format, codec, bsfilter) )
2283 eprintf(_("format/codec not found %s\n"), options);
// Parse an encoder line of the form "format codec [| bsfilter ...]":
// splits codec at '|', trims trailing whitespace from the codec, and copies
// the remainder (sans leading whitespace) into bsfilter.
2288 int FFMPEG::scan_encoder(const char *line,
2289 char *format, char *codec, char *bsfilter)
2291 format[0] = codec[0] = bsfilter[0] = 0;
2292 if( scan_option_line(line, format, codec) ) return 1;
2294 while( *cp && *cp != '|' ) ++cp;
2295 if( !*cp ) return 0;
// erase '|' plus the whitespace run before it
2297 do { *bp-- = 0; } while( bp>=codec && (*bp==' ' || *bp == '\t' ) );
2298 while( *++cp && (*cp==' ' || *cp == '\t') );
2300 for( int i=BCTEXTLEN; --i>0 && *cp; ) *bp++ = *cp++;
// Open an options file, optionally skipping the first `skip` lines (e.g. a
// header), then delegate to the FILE*-based reader.
2305 int FFMPEG::read_options(const char *options, AVDictionary *&opts, int skip)
2307 FILE *fp = fopen(options,"r");
2310 while( !ret && --skip >= 0 ) {
2312 while( ch >= 0 && ch != '\n' ) ch = getc(fp);
// EOF before the skip count is satisfied is an error
2313 if( ch < 0 ) ret = 1;
2316 ret = read_options(fp, options, opts);
// Parse an in-memory options string (via fmemopen) into opts; a special "id"
// entry sets the stream id directly instead of becoming a codec option.
2321 int FFMPEG::scan_options(const char *options, AVDictionary *&opts, AVStream *st)
2323 FILE *fp = fmemopen((void *)options,strlen(options),"r");
2325 int ret = read_options(fp, options, opts);
2328 AVDictionaryEntry *tag = av_dict_get(opts, "id", NULL, 0);
2329 if( tag ) st->id = strtol(tag->value,0,0);
// Forward a decoded frame into the owning File's frame cache.
2334 void FFMPEG::put_cache_frame(VFrame *frame, int64_t position)
2336 file_base->file->put_cache_frame(frame, position, 0);
// Query whether the owning File wants frame caching.
2339 int FFMPEG::get_use_cache()
2341 return file_base->file->get_use_cache();
// Drop the owning File's cached frames.
2344 void FFMPEG::purge_cache()
2346 file_base->file->purge_cache();
2349 FFCodecRemap::FFCodecRemap()
// Free the cstrdup'd codec-name strings.
2354 FFCodecRemap::~FFCodecRemap()
2356 delete [] old_codec;
2357 delete [] new_codec;
// Parse a "old_codec = new_codec" remap directive and append it to the list.
// Returns 1 when the line does not match, 0 on success.
2360 int FFCodecRemaps::add(const char *val)
2362 char old_codec[BCSTRLEN], new_codec[BCSTRLEN];
// FIX: the scanset previously used the range "A-z", which in sscanf matches
// the punctuation characters between 'Z' and 'a' ([ \ ] ^ _ `) rather than
// uppercase letters; "A-Z" matches the intended alphanumeric codec names.
2363 if( sscanf(val, " %63[a-zA-Z0-9_-] = %63[a-z0-9_-]",
2364 &old_codec[0], &new_codec[0]) != 2 ) return 1;
2365 FFCodecRemap &remap = append();
2366 remap.old_codec = cstrdup(old_codec);
2367 remap.new_codec = cstrdup(new_codec);
// If the decoder chosen for codec_id appears in this remap list (matched by
// name), substitute the remapped decoder. Returns -1 when no decoder can be
// found, 1 when no remap applies, and (in elided code) success otherwise.
// The #if selects the const-correct AVCodec signature for newer libavcodec.
2371 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
2372 int FFCodecRemaps::update(AVCodecID &codec_id, const AVCodec *&decoder)
2374 const AVCodec *codec = avcodec_find_decoder(codec_id);
2376 int FFCodecRemaps::update(AVCodecID &codec_id, AVCodec *&decoder)
2378 AVCodec *codec = avcodec_find_decoder(codec_id);
2380 if( !codec ) return -1;
2381 const char *name = codec->name;
2382 FFCodecRemaps &map = *this;
// linear scan for a remap whose old_codec matches the default decoder name
2384 while( --k >= 0 && strcmp(map[k].old_codec, name) );
2385 if( k < 0 ) return 1;
2386 const char *new_codec = map[k].new_codec;
2387 codec = avcodec_find_decoder_by_name(new_codec);
2388 if( !codec ) return -1;
// Read "key value" option lines from an open FILE. Cinelerra-specific keys
// (duration, decoders, filters, hw device, remaps, loglevel) are consumed
// into member state; anything else is passed through to the AVDictionary.
// '#' lines and blank lines are ignored.
2393 int FFMPEG::read_options(FILE *fp, const char *options, AVDictionary *&opts)
2395 int ret = 0, no = 0;
2396 char line[BCTEXTLEN];
2397 while( !ret && fgets(line, sizeof(line), fp) ) {
2398 line[sizeof(line)-1] = 0;
2399 if( line[0] == '#' ) continue;
2400 if( line[0] == '\n' ) continue;
2401 char key[BCSTRLEN], val[BCTEXTLEN];
2402 if( scan_option_line(line, key, val) ) {
2403 eprintf(_("err reading %s: line %d\n"), options, no);
2407 if( !strcmp(key, "duration") )
2408 opt_duration = strtod(val, 0);
2409 else if( !strcmp(key, "video_decoder") )
2410 opt_video_decoder = cstrdup(val);
2411 else if( !strcmp(key, "audio_decoder") )
2412 opt_audio_decoder = cstrdup(val);
2413 else if( !strcmp(key, "remap_video_decoder") )
2414 video_codec_remaps.add(val);
2415 else if( !strcmp(key, "remap_audio_decoder") )
2416 audio_codec_remaps.add(val);
2417 else if( !strcmp(key, "video_filter") )
2418 opt_video_filter = cstrdup(val);
2419 else if( !strcmp(key, "audio_filter") )
2420 opt_audio_filter = cstrdup(val);
2421 else if( !strcmp(key, "cin_hw_dev") )
2422 opt_hw_dev = cstrdup(val);
2423 else if( !strcmp(key, "loglevel") )
// unrecognized keys become avlib options
2426 av_dict_set(&opts, key, val, 0);
// Load an option file (path relative to the ffmpeg option dir) into opts.
2432 int FFMPEG::load_options(const char *options, AVDictionary *&opts)
2434 char option_path[BCTEXTLEN];
2435 set_option_path(option_path, "%s", options);
2436 return read_options(option_path, opts);
// Slurp an option file into bfr, skipping its header line. Returns the byte
// count read (0 on failure / empty).
2439 int FFMPEG::load_options(const char *path, char *bfr, int len)
2442 FILE *fp = fopen(path, "r");
2444 fgets(bfr, len, fp); // skip hdr
2445 len = fread(bfr, 1, len-1, fp);
2446 if( len < 0 ) len = 0;
// Set the avlib log level from a symbolic name ("quiet".."debug") or, when
// the name is unknown, from its numeric value via atoi().
2452 void FFMPEG::set_loglevel(const char *ap)
2454 if( !ap || !*ap ) return;
2459 { "quiet" , AV_LOG_QUIET },
2460 { "panic" , AV_LOG_PANIC },
2461 { "fatal" , AV_LOG_FATAL },
2462 { "error" , AV_LOG_ERROR },
2463 { "warning", AV_LOG_WARNING },
2464 { "info" , AV_LOG_INFO },
2465 { "verbose", AV_LOG_VERBOSE },
2466 { "debug" , AV_LOG_DEBUG },
2468 for( int i=0; i<(int)(sizeof(log_levels)/sizeof(log_levels[0])); ++i ) {
2469 if( !strcmp(log_levels[i].name, ap) ) {
2470 av_log_set_level(log_levels[i].level);
// fallback: treat the argument as a raw numeric level
2474 av_log_set_level(atoi(ap));
// Convert a stream timestamp to seconds; AV_NOPTS_VALUE maps to 0.
2477 double FFMPEG::to_secs(int64_t time, AVRational time_base)
2479 double base_time = time == AV_NOPTS_VALUE ? 0 :
2480 av_rescale_q(time, time_base, AV_TIME_BASE_Q);
2481 return base_time / AV_TIME_BASE;
// Compose a human-readable summary of the opened media into text[0..len):
// container format, per-stream video/audio details, program->stream maps,
// and container metadata.  report() appends and keeps len in bounds.
2484 int FFMPEG::info(char *text, int len)
2486 if( len <= 0 ) return 0;
2488 #define report(s...) do { int n = snprintf(cp,len,s); cp += n; len -= n; } while(0)
2490 report("format: %s\n",fmt_ctx->iformat->name);
// -- video streams: codec, geometry, rate, pix fmt, color, length --
2491 if( ffvideo.size() > 0 )
2492 report("\n%d video stream%s\n",ffvideo.size(), ffvideo.size()!=1 ? "s" : "");
2493 for( int vidx=0; vidx<ffvideo.size(); ++vidx ) {
2494 const char *unkn = _("(unkn)");
2495 FFVideoStream *vid = ffvideo[vidx];
2496 AVStream *st = vid->st;
2497 AVCodecID codec_id = st->codecpar->codec_id;
2498 report(_("vid%d (%d), id 0x%06x:\n"), vid->idx, vid->fidx, codec_id);
2499 const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
2500 report(" video%d %s ", vidx+1, desc ? desc->name : unkn);
2501 report(" %dx%d %5.2f", vid->width, vid->height, vid->frame_rate);
2502 AVPixelFormat pix_fmt = (AVPixelFormat)st->codecpar->format;
2503 const char *pfn = av_get_pix_fmt_name(pix_fmt);
2504 report(" pix %s\n", pfn ? pfn : unkn);
// field_order reported at both container and codec level; 0 shown as -1
2505 int interlace = st->codecpar->field_order;
2506 report(" interlace (container level): %i\n", interlace ? interlace : -1);
2507 int interlace_codec = interlace_from_codec;
2508 report(" interlace (codec level): %i\n", interlace_codec ? interlace_codec : -1);
2509 enum AVColorSpace space = st->codecpar->color_space;
2510 const char *nm = av_color_space_name(space);
2511 report(" color space:%s", nm ? nm : unkn);
2512 enum AVColorRange range = st->codecpar->color_range;
2513 const char *rg = av_color_range_name(range);
2514 report("/ range:%s\n", rg ? rg : unkn);
// length in frames, nudge offset (signed, printed as +/-), and h:m:s
2515 double secs = to_secs(st->duration, st->time_base);
2516 int64_t length = secs * vid->frame_rate + 0.5;
2517 double ofs = to_secs((vid->nudge - st->start_time), st->time_base);
2518 int64_t nudge = ofs * vid->frame_rate;
2519 int ch = nudge >= 0 ? '+' : (nudge=-nudge, '-');
2520 report(" %jd%c%jd frms %0.2f secs", length,ch,nudge, secs);
2521 int hrs = secs/3600; secs -= hrs*3600;
2522 int mins = secs/60; secs -= mins*60;
2523 report(" %d:%02d:%05.2f\n", hrs, mins, secs);
2524 double theta = vid->get_rotation_angle();
2525 if( fabs(theta) > 1 )
2526 report(" rotation angle: %0.1f\n", theta);
// -- audio streams: codec, channel span, sample format/rate, length --
2528 if( ffaudio.size() > 0 )
2529 report("\n%d audio stream%s\n",ffaudio.size(), ffaudio.size()!=1 ? "s" : "");
2530 for( int aidx=0; aidx<ffaudio.size(); ++aidx ) {
2531 FFAudioStream *aud = ffaudio[aidx];
2532 AVStream *st = aud->st;
2533 AVCodecID codec_id = st->codecpar->codec_id;
2534 report(_("aud%d (%d), id 0x%06x:\n"), aud->idx, aud->fidx, codec_id);
2535 const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
2536 int nch = aud->channels, ch0 = aud->channel0+1;
2537 report(" audio%d-%d %s", ch0, ch0+nch-1, desc ? desc->name : " (unkn)");
2538 AVSampleFormat sample_fmt = (AVSampleFormat)st->codecpar->format;
2539 const char *fmt = av_get_sample_fmt_name(sample_fmt);
2540 report(" %s %d", fmt, aud->sample_rate);
2541 int sample_bits = av_get_bits_per_sample(codec_id);
2542 report(" %dbits\n", sample_bits);
2543 double secs = to_secs(st->duration, st->time_base);
2544 int64_t length = secs * aud->sample_rate + 0.5;
2545 double ofs = to_secs((aud->nudge - st->start_time), st->time_base);
2546 int64_t nudge = ofs * aud->sample_rate;
2547 int ch = nudge >= 0 ? '+' : (nudge=-nudge, '-');
2548 report(" %jd%c%jd smpl %0.2f secs", length,ch,nudge, secs);
2549 int hrs = secs/3600; secs -= hrs*3600;
2550 int mins = secs/60; secs -= mins*60;
2551 report(" %d:%02d:%05.2f\n", hrs, mins, secs);
// -- program tables: map each program's stream indexes back to vid/aud --
2553 if( fmt_ctx->nb_programs > 0 )
2554 report("\n%d program%s\n",fmt_ctx->nb_programs, fmt_ctx->nb_programs!=1 ? "s" : "");
2555 for( int i=0; i<(int)fmt_ctx->nb_programs; ++i ) {
2556 report("program %d", i+1);
2557 AVProgram *pgrm = fmt_ctx->programs[i];
2558 for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
2559 int idx = pgrm->stream_index[j];
2560 int vidx = ffvideo.size();
2561 while( --vidx>=0 && ffvideo[vidx]->fidx != idx );
2563 report(", vid%d", vidx);
2566 int aidx = ffaudio.size();
2567 while( --aidx>=0 && ffaudio[aidx]->fidx != idx );
2569 report(", aud%d", aidx);
2572 report(", (%d)", pgrm->stream_index[j]);
// -- container-level metadata dictionary, one key=value per line --
2577 AVDictionaryEntry *tag = 0;
2578 while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
2579 report("%s=%s\n", tag->key, tag->value);
// Open filename for decoding.  A sibling "<basename>.opts" file next to the
// media (extension replaced with ".opts") can supply per-file options,
// including a forced input "format"; global "decode.opts" is merged after.
// Then avformat_open_input + avformat_find_stream_info.
// Returns 0 on success, 1 on failure.
2588 int FFMPEG::init_decoder(const char *filename)
2590 ff_lock("FFMPEG::init_decoder");
// build the companion options path: replace the extension with ".opts"
2592 char file_opts[BCTEXTLEN];
2593 strcpy(file_opts, filename);
2594 char *bp = strrchr(file_opts, '/');
2595 if( !bp ) bp = file_opts;
2596 char *sp = strrchr(bp, '.');
2597 if( !sp ) sp = bp + strlen(bp);
// newer libavformat declares AVInputFormat pointers const
2599 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,18,100)
2600 const AVInputFormat *ifmt = 0;
2602 AVInputFormat *ifmt = 0;
2605 strcpy(sp, ".opts");
2606 fp = fopen(file_opts, "r");
2609 read_options(fp, file_opts, opts);
// a "format" key in the opts forces the demuxer instead of probing
2611 AVDictionaryEntry *tag;
2612 if( (tag=av_dict_get(opts, "format", NULL, 0)) != 0 ) {
2613 ifmt = av_find_input_format(tag->value);
2617 load_options("decode.opts", opts);
// pass a disposable copy so avformat_open_input can consume entries
2618 AVDictionary *fopts = 0;
2619 av_dict_copy(&fopts, opts, 0);
2620 int ret = avformat_open_input(&fmt_ctx, filename, ifmt, &fopts);
2621 av_dict_free(&fopts);
2623 ret = avformat_find_stream_info(fmt_ctx, NULL);
2628 return !ret ? 0 : 1;
// Build FFVideoStream/FFAudioStream wrappers for every decodable stream.
// First estimates missing stream durations from file size and bitrate,
// then records geometry/rate/color info per stream and creates any
// configured filter graphs.  Returns 0 on success, -1 on error.
2631 int FFMPEG::open_decoder()
2634 if( stat(fmt_ctx->url, &st) < 0 ) {
2635 eprintf(_("can't stat file: %s\n"), fmt_ctx->url);
// estimate container bitrate from file size when the demuxer has none
2639 int64_t file_bits = 8 * st.st_size;
2640 if( !fmt_ctx->bit_rate && opt_duration > 0 )
2641 fmt_ctx->bit_rate = file_bits / opt_duration;
// fill in missing per-stream durations: file_bits / bit_rate, rescaled
// to the stream time_base (skipped if the multiply would overflow)
2644 if( fmt_ctx->bit_rate > 0 ) {
2645 for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
2646 AVStream *st = fmt_ctx->streams[i];
2647 if( st->duration != AV_NOPTS_VALUE ) continue;
2648 if( st->time_base.num > INT64_MAX / fmt_ctx->bit_rate ) continue;
2649 st->duration = av_rescale(file_bits, st->time_base.den,
2650 fmt_ctx->bit_rate * (int64_t) st->time_base.num);
// warn once per file that durations were estimated
2654 if( estimated && !(fflags & FF_ESTM_TIMES) ) {
2655 fflags |= FF_ESTM_TIMES;
2656 printf("FFMPEG::open_decoder: some stream times estimated: %s\n",
2660 ff_lock("FFMPEG::open_decoder");
2661 int ret = 0, bad_time = 0;
2662 for( int i=0; !ret && i<(int)fmt_ctx->nb_streams; ++i ) {
2663 AVStream *st = fmt_ctx->streams[i];
2664 if( st->duration == AV_NOPTS_VALUE ) bad_time = 1;
2665 AVCodecParameters *avpar = st->codecpar;
2666 const AVCodecDescriptor *codec_desc = avcodec_descriptor_get(avpar->codec_id);
2667 if( !codec_desc ) continue;
2668 switch( avpar->codec_type ) {
2669 case AVMEDIA_TYPE_VIDEO: {
// ignore degenerate video streams (no geometry or frame rate)
2670 if( avpar->width < 1 ) continue;
2671 if( avpar->height < 1 ) continue;
2672 AVRational framerate = av_guess_frame_rate(fmt_ctx, st, 0);
2673 if( framerate.num < 1 ) continue;
2675 int vidx = ffvideo.size();
2676 FFVideoStream *vid = new FFVideoStream(this, st, vidx, i);
2677 vstrm_index.append(ffidx(vidx, 0));
2678 ffvideo.append(vid);
2679 vid->width = avpar->width;
2680 vid->height = avpar->height;
2681 vid->frame_rate = !framerate.den ? 0 : (double)framerate.num / framerate.den;
// map libav color range/space to BC_COLORS_*; fall back to the user
// preference (or a fixed default when there is no file_base)
2682 switch( avpar->color_range ) {
2683 case AVCOL_RANGE_MPEG:
2684 vid->color_range = BC_COLORS_MPEG;
2686 case AVCOL_RANGE_JPEG:
2687 vid->color_range = BC_COLORS_JPEG;
2690 vid->color_range = !file_base ? BC_COLORS_JPEG :
2691 file_base->file->preferences->yuv_color_range;
2694 switch( avpar->color_space ) {
2695 case AVCOL_SPC_BT470BG:
2696 vid->color_space = BC_COLORS_BT601_PAL;
2698 case AVCOL_SPC_SMPTE170M:
2699 vid->color_space = BC_COLORS_BT601_NTSC;
2701 case AVCOL_SPC_BT709:
2702 vid->color_space = BC_COLORS_BT709;
2704 case AVCOL_SPC_BT2020_NCL:
2705 vid->color_space = BC_COLORS_BT2020_NCL;
2707 case AVCOL_SPC_BT2020_CL:
2708 vid->color_space = BC_COLORS_BT2020_CL;
2711 vid->color_space = !file_base ? BC_COLORS_BT601_NTSC :
2712 file_base->file->preferences->yuv_color_space;
2715 double secs = to_secs(st->duration, st->time_base);
2716 vid->length = secs * vid->frame_rate;
2717 vid->aspect_ratio = (double)st->sample_aspect_ratio.num / st->sample_aspect_ratio.den;
2718 vid->nudge = st->start_time;
2720 ret = vid->create_filter(opt_video_filter);
2722 case AVMEDIA_TYPE_AUDIO: {
2723 if( avpar->channels < 1 ) continue;
2724 if( avpar->sample_rate < 1 ) continue;
2726 int aidx = ffaudio.size();
2727 FFAudioStream *aud = new FFAudioStream(this, st, aidx, i);
2728 ffaudio.append(aud);
// channel0 is this stream's first slot in the flat channel index
2729 aud->channel0 = astrm_index.size();
2730 aud->channels = avpar->channels;
2731 for( int ch=0; ch<aud->channels; ++ch )
2732 astrm_index.append(ffidx(aidx, ch));
2733 aud->sample_rate = avpar->sample_rate;
2734 double secs = to_secs(st->duration, st->time_base);
2735 aud->length = secs * aud->sample_rate;
2736 aud->init_swr(aud->channels, avpar->format, aud->sample_rate);
2737 aud->nudge = st->start_time;
2739 ret = aud->create_filter(opt_audio_filter);
// warn once per file about streams with unknown durations
2744 if( bad_time && !(fflags & FF_BAD_TIMES) ) {
2745 fflags |= FF_BAD_TIMES;
2746 printf(_("FFMPEG::open_decoder: some stream have bad times: %s\n"),
2750 return ret < 0 ? -1 : 0;
// Prepare encoding to filename: verify the path is writable (probing with
// access()/open() so named pipes work), validate the requested file format,
// allocate the output AVFormatContext, and merge global "encode.opts".
2754 int FFMPEG::init_encoder(const char *filename)
2756 // try access first for named pipes
2757 int ret = access(filename, W_OK);
// access() failed: try opening (and if necessary creating) the file
2759 int fd = ::open(filename,O_WRONLY);
2760 if( fd < 0 ) fd = open(filename,O_WRONLY+O_CREAT,0666);
2761 if( fd >= 0 ) { close(fd); ret = 0; }
2764 eprintf(_("bad file path: %s\n"), filename);
2767 ret = get_file_format();
2769 eprintf(_("bad file format: %s\n"), filename);
2773 eprintf(_("mismatch audio/video file format: %s\n"), filename);
2776 ff_lock("FFMPEG::init_encoder");
// resolve the muxer name; fall back to the raw file_format string
2778 char format[BCSTRLEN];
2779 if( get_format(format, "format", file_format) )
2780 strcpy(format, file_format);
2781 avformat_alloc_output_context2(&fmt_ctx, 0, format, filename);
2783 eprintf(_("failed: %s\n"), filename);
2788 load_options("encode.opts", opts);
// Create and open one encoder stream ("audio" or "video" per type/spec):
// load preset options, look up the codec, allocate the AVCodecContext,
// apply asset bitrate/quality and color/interlace settings, handle 2-pass
// flags and stats files, open the codec, and attach any bitstream filter.
2794 int FFMPEG::open_encoder(const char *type, const char *spec)
2797 Asset *asset = file_base->asset;
2798 char *filename = asset->path;
// start from the global opts, then layer type-level and preset options
2799 AVDictionary *sopts = 0;
2800 av_dict_copy(&sopts, opts, 0);
2801 char option_path[BCTEXTLEN];
2802 set_option_path(option_path, "%s/%s.opts", type, type);
2803 read_options(option_path, sopts);
2804 get_option_path(option_path, type, spec);
2805 char format_name[BCSTRLEN], codec_name[BCTEXTLEN], bsfilter[BCTEXTLEN];
2806 if( get_encoder(option_path, format_name, codec_name, bsfilter) ) {
2807 eprintf(_("get_encoder failed %s:%s\n"), option_path, filename);
// translate legacy codec tags to libav encoder names
2812 if( !strcmp(codec_name, CODEC_TAG_DVSD) ) strcpy(codec_name, "dv");
2814 else if( !strcmp(codec_name, CODEC_TAG_MJPEG) ) strcpy(codec_name, "mjpeg");
2815 else if( !strcmp(codec_name, CODEC_TAG_JPEG) ) strcpy(codec_name, "jpeg");
2818 ff_lock("FFMPEG::open_encoder");
2821 AVCodecContext *ctx = 0;
2823 const AVCodecDescriptor *codec_desc = 0;
// newer libavcodec declares AVCodec pointers const
2824 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
2825 const AVCodec *codec = avcodec_find_encoder_by_name(codec_name);
2827 AVCodec *codec = avcodec_find_encoder_by_name(codec_name);
2830 eprintf(_("cant find codec %s:%s\n"), codec_name, filename);
2834 codec_desc = avcodec_descriptor_get(codec->id);
2836 eprintf(_("unknown codec %s:%s\n"), codec_name, filename);
2841 st = avformat_new_stream(fmt_ctx, 0);
2843 eprintf(_("cant create stream %s:%s\n"), codec_name, filename);
2848 switch( codec_desc->type ) {
2849 case AVMEDIA_TYPE_AUDIO: {
2851 eprintf(_("duplicate audio %s:%s\n"), codec_name, filename);
2855 if( scan_options(asset->ff_audio_options, sopts, st) ) {
2856 eprintf(_("bad audio options %s:%s\n"), codec_name, filename);
2861 ctx = avcodec_alloc_context3(codec);
// bitrate takes precedence; otherwise qscale-style constant quality
2862 if( asset->ff_audio_bitrate > 0 ) {
2863 ctx->bit_rate = asset->ff_audio_bitrate;
2865 sprintf(arg, "%d", asset->ff_audio_bitrate);
2866 av_dict_set(&sopts, "b", arg, 0);
2868 else if( asset->ff_audio_quality >= 0 ) {
2869 ctx->global_quality = asset->ff_audio_quality * FF_QP2LAMBDA;
2870 ctx->qmin = ctx->qmax = asset->ff_audio_quality;
2871 ctx->mb_lmin = ctx->qmin * FF_QP2LAMBDA;
2872 ctx->mb_lmax = ctx->qmax * FF_QP2LAMBDA;
2873 ctx->flags |= AV_CODEC_FLAG_QSCALE;
2875 av_dict_set(&sopts, "flags", "+qscale", 0);
2876 sprintf(arg, "%d", asset->ff_audio_quality);
2877 av_dict_set(&sopts, "qscale", arg, 0);
2878 sprintf(arg, "%d", ctx->global_quality);
2879 av_dict_set(&sopts, "global_quality", arg, 0);
2881 int aidx = ffaudio.size();
2882 int fidx = aidx + ffvideo.size();
2883 FFAudioStream *aud = new FFAudioStream(this, st, aidx, fidx);
2884 aud->avctx = ctx; ffaudio.append(aud); fst = aud;
2885 aud->sample_rate = asset->sample_rate;
2886 ctx->channels = aud->channels = asset->channels;
2887 for( int ch=0; ch<aud->channels; ++ch )
2888 astrm_index.append(ffidx(aidx, ch));
2889 ctx->channel_layout = av_get_default_channel_layout(ctx->channels);
2890 ctx->sample_rate = check_sample_rate(codec, asset->sample_rate);
2891 if( !ctx->sample_rate ) {
2892 eprintf(_("check_sample_rate failed %s\n"), filename);
2896 ctx->time_base = st->time_base = (AVRational){1, aud->sample_rate};
// pick the asset's sample format, else the codec's first supported one
2897 AVSampleFormat sample_fmt = av_get_sample_fmt(asset->ff_sample_format);
2898 if( sample_fmt == AV_SAMPLE_FMT_NONE )
2899 sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_S16;
2900 ctx->sample_fmt = sample_fmt;
// resampler converts internal FLT samples to the codec's format/rate
2901 uint64_t layout = av_get_default_channel_layout(ctx->channels);
2902 aud->resample_context = swr_alloc_set_opts(NULL,
2903 layout, ctx->sample_fmt, aud->sample_rate,
2904 layout, AV_SAMPLE_FMT_FLT, ctx->sample_rate,
2906 swr_init(aud->resample_context);
2909 case AVMEDIA_TYPE_VIDEO: {
2911 eprintf(_("duplicate video %s:%s\n"), codec_name, filename);
2915 if( scan_options(asset->ff_video_options, sopts, st) ) {
2916 eprintf(_("bad video options %s:%s\n"), codec_name, filename);
2921 ctx = avcodec_alloc_context3(codec);
// bitrate vs qscale quality, mirroring the audio branch above
2922 if( asset->ff_video_bitrate > 0 ) {
2923 ctx->bit_rate = asset->ff_video_bitrate;
2925 sprintf(arg, "%d", asset->ff_video_bitrate);
2926 av_dict_set(&sopts, "b", arg, 0);
2928 else if( asset->ff_video_quality >= 0 ) {
2929 ctx->global_quality = asset->ff_video_quality * FF_QP2LAMBDA;
2930 ctx->qmin = ctx->qmax = asset->ff_video_quality;
2931 ctx->mb_lmin = ctx->qmin * FF_QP2LAMBDA;
2932 ctx->mb_lmax = ctx->qmax * FF_QP2LAMBDA;
2933 ctx->flags |= AV_CODEC_FLAG_QSCALE;
2935 av_dict_set(&sopts, "flags", "+qscale", 0);
2936 sprintf(arg, "%d", asset->ff_video_quality);
2937 av_dict_set(&sopts, "qscale", arg, 0);
2938 sprintf(arg, "%d", ctx->global_quality);
2939 av_dict_set(&sopts, "global_quality", arg, 0);
2941 int vidx = ffvideo.size();
2942 int fidx = vidx + ffaudio.size();
2943 FFVideoStream *vid = new FFVideoStream(this, st, vidx, fidx);
2944 vstrm_index.append(ffidx(vidx, 0));
2945 vid->avctx = ctx; ffvideo.append(vid); fst = vid;
2946 vid->width = asset->width;
2947 vid->height = asset->height;
2948 vid->frame_rate = asset->frame_rate;
// color range/space: asset setting, falling back to user preference
2949 if( (vid->color_range = asset->ff_color_range) < 0 )
2950 vid->color_range = file_base->file->preferences->yuv_color_range;
2951 switch( vid->color_range ) {
2952 case BC_COLORS_MPEG: ctx->color_range = AVCOL_RANGE_MPEG; break;
2953 case BC_COLORS_JPEG: ctx->color_range = AVCOL_RANGE_JPEG; break;
2955 if( (vid->color_space = asset->ff_color_space) < 0 )
2956 vid->color_space = file_base->file->preferences->yuv_color_space;
2957 switch( vid->color_space ) {
2958 case BC_COLORS_BT601_NTSC: ctx->colorspace = AVCOL_SPC_SMPTE170M; break;
2959 case BC_COLORS_BT601_PAL: ctx->colorspace = AVCOL_SPC_BT470BG; break;
2960 case BC_COLORS_BT709: ctx->colorspace = AVCOL_SPC_BT709; break;
2961 case BC_COLORS_BT2020_NCL: ctx->colorspace = AVCOL_SPC_BT2020_NCL; break;
2962 case BC_COLORS_BT2020_CL: ctx->colorspace = AVCOL_SPC_BT2020_CL; break;
2964 AVPixelFormat pix_fmt = av_get_pix_fmt(asset->ff_pixel_format);
// hardware encoding (e.g. VAAPI) overrides the pixel format
2965 if( opt_hw_dev != 0 ) {
2966 AVHWDeviceType hw_type = vid->encode_hw_activate(opt_hw_dev);
2968 case AV_HWDEVICE_TYPE_VAAPI:
2969 pix_fmt = AV_PIX_FMT_VAAPI;
2971 case AV_HWDEVICE_TYPE_NONE:
2975 if( pix_fmt == AV_PIX_FMT_NONE )
2976 pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
2977 ctx->pix_fmt = pix_fmt;
// round coded geometry up to the chroma subsampling alignment
2979 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
2980 int mask_w = (1<<desc->log2_chroma_w)-1;
2981 ctx->width = (vid->width+mask_w) & ~mask_w;
2982 int mask_h = (1<<desc->log2_chroma_h)-1;
2983 ctx->height = (vid->height+mask_h) & ~mask_h;
2984 ctx->sample_aspect_ratio = to_sample_aspect_ratio(asset);
// mpeg1/mpeg2 only accept specific frame rates; otherwise rationalize
2985 AVRational frame_rate;
2986 if (ctx->codec->id == AV_CODEC_ID_MPEG1VIDEO ||
2987 ctx->codec->id == AV_CODEC_ID_MPEG2VIDEO)
2988 frame_rate = check_frame_rate(codec->supported_framerates, vid->frame_rate);
2990 frame_rate = av_d2q(vid->frame_rate, INT_MAX);
2991 if( !frame_rate.num || !frame_rate.den ) {
2992 eprintf(_("check_frame_rate failed %s\n"), filename);
2996 av_reduce(&frame_rate.num, &frame_rate.den,
2997 frame_rate.num, frame_rate.den, INT_MAX);
2998 ctx->framerate = (AVRational) { frame_rate.num, frame_rate.den };
2999 ctx->time_base = (AVRational) { frame_rate.den, frame_rate.num };
3000 st->avg_frame_rate = frame_rate;
3001 st->time_base = ctx->time_base;
// interlacing: set flags and the field_order option; mjpeg uses tt/bb
3003 vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
3004 asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ? 1 : 0;
3005 vid->top_field_first = asset->interlace_mode == ILACE_MODE_TOP_FIRST ? 1 : 0;
3006 switch (asset->interlace_mode) {
3007 case ILACE_MODE_TOP_FIRST:
3008 if (ctx->codec->id == AV_CODEC_ID_MJPEG)
3009 av_dict_set(&sopts, "field_order", "tt", 0);
3011 av_dict_set(&sopts, "field_order", "tb", 0);
3012 if (ctx->codec_id != AV_CODEC_ID_MJPEG)
3013 av_dict_set(&sopts, "flags", "+ilme+ildct", 0);
3015 case ILACE_MODE_BOTTOM_FIRST:
3016 if (ctx->codec->id == AV_CODEC_ID_MJPEG)
3017 av_dict_set(&sopts, "field_order", "bb", 0);
3019 av_dict_set(&sopts, "field_order", "bt", 0);
3020 if (ctx->codec_id != AV_CODEC_ID_MJPEG)
3021 av_dict_set(&sopts, "flags", "+ilme+ildct", 0);
3023 case ILACE_MODE_NOTINTERLACED: av_dict_set(&sopts, "field_order", "progressive", 0); break;
3027 eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
// stats filename for multi-pass encoding, suffixed with the stream fidx
3032 AVDictionaryEntry *tag;
3033 if( (tag=av_dict_get(sopts, "cin_stats_filename", NULL, 0)) != 0 ) {
3034 char suffix[BCSTRLEN]; sprintf(suffix,"-%d.log",fst->fidx);
3035 fst->stats_filename = cstrcat(2, tag->value, suffix);
// parse "+pass1"/"-pass2" style tokens out of the flags option string
3037 if( (tag=av_dict_get(sopts, "flags", NULL, 0)) != 0 ) {
3038 int pass = fst->pass;
3039 char *cp = tag->value;
3041 int ch = *cp++, pfx = ch=='-' ? -1 : ch=='+' ? 1 : 0;
3042 if( !isalnum(!pfx ? ch : (ch=*cp++)) ) continue;
3043 char id[BCSTRLEN], *bp = id, *ep = bp+sizeof(id)-1;
3044 for( *bp++=ch; isalnum(ch=*cp); ++cp )
3045 if( bp < ep ) *bp++ = ch;
3047 if( !strcmp(id, "pass1") ) {
3048 pass = pfx<0 ? (pass&~1) : pfx>0 ? (pass|1) : 1;
3050 else if( !strcmp(id, "pass2") ) {
3051 pass = pfx<0 ? (pass&~2) : pfx>0 ? (pass|2) : 2;
3054 if( (fst->pass=pass) ) {
3055 if( pass & 1 ) ctx->flags |= AV_CODEC_FLAG_PASS1;
3056 if( pass & 2 ) ctx->flags |= AV_CODEC_FLAG_PASS2;
3062 if( fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER )
3063 ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
3064 if( fst->stats_filename && (ret=fst->init_stats_file()) )
3065 eprintf(_("error: stats file = %s\n"), fst->stats_filename);
// drop cinelerra-private keys before handing sopts to avcodec_open2
3068 av_dict_set(&sopts, "cin_bitrate", 0, 0);
3069 av_dict_set(&sopts, "cin_quality", 0, 0);
3071 if( !av_dict_get(sopts, "threads", NULL, 0) )
3072 ctx->thread_count = ff_cpus();
3073 ret = avcodec_open2(ctx, codec, &sopts);
3075 ret = avcodec_parameters_from_context(st->codecpar, ctx);
3077 fprintf(stderr, "Could not copy the stream parameters\n");
// older libavcodec needs the deprecated avcodec_copy_context path
3080 _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
3081 #if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(58,134,100)
3082 ret = avcodec_copy_context(st->codec, ctx);
3084 ret = avcodec_parameters_to_context(ctx, st->codecpar);
3086 _Pragma("GCC diagnostic warning \"-Wdeprecated-declarations\"")
3088 fprintf(stderr, "Could not copy the stream context\n");
3091 ff_err(ret,"FFMPEG::open_encoder");
3092 eprintf(_("open failed %s:%s\n"), codec_name, filename);
// optional bitstream filter chain from the preset (e.g. for mp4 mux)
3098 if( !ret && fst && bsfilter[0] ) {
3099 ret = av_bsf_list_parse_str(bsfilter, &fst->bsfc);
3101 ff_err(ret,"FFMPEG::open_encoder");
3102 eprintf(_("bitstream filter failed %s:\n%s\n"), filename, bsfilter);
3113 av_dict_free(&sopts);
// Finish an encode: write the container trailer and close the output
// AVIO context (unless the muxer is AVFMT_NOFILE).
3117 int FFMPEG::close_encoder()
3120 if( encoding > 0 ) {
3121 av_write_trailer(fmt_ctx);
3122 if( !(fmt_ctx->flags & AVFMT_NOFILE) )
3123 avio_closep(&fmt_ctx->pb);
// One-time decode setup: compute a "nudge" (start-time offset) for every
// audio/video stream so positions line up across streams.  Streams that
// belong to the same AVProgram share a single nudge; any stream left over
// gets a global nudge derived from the remaining start times.
3129 int FFMPEG::decode_activate()
3131 if( decoding < 0 ) {
// reset all nudges to "unset"
3133 for( int vidx=0; vidx<ffvideo.size(); ++vidx )
3134 ffvideo[vidx]->nudge = AV_NOPTS_VALUE;
3135 for( int aidx=0; aidx<ffaudio.size(); ++aidx )
3136 ffaudio[aidx]->nudge = AV_NOPTS_VALUE;
3137 // set nudges for each program stream set
3138 const int64_t min_nudge = INT64_MIN+1;
3139 int npgrms = fmt_ctx->nb_programs;
3140 for( int i=0; i<npgrms; ++i ) {
3141 AVProgram *pgrm = fmt_ctx->programs[i];
3142 // first start time video stream
3143 int64_t vstart_time = min_nudge, astart_time = min_nudge;
// pass 1: latest known start time per media type in this program
3144 for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
3145 int fidx = pgrm->stream_index[j];
3146 AVStream *st = fmt_ctx->streams[fidx];
3147 AVCodecParameters *avpar = st->codecpar;
3148 if( avpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
3149 if( st->start_time == AV_NOPTS_VALUE ) continue;
3150 if( vstart_time < st->start_time )
3151 vstart_time = st->start_time;
3154 if( avpar->codec_type == AVMEDIA_TYPE_AUDIO ) {
3155 if( st->start_time == AV_NOPTS_VALUE ) continue;
3156 if( astart_time < st->start_time )
3157 astart_time = st->start_time;
3161 //since frame rate is much more grainy than sample rate, it is better to
3162 // align using video, so that total absolute error is minimized.
3163 int64_t nudge = vstart_time > min_nudge ? vstart_time :
3164 astart_time > min_nudge ? astart_time : AV_NOPTS_VALUE;
// pass 2: assign the chosen nudge to every stream in the program
3165 for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
3166 int fidx = pgrm->stream_index[j];
3167 AVStream *st = fmt_ctx->streams[fidx];
3168 AVCodecParameters *avpar = st->codecpar;
3169 if( avpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
3170 for( int k=0; k<ffvideo.size(); ++k ) {
3171 if( ffvideo[k]->fidx != fidx ) continue;
3172 ffvideo[k]->nudge = nudge;
3176 if( avpar->codec_type == AVMEDIA_TYPE_AUDIO ) {
3177 for( int k=0; k<ffaudio.size(); ++k ) {
3178 if( ffaudio[k]->fidx != fidx ) continue;
3179 ffaudio[k]->nudge = nudge;
3185 // set nudges for any streams not yet set
3186 int64_t vstart_time = min_nudge, astart_time = min_nudge;
3187 int nstreams = fmt_ctx->nb_streams;
3188 for( int i=0; i<nstreams; ++i ) {
3189 AVStream *st = fmt_ctx->streams[i];
3190 AVCodecParameters *avpar = st->codecpar;
3191 switch( avpar->codec_type ) {
3192 case AVMEDIA_TYPE_VIDEO: {
3193 if( st->start_time == AV_NOPTS_VALUE ) continue;
3194 int vidx = ffvideo.size();
3195 while( --vidx >= 0 && ffvideo[vidx]->fidx != i );
3196 if( vidx < 0 ) continue;
3197 if( ffvideo[vidx]->nudge != AV_NOPTS_VALUE ) continue;
3198 if( vstart_time < st->start_time )
3199 vstart_time = st->start_time;
3201 case AVMEDIA_TYPE_AUDIO: {
3202 if( st->start_time == AV_NOPTS_VALUE ) continue;
3203 int aidx = ffaudio.size();
3204 while( --aidx >= 0 && ffaudio[aidx]->fidx != i );
3205 if( aidx < 0 ) continue;
// also widen the audio frame size to the codec's reported frame_size
3206 if( ffaudio[aidx]->frame_sz < avpar->frame_size )
3207 ffaudio[aidx]->frame_sz = avpar->frame_size;
3208 if( ffaudio[aidx]->nudge != AV_NOPTS_VALUE ) continue;
3209 if( astart_time < st->start_time )
3210 astart_time = st->start_time;
// video start preferred over audio; 0 if neither is known
3215 int64_t nudge = vstart_time > min_nudge ? vstart_time :
3216 astart_time > min_nudge ? astart_time : 0;
3217 for( int vidx=0; vidx<ffvideo.size(); ++vidx ) {
3218 if( ffvideo[vidx]->nudge == AV_NOPTS_VALUE )
3219 ffvideo[vidx]->nudge = nudge;
3221 for( int aidx=0; aidx<ffaudio.size(); ++aidx ) {
3222 if( ffaudio[aidx]->nudge == AV_NOPTS_VALUE )
3223 ffaudio[aidx]->nudge = nudge;
// One-time encode setup: open the output AVIO (unless AVFMT_NOFILE), emit
// an image2 "toc" header file when writing image sequences, create a
// program entry with title metadata, tag the first audio stream's
// language, merge format-level options, and write the container header.
3230 int FFMPEG::encode_activate()
3233 if( encoding < 0 ) {
3235 if( !(fmt_ctx->flags & AVFMT_NOFILE) &&
3236 (ret=avio_open(&fmt_ctx->pb, fmt_ctx->url, AVIO_FLAG_WRITE)) < 0 ) {
3237 ff_err(ret, "FFMPEG::encode_activate: err opening : %s\n",
// image sequences get a small text header file describing the sequence
3241 if( !strcmp(file_format, "image2") ) {
3242 Asset *asset = file_base->asset;
3243 const char *filename = asset->path;
3244 FILE *fp = fopen(filename,"w");
3246 eprintf(_("Cant write image2 header file: %s\n %m"), filename);
3249 fprintf(fp, "IMAGE2\n");
3250 fprintf(fp, "# Frame rate: %f\n", asset->frame_rate);
3251 fprintf(fp, "# Width: %d\n", asset->width);
3252 fprintf(fp, "# Height: %d\n", asset->height);
// register all streams under one program and attach metadata
3256 AVProgram *prog = av_new_program(fmt_ctx, prog_id);
3257 for( int i=0; i< ffvideo.size(); ++i )
3258 av_program_add_stream_index(fmt_ctx, prog_id, ffvideo[i]->fidx);
3259 for( int i=0; i< ffaudio.size(); ++i )
3260 av_program_add_stream_index(fmt_ctx, prog_id, ffaudio[i]->fidx);
3261 int pi = fmt_ctx->nb_programs;
3262 while( --pi >= 0 && fmt_ctx->programs[pi]->id != prog_id );
3263 AVDictionary **meta = &prog->metadata;
3264 av_dict_set(meta, "service_provider", "cin5", 0);
// title defaults to the output file's basename
3265 const char *path = fmt_ctx->url, *bp = strrchr(path,'/');
3266 if( bp ) path = bp + 1;
3267 av_dict_set(meta, "title", path, 0);
// audio language: CIN_AUDIO_LANG env var, else mapped from $LANG,
// else "und" (undetermined)
3269 if( ffaudio.size() ) {
3270 const char *ep = getenv("CIN_AUDIO_LANG"), *lp = 0;
3271 if( !ep && (lp=getenv("LANG")) ) { // some are guesses
3272 static struct { const char lc[3], lng[4]; } lcode[] = {
3273 { "en", "eng" }, { "de", "ger" }, { "es", "spa" },
3274 { "eu", "bas" }, { "fr", "fre" }, { "el", "gre" },
3275 { "hi", "hin" }, { "it", "ita" }, { "ja", "jap" },
3276 { "ko", "kor" }, { "du", "dut" }, { "pl", "pol" },
3277 { "pt", "por" }, { "ru", "rus" }, { "sl", "slv" },
3278 { "uk", "ukr" }, { "vi", "vie" }, { "zh", "chi" },
3280 for( int i=sizeof(lcode)/sizeof(lcode[0]); --i>=0 && !ep; )
3281 if( !strncmp(lcode[i].lc,lp,2) ) ep = lcode[i].lng;
3283 if( !ep ) ep = "und";
3285 strncpy(lang,ep,3); lang[3] = 0;
3286 AVStream *st = ffaudio[0]->st;
3287 av_dict_set(&st->metadata,"language",lang,0);
// merge format-level preset options with the asset's format options
3290 AVDictionary *fopts = 0;
3291 char option_path[BCTEXTLEN];
3292 set_option_path(option_path, "format/%s", file_format);
3293 read_options(option_path, fopts, 1);
3294 av_dict_copy(&fopts, opts, 0);
3295 if( scan_options(file_base->asset->ff_format_options, fopts, 0) ) {
3296 eprintf(_("bad format options %s\n"), file_base->asset->path);
3300 ret = avformat_write_header(fmt_ctx, &fopts);
3302 ff_err(ret, "FFMPEG::encode_activate: write header failed %s\n",
3306 av_dict_free(&fopts);
// Seek the audio stream owning flat channel `stream` to sample position pos.
3313 int FFMPEG::audio_seek(int stream, int64_t pos)
3315 int aidx = astrm_index[stream].st_idx;
3316 FFAudioStream *aud = ffaudio[aidx];
3317 aud->audio_seek(pos);
// Probe the first video stream's interlacing at codec level and map the
// AV_FIELD_* result to the corresponding ILACE_MODE_* constant.
3321 int FFMPEG::video_probe(int64_t pos)
3323 int vidx = vstrm_index[0].st_idx;
3324 FFVideoStream *vid = ffvideo[vidx];
3327 int interlace1 = interlace_from_codec;
3328 //printf("interlace from codec: %i\n", interlace1);
3334 return ILACE_MODE_TOP_FIRST;
3337 return ILACE_MODE_BOTTOM_FIRST;
3338 case AV_FIELD_PROGRESSIVE:
3339 return ILACE_MODE_NOTINTERLACED;
// unknown or unsupported field order
3341 return ILACE_MODE_UNDETECTED;
// Seek video layer `stream` to frame position pos.
3348 int FFMPEG::video_seek(int stream, int64_t pos)
3350 int vidx = vstrm_index[stream].st_idx;
3351 FFVideoStream *vid = ffvideo[vidx];
3352 vid->video_seek(pos);
// Decode len samples of flat channel chn starting at pos into samples.
// Returns -1 when there is no audio, the channel is out of range, or the
// stream cannot supply len samples at pos.
3357 int FFMPEG::decode(int chn, int64_t pos, double *samples, int len)
3359 if( !has_audio || chn >= astrm_index.size() ) return -1;
3360 int aidx = astrm_index[chn].st_idx;
3361 FFAudioStream *aud = ffaudio[aidx];
3362 if( aud->load(pos, len) < len ) return -1;
3363 int ch = astrm_index[chn].st_ch;
3364 int ret = aud->read(samples,len,ch);
// Decode frame pos of video layer into vframe; -1 if no such layer.
3368 int FFMPEG::decode(int layer, int64_t pos, VFrame *vframe)
3370 if( !has_video || layer >= vstrm_index.size() ) return -1;
3371 int vidx = vstrm_index[layer].st_idx;
3372 FFVideoStream *vid = ffvideo[vidx];
3373 return vid->load(vframe, pos);
// Encode len samples (per channel) into audio stream `stream`.
3377 int FFMPEG::encode(int stream, double **samples, int len)
3379 FFAudioStream *aud = ffaudio[stream];
3380 return aud->encode(samples, len);
// Encode one video frame into video stream `stream`.
3384 int FFMPEG::encode(int stream, VFrame *frame)
3386 FFVideoStream *vid = ffvideo[stream];
3387 return vid->encode(frame);
// Start the muxer thread (body elided in this view).
3390 void FFMPEG::start_muxer()
// Stop the muxer thread (body elided in this view).
3398 void FFMPEG::stop_muxer()
// Pause frame flow into the muxer by holding flow_lock.
3407 void FFMPEG::flow_off()
3410 flow_lock->lock("FFMPEG::flow_off");
// Resume frame flow into the muxer by releasing flow_lock.
3414 void FFMPEG::flow_on()
3418 flow_lock->unlock();
// Flow-control gate: blocks while flow is off (lock then immediately unlock).
3421 void FFMPEG::flow_ctl()
3424 flow_lock->lock("FFMPEG::flow_ctl");
3425 flow_lock->unlock();
// Encode one queued audio frame: rescale its position from sample ticks
// (1/sample_rate) to the codec time_base, then hand it to the encoder.
// Returns 0 on success, 1 on error.
3429 int FFMPEG::mux_audio(FFrame *frm)
3431 FFStream *fst = frm->fst;
3432 AVCodecContext *ctx = fst->avctx;
3433 AVFrame *frame = *frm;
3434 AVRational tick_rate = {1, ctx->sample_rate};
3435 frame->pts = av_rescale_q(frm->position, tick_rate, ctx->time_base);
3436 int ret = fst->encode_frame(frame);
3438 ff_err(ret, "FFMPEG::mux_audio");
3439 return ret >= 0 ? 0 : 1;
// Encode one queued video frame; frame positions are already in frame
// units so they are used as pts directly.  Returns 0 on success, 1 on error.
3442 int FFMPEG::mux_video(FFrame *frm)
3444 FFStream *fst = frm->fst;
3445 AVFrame *frame = *frm;
3446 frame->pts = frm->position;
3447 int ret = fst->encode_frame(frame);
3449 ff_err(ret, "FFMPEG::mux_video");
3450 return ret >= 0 ? 0 : 1;
// Muxer scheduling loop body (enclosing function header not visible in
// this view): find the earliest queued audio and video frame by time,
// keep the producers flowing while queues are short, and interleave by
// writing whichever frame compares earliest via av_compare_ts.
3456 double atm = -1, vtm = -1;
3457 FFrame *afrm = 0, *vfrm = 0;
3459 for( int i=0; i<ffaudio.size(); ++i ) { // earliest audio
3460 FFStream *fst = ffaudio[i];
3461 if( fst->frm_count < 3 ) { demand = 1; flow_on(); }
3462 FFrame *frm = fst->frms.first;
3463 if( !frm ) { if( !done ) return; continue; }
3464 double tm = to_secs(frm->position, fst->avctx->time_base);
3465 if( atm < 0 || tm < atm ) { atm = tm; afrm = frm; }
3467 for( int i=0; i<ffvideo.size(); ++i ) { // earliest video
3468 FFStream *fst = ffvideo[i];
3469 if( fst->frm_count < 2 ) { demand = 1; flow_on(); }
3470 FFrame *frm = fst->frms.first;
3471 if( !frm ) { if( !done ) return; continue; }
3472 double tm = to_secs(frm->position, fst->avctx->time_base);
3473 if( vtm < 0 || tm < vtm ) { vtm = tm; vfrm = frm; }
// queues full enough: stop the producers until we drain a little
3475 if( !demand ) flow_off();
3476 if( !afrm && !vfrm ) break;
// v<=0 means the video frame is not later than the audio frame
3477 int v = !afrm ? -1 : !vfrm ? 1 : av_compare_ts(
3478 vfrm->position, vfrm->fst->avctx->time_base,
3479 afrm->position, afrm->fst->avctx->time_base);
3480 FFrame *frm = v <= 0 ? vfrm : afrm;
3481 if( frm == afrm ) mux_audio(frm);
3482 if( frm == vfrm ) mux_video(frm);
// Muxer thread main-loop tail (enclosing function header not visible in
// this view): wait on mux_lock for work, then drain all per-stream frame
// queues and finally flush every encoder.
3491 mux_lock->lock("FFMPEG::run");
3494 for( int i=0; i<ffaudio.size(); ++i )
3495 ffaudio[i]->drain();
3496 for( int i=0; i<ffvideo.size(); ++i )
3497 ffvideo[i]->drain();
3499 for( int i=0; i<ffaudio.size(); ++i )
3500 ffaudio[i]->flush();
3501 for( int i=0; i<ffvideo.size(); ++i )
3502 ffvideo[i]->flush();
// Total audio channels across all streams (size of the flat channel index).
3506 int FFMPEG::ff_total_audio_channels()
3508 return astrm_index.size();
// Number of audio streams.
3511 int FFMPEG::ff_total_astreams()
3513 return ffaudio.size();
// Channel count of audio stream `stream`.
3516 int FFMPEG::ff_audio_channels(int stream)
3518 return ffaudio[stream]->channels;
// Sample rate of audio stream `stream`.
3521 int FFMPEG::ff_sample_rate(int stream)
3523 return ffaudio[stream]->sample_rate;
// Codec descriptor name of audio stream `stream`, or "Unknown".
3526 const char* FFMPEG::ff_audio_format(int stream)
3528 AVStream *st = ffaudio[stream]->st;
3529 AVCodecID id = st->codecpar->codec_id;
3530 const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
3531 return desc ? desc->name : _("Unknown");
// Container stream id (e.g. mpeg-ts PID) of audio stream `stream`.
3534 int FFMPEG::ff_audio_pid(int stream)
3536 return ffaudio[stream]->st->id;
// Length of audio stream `stream` in samples.
3539 int64_t FFMPEG::ff_audio_samples(int stream)
3541 return ffaudio[stream]->length;
3544 // find audio astream/channels with this program,
3545 // or all program audio channels (astream=-1)
// Returns the matching astream index (or -1 if the video stream belongs
// to no program) and fills channel_mask with a bitmask of the selected
// streams' flat channel positions.
3546 int FFMPEG::ff_audio_for_video(int vstream, int astream, int64_t &channel_mask)
3550 int vidx = ffvideo[vstream]->fidx;
3551 // find first program with this video stream
3552 for( int i=0; pidx<0 && i<(int)fmt_ctx->nb_programs; ++i ) {
3553 AVProgram *pgrm = fmt_ctx->programs[i];
3554 for( int j=0; pidx<0 && j<(int)pgrm->nb_stream_indexes; ++j ) {
3555 int st_idx = pgrm->stream_index[j];
3556 AVStream *st = fmt_ctx->streams[st_idx];
3557 if( st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ) continue;
3558 if( st_idx == vidx ) pidx = i;
3561 if( pidx < 0 ) return -1;
// walk the program's audio streams, skipping `astream` of them first
3563 int64_t channels = 0;
3564 AVProgram *pgrm = fmt_ctx->programs[pidx];
3565 for( int j=0; j<(int)pgrm->nb_stream_indexes; ++j ) {
3566 int aidx = pgrm->stream_index[j];
3567 AVStream *st = fmt_ctx->streams[aidx];
3568 if( st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO ) continue;
3569 if( astream > 0 ) { --astream; continue; }
3571 for( int i=0; astrm<0 && i<ffaudio.size(); ++i )
3572 if( ffaudio[i]->fidx == aidx ) astrm = i;
3574 if( ret < 0 ) ret = astrm;
// set this stream's channel bits at its channel0 offset
3575 int64_t mask = (1 << ffaudio[astrm]->channels) - 1;
3576 channels |= mask << ffaudio[astrm]->channel0;
3578 if( !astream ) break;
3580 channel_mask = channels;
// Number of selectable video layers.
3585 int FFMPEG::ff_total_video_layers()
3587 return vstrm_index.size();
// Number of video streams.
3590 int FFMPEG::ff_total_vstreams()
3592 return ffvideo.size();
// Display width of video stream `stream` (width/height swap when the
// stream has a transpose, e.g. a 90-degree rotation).
3595 int FFMPEG::ff_video_width(int stream)
3597 FFVideoStream *vst = ffvideo[stream];
3598 return !vst->transpose ? vst->width : vst->height;
// Display height of video stream `stream` (swapped when transposed).
3601 int FFMPEG::ff_video_height(int stream)
3603 FFVideoStream *vst = ffvideo[stream];
3604 return !vst->transpose ? vst->height : vst->width;
// Override the stored width of video stream `stream` (accounting for
// transpose); `w` captures the previous value.
3607 int FFMPEG::ff_set_video_width(int stream, int width)
3609 FFVideoStream *vst = ffvideo[stream];
3610 int *vw = !vst->transpose ? &vst->width : &vst->height, w = *vw;
// Override the stored height of video stream `stream` (accounting for
// transpose); `h` captures the previous value.
3615 int FFMPEG::ff_set_video_height(int stream, int height)
3617 FFVideoStream *vst = ffvideo[stream];
3618 int *vh = !vst->transpose ? &vst->height : &vst->width, h = *vh;
// Codec-level coded width of video stream `stream`.
3623 int FFMPEG::ff_coded_width(int stream)
3625 return ffvideo[stream]->avctx->coded_width;
3628 int FFMPEG::ff_coded_height(int stream)
3630 return ffvideo[stream]->avctx->coded_height;
// Display aspect ratio of a video stream.  Guesses the sample aspect ratio
// via libavformat, reduces width*sar.num : height*sar.den into dar, and
// falls back to the stream's stored aspect_ratio.  NOTE(review): the
// declaration of dar, the reduction limit argument and the success-path
// return are on lines elided from this view — verify against full source.
3633 float FFMPEG::ff_aspect_ratio(int stream)
3635 //return ffvideo[stream]->aspect_ratio;
3636 AVFormatContext *fmt_ctx = ffvideo[stream]->fmt_ctx;
3637 AVStream *strm = ffvideo[stream]->st;
3638 AVCodecParameters *par = ffvideo[stream]->st->codecpar;
// let libavformat pick the best SAR from stream/codec/frame data
3640 AVRational sar = av_guess_sample_aspect_ratio(fmt_ctx, strm, NULL);
3642 av_reduce(&dar.num, &dar.den,
3643 par->width * sar.num,
3644 par->height * sar.den,
// fallback when no usable SAR was found
3648 return ffvideo[stream]->aspect_ratio;
3651 const char* FFMPEG::ff_video_codec(int stream)
3653 AVStream *st = ffvideo[stream]->st;
3654 AVCodecID id = st->codecpar->codec_id;
3655 const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
3656 return desc ? desc->name : _("Unknown");
3659 int FFMPEG::ff_color_range(int stream)
3661 return ffvideo[stream]->color_range;
3664 int FFMPEG::ff_color_space(int stream)
3666 return ffvideo[stream]->color_space;
3669 double FFMPEG::ff_frame_rate(int stream)
3671 return ffvideo[stream]->frame_rate;
3674 int64_t FFMPEG::ff_video_frames(int stream)
3676 return ffvideo[stream]->length;
3679 int FFMPEG::ff_video_pid(int stream)
3681 return ffvideo[stream]->st->id;
3684 int FFMPEG::ff_video_mpeg_color_range(int stream)
3686 return ffvideo[stream]->st->codecpar->color_range == AVCOL_RANGE_MPEG ? 1 : 0;
// Translate the demuxer-reported field order into an ILACE_MODE_* value.
// Reads codecpar (demuxer level) because no decoded frame is available yet.
// NOTE(review): the switch statement and several AV_FIELD_* case labels
// (e.g. the top/bottom-first cases) are on lines elided from this view.
3689 int FFMPEG::ff_interlace(int stream)
3691 // https://ffmpeg.org/doxygen/trunk/structAVCodecParserContext.html
3692 /* reads from demuxer because codec frame not ready */
3693 int interlace0 = ffvideo[stream]->st->codecpar->field_order;
3699 return ILACE_MODE_TOP_FIRST;
3702 return ILACE_MODE_BOTTOM_FIRST;
3703 case AV_FIELD_PROGRESSIVE:
3704 return ILACE_MODE_NOTINTERLACED;
// anything unrecognized is reported as undetected
3706 return ILACE_MODE_UNDETECTED;
3713 int FFMPEG::ff_cpus()
3715 return !file_base ? 1 : file_base->file->cpus;
3718 const char *FFMPEG::ff_hw_dev()
3720 return &file_base->file->preferences->use_hw_dev[0];
3723 Preferences *FFMPEG::ff_prefs()
3725 return !file_base ? 0 : file_base->file->preferences;
// Rotation angle (degrees) from the stream's display-matrix side data.
// Matrix entries are 16.16 fixed point; the angle comes from the rotation
// sub-matrix via atan2 of normalized columns.  Returns 0 when no matrix or
// it is degenerate.  NOTE(review): the pre-59.16 #else branch, the size
// declaration and the final return of theta are elided from this view.
3728 double FFVideoStream::get_rotation_angle()
3730 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
3735 int *matrix = (int*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
3736 int len = size/sizeof(*matrix);
// need at least the 2x2 rotation part (indexes 0,1,3,4)
3737 if( !matrix || len < 5 ) return 0;
// convert 16.16 fixed point to double
3738 const double s = 1/65536.;
3739 double theta = (!matrix[0] && !matrix[3]) || (!matrix[1] && !matrix[4]) ? 0 :
3740 atan2( s*matrix[1] / hypot(s*matrix[1], s*matrix[4]),
3741 s*matrix[0] / hypot(s*matrix[0], s*matrix[3])) * 180/M_PI;
// Insert auto-rotate filters matching the stream's rotation angle theta
// (degrees).  90/270 use a transpose filter, 180 uses hflip+vflip, any
// other angle falls through to the generic "rotate" filter (which takes
// radians).  No-op unless preferences->auto_rotate is set.  NOTE(review):
// the declaration/init of ret and the error-path lines after each failed
// insert_filter are elided from this view.
3745 int FFVideoStream::flip(double theta)
3749 Preferences *preferences = ffmpeg->ff_prefs();
3750 if( !preferences || !preferences->auto_rotate ) return ret;
3751 double tolerance = 1;
3752 if( fabs(theta-0) < tolerance ) return ret;
// normalize angle into [0,360)
3753 if( (theta=fmod(theta, 360)) < 0 ) theta += 360;
3754 if( fabs(theta-90) < tolerance ) {
3755 if( (ret = insert_filter("transpose", "clock")) < 0 )
3759 else if( fabs(theta-180) < tolerance ) {
3760 if( (ret=insert_filter("hflip", 0)) < 0 )
3762 if( (ret=insert_filter("vflip", 0)) < 0 )
3765 else if (fabs(theta-270) < tolerance ) {
3766 if( (ret=insert_filter("transpose", "cclock")) < 0 )
// arbitrary angle: rotate filter expects radians
3771 char angle[BCSTRLEN];
3772 sprintf(angle, "%f", theta*M_PI/180.);
3773 if( (ret=insert_filter("rotate", angle)) < 0 )
// Build the video filter graph for this stream: validate the first filter
// named in filter_spec, create the "buffer" source (fed with the stream's
// geometry/pix_fmt/time_base/SAR), the "buffersink" output pinned to the
// same pixel format, then parse/link the user spec via config_filters.
// Returns 0 on success, -1 on failure.  Several error-return and
// ret-guard lines are elided from this view.
3779 int FFVideoStream::create_filter(const char *filter_spec)
3781 double theta = get_rotation_angle();
3782 if( !theta && !filter_spec )
3784 avfilter_register_all();
// scan the leading identifier of the spec as the first filter name
3786 const char *sp = filter_spec;
3787 char filter_name[BCSTRLEN], *np = filter_name;
3788 int i = sizeof(filter_name);
3789 while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
3791 const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
3792 if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) {
3793 ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec);
3797 AVCodecParameters *avpar = st->codecpar;
3798 int sa_num = avpar->sample_aspect_ratio.num;
3799 if( !sa_num ) sa_num = 1;
3800 int sa_den = avpar->sample_aspect_ratio.den;
// FIXME(review): this assigns sa_num, but it is guarding sa_den — almost
// certainly should read "if( !sa_den ) sa_den = 1;" to avoid a 0
// denominator in the pixel_aspect argument below.  Verify and fix.
3801 if( !sa_den ) sa_num = 1;
3803 int ret = 0; char args[BCTEXTLEN];
3804 AVPixelFormat pix_fmt = (AVPixelFormat)avpar->format;
3805 snprintf(args, sizeof(args),
3806 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
3807 avpar->width, avpar->height, (int)pix_fmt,
3808 st->time_base.num, st->time_base.den, sa_num, sa_den);
// graph input: buffer source describing the decoded frames
3811 ret = insert_filter("buffer", args, "in");
3812 buffersrc_ctx = filt_ctx;
3816 AVFilterContext *fsrc = filt_ctx;
// graph output: buffersink constrained to the stream's pixel format
3819 ret = insert_filter("buffersink", 0, "out");
3820 buffersink_ctx = filt_ctx;
3823 ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
3824 (uint8_t*)&pix_fmt, sizeof(pix_fmt),
3825 AV_OPT_SEARCH_CHILDREN);
3828 ret = config_filters(filter_spec, fsrc);
3830 ff_err(ret, "FFVideoStream::create_filter");
3831 return ret >= 0 ? 0 : -1;
// Build the audio filter graph for this stream: validate the first filter
// named in filter_spec, create the "abuffer" source (time_base, rate,
// sample_fmt, channel_layout), the "abuffersink" output pinned to the
// stream's sample format / channel layout / rate, then parse/link the user
// spec via config_filters.  Returns 0 on success, -1 on failure.  Error
// returns and some ret guards are elided from this view.
3834 int FFAudioStream::create_filter(const char *filter_spec)
3838 avfilter_register_all();
// scan the leading identifier of the spec as the first filter name
3840 const char *sp = filter_spec;
3841 char filter_name[BCSTRLEN], *np = filter_name;
3842 int i = sizeof(filter_name);
3843 while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++;
3845 const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name);
3846 if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) {
3847 ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec);
3851 int ret = 0; char args[BCTEXTLEN];
3852 AVCodecParameters *avpar = st->codecpar;
3853 AVSampleFormat sample_fmt = (AVSampleFormat)avpar->format;
3854 snprintf(args, sizeof(args),
3855 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
3856 st->time_base.num, st->time_base.den, avpar->sample_rate,
3857 av_get_sample_fmt_name(sample_fmt), avpar->channel_layout);
// graph input: abuffer source describing the decoded samples
3860 ret = insert_filter("abuffer", args, "in");
3861 buffersrc_ctx = filt_ctx;
3863 AVFilterContext *fsrc = filt_ctx;
// graph output: abuffersink constrained to format/layout/rate
3866 ret = insert_filter("abuffersink", 0, "out");
3867 buffersink_ctx = filt_ctx;
3870 ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
3871 (uint8_t*)&sample_fmt, sizeof(sample_fmt),
3872 AV_OPT_SEARCH_CHILDREN);
3874 ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
3875 (uint8_t*)&avpar->channel_layout,
3876 sizeof(avpar->channel_layout), AV_OPT_SEARCH_CHILDREN);
3878 ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
3879 (uint8_t*)&sample_rate, sizeof(sample_rate),
3880 AV_OPT_SEARCH_CHILDREN);
3882 ret = config_filters(filter_spec, fsrc);
3884 ff_err(ret, "FFAudioStream::create_filter");
3885 return ret >= 0 ? 0 : -1;
// Create a named filter instance and append it to this stream's graph,
// linking it after the current tail filter (filt_ctx).  When inst_name is
// null a unique "<name>_<id>" instance name is generated.  On failure the
// half-created context is freed.  NOTE(review): the guards around the
// auto-name branch, the filter_graph allocation condition, the update of
// filt_ctx and the final return are elided from this view.
3888 int FFStream::insert_filter(const char *name, const char *arg, const char *inst_name)
3890 const AVFilter *filter = avfilter_get_by_name(name);
3891 if( !filter ) return -1;
3892 char filt_inst[BCSTRLEN];
// generate a unique instance name when the caller gave none
3894 snprintf(filt_inst, sizeof(filt_inst), "%s_%d", name, ++filt_id);
3895 inst_name = filt_inst;
3898 filter_graph = avfilter_graph_alloc();
3899 AVFilterContext *fctx = 0;
3900 int ret = avfilter_graph_create_filter(&fctx,
3901 filter, inst_name, arg, NULL, filter_graph);
// chain the new filter after the previous tail, if any
3902 if( ret >= 0 && filt_ctx )
3903 ret = avfilter_link(filt_ctx, 0, fctx, 0);
3907 avfilter_free(fctx);
// Parse filter_spec into the graph between source fsrc and the stream's
// buffersink, then validate the whole graph.  With an empty/absent spec the
// source is linked directly to the sink.  Frees the graph on failure.
// NOTE(review): several guard/branch lines (e.g. the empty-spec test and
// intermediate ret checks) are elided from this view.
3911 int FFStream::config_filters(const char *filter_spec, AVFilterContext *fsrc)
3914 AVFilterContext *fsink = buffersink_ctx;
3916 /* Endpoints for the filter graph. */
3917 AVFilterInOut *outputs = avfilter_inout_alloc();
3918 AVFilterInOut *inputs = avfilter_inout_alloc();
3919 if( !inputs || !outputs ) ret = -1;
// "outputs" describes the open output pad of the source
3921 outputs->filter_ctx = fsrc;
3922 outputs->pad_idx = 0;
3924 if( !(outputs->name = av_strdup(fsrc->name)) ) ret = -1;
// "inputs" describes the open input pad of the sink
3927 inputs->filter_ctx = fsink;
3928 inputs->pad_idx = 0;
3930 if( !(inputs->name = av_strdup(fsink->name)) ) ret = -1;
// prefix the spec with "[srcname]" so the parse attaches to the source
3933 int len = strlen(fsrc->name)+2 + strlen(filter_spec) + 1;
3934 char spec[len]; sprintf(spec, "[%s]%s", fsrc->name, filter_spec);
3935 ret = avfilter_graph_parse_ptr(filter_graph, spec,
3936 &inputs, &outputs, NULL);
3938 avfilter_inout_free(&inputs);
3939 avfilter_inout_free(&outputs);
// no spec: connect source straight to sink
3942 ret = avfilter_link(fsrc, 0, fsink, 0);
3944 ret = avfilter_graph_config(filter_graph, NULL);
3946 ff_err(ret, "FFStream::create_filter");
3947 avfilter_graph_free(&filter_graph);
// Allocate and open a decoder context for stream st.  A user-selected
// decoder name (opt_video_decoder / opt_audio_decoder) takes precedence and
// is remembered in the codec remap tables; otherwise the default decoder
// for the codec id is used.  Thread count follows ff_cpus() unless the
// options already specify "threads".  Returns the opened context, or frees
// it and (per the elided lines) returns 0 on failure.
3954 AVCodecContext *FFMPEG::activate_decoder(AVStream *st)
3956 AVDictionary *copts = 0;
3957 av_dict_copy(&copts, opts, 0);
3958 AVCodecID codec_id = st->codecpar->codec_id;
// newer ffmpeg returns const AVCodec* from the find functions
3959 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
3960 const AVCodec *decoder = 0;
3962 AVCodec *decoder = 0;
3964 switch( st->codecpar->codec_type ) {
3965 case AVMEDIA_TYPE_VIDEO:
3966 if( opt_video_decoder )
3967 decoder = avcodec_find_decoder_by_name(opt_video_decoder);
3969 video_codec_remaps.update(codec_id, decoder);
3971 case AVMEDIA_TYPE_AUDIO:
3972 if( opt_audio_decoder )
3973 decoder = avcodec_find_decoder_by_name(opt_audio_decoder);
3975 audio_codec_remaps.update(codec_id, decoder);
// fall back to the stock decoder for this codec id
3980 if( !decoder && !(decoder = avcodec_find_decoder(codec_id)) ) {
3981 eprintf(_("cant find decoder codec %d\n"), (int)codec_id);
3984 AVCodecContext *avctx = avcodec_alloc_context3(decoder);
3986 eprintf(_("cant allocate codec context\n"));
3989 avcodec_parameters_to_context(avctx, st->codecpar);
3990 if( !av_dict_get(copts, "threads", NULL, 0) )
3991 avctx->thread_count = ff_cpus();
3992 int ret = avcodec_open2(avctx, decoder, &copts);
3993 av_dict_free(&copts);
3995 avcodec_free_context(&avctx);
// Build the media index: open a decoder for every stream, then read every
// packet, recording keyframe positions as video/audio index markers and
// decoding audio into the index's sample data.  *scan_position tracks file
// progress; *canceled aborts the loop.  Gives up after 100 read errors.
// Many error-path/brace lines are elided from this view.
4001 int FFMPEG::scan(IndexState *index_state, int64_t *scan_position, int *canceled)
4004 av_init_packet(&pkt);
4005 AVFrame *frame = av_frame_alloc();
4007 fprintf(stderr,"FFMPEG::scan: ");
4008 fprintf(stderr,_("av_frame_alloc failed\n"));
4009 fprintf(stderr,"FFMPEG::scan:file=%s\n", file_base->asset->path);
4013 index_state->add_video_markers(ffvideo.size());
4014 index_state->add_audio_markers(ffaudio.size());
// open a decoder for each stream and attach it to its ff stream object
4016 for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
4017 AVStream *st = fmt_ctx->streams[i];
4018 AVCodecContext *avctx = activate_decoder(st);
4020 AVCodecParameters *avpar = st->codecpar;
4021 switch( avpar->codec_type ) {
4022 case AVMEDIA_TYPE_VIDEO: {
// reverse map: format stream index -> ffvideo index
4023 int vidx = ffvideo.size();
4024 while( --vidx>=0 && ffvideo[vidx]->fidx != i );
4025 if( vidx < 0 ) break;
4026 ffvideo[vidx]->avctx = avctx;
4028 case AVMEDIA_TYPE_AUDIO: {
4029 int aidx = ffaudio.size();
4030 while( --aidx>=0 && ffaudio[aidx]->fidx != i );
4031 if( aidx < 0 ) break;
4032 ffaudio[aidx]->avctx = avctx;
4037 fprintf(stderr,"FFMPEG::scan: ");
4038 fprintf(stderr,_("codec open failed\n"));
4039 fprintf(stderr,"FFMPEG::scan:file=%s\n", file_base->asset->path);
4040 avcodec_free_context(&avctx);
// seed each audio stream's sample position from its start_time
4044 for( int i=0; i<(int)fmt_ctx->nb_streams; ++i ) {
4045 AVStream *st = fmt_ctx->streams[i];
4046 AVCodecParameters *avpar = st->codecpar;
4047 if( avpar->codec_type != AVMEDIA_TYPE_AUDIO ) continue;
4048 int64_t tstmp = st->start_time;
4049 if( tstmp == AV_NOPTS_VALUE ) continue;
4050 int aidx = ffaudio.size();
4051 while( --aidx>=0 && ffaudio[aidx]->fidx != i );
4052 if( aidx < 0 ) continue;
4053 FFAudioStream *aud = ffaudio[aidx];
4054 tstmp -= aud->nudge;
4055 double secs = to_secs(tstmp, st->time_base);
4056 aud->curr_pos = secs * aud->sample_rate + 0.5;
// main packet loop
4060 for( int64_t count=0; !*canceled; ++count ) {
4061 av_packet_unref(&pkt);
4062 pkt.data = 0; pkt.size = 0;
4064 int ret = av_read_frame(fmt_ctx, &pkt);
4066 if( ret == AVERROR_EOF ) break;
4067 if( ++errs > 100 ) {
4068 ff_err(ret,_("over 100 read_frame errs\n"));
4073 if( !pkt.data ) continue;
4074 int i = pkt.stream_index;
4075 if( i < 0 || i >= (int)fmt_ctx->nb_streams ) continue;
4076 AVStream *st = fmt_ctx->streams[i];
4077 if( pkt.pos > *scan_position ) *scan_position = pkt.pos;
4079 AVCodecParameters *avpar = st->codecpar;
4080 switch( avpar->codec_type ) {
4081 case AVMEDIA_TYPE_VIDEO: {
4082 int vidx = ffvideo.size();
4083 while( --vidx>=0 && ffvideo[vidx]->fidx != i );
4084 if( vidx < 0 ) break;
4085 FFVideoStream *vid = ffvideo[vidx];
4086 if( !vid->avctx ) break;
4087 int64_t tstmp = pkt.pts;
4088 if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.dts;
// record keyframe positions as index markers
4089 if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
4090 if( vid->nudge != AV_NOPTS_VALUE ) tstmp -= vid->nudge;
4091 double secs = to_secs(tstmp, st->time_base);
4092 int64_t frm = secs * vid->frame_rate + 0.5;
4093 if( frm < 0 ) frm = 0;
4094 index_state->put_video_mark(vidx, frm, pkt.pos);
// NOTE(review): the audio path below passes &pkt here — presumably pkt
// converts to AVPacket* (FFPacket), otherwise this line is suspect; verify.
4097 ret = avcodec_send_packet(vid->avctx, pkt);
4098 if( ret < 0 ) break;
// drain decoded frames; only timing matters for video
4099 while( (ret=vid->decode_frame(frame)) > 0 ) {}
4102 case AVMEDIA_TYPE_AUDIO: {
4103 int aidx = ffaudio.size();
4104 while( --aidx>=0 && ffaudio[aidx]->fidx != i );
4105 if( aidx < 0 ) break;
4106 FFAudioStream *aud = ffaudio[aidx];
4107 if( !aud->avctx ) break;
4108 int64_t tstmp = pkt.pts;
4109 if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.dts;
4110 if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
4111 if( aud->nudge != AV_NOPTS_VALUE ) tstmp -= aud->nudge;
4112 double secs = to_secs(tstmp, st->time_base);
4113 int64_t sample = secs * aud->sample_rate + 0.5;
4115 index_state->put_audio_mark(aidx, sample, pkt.pos);
4117 ret = avcodec_send_packet(aud->avctx, &pkt);
4118 if( ret < 0 ) break;
4119 int ch = aud->channel0, nch = aud->channels;
// pad the index if decode position drifted from the index position
4120 int64_t pos = index_state->pos(ch);
4121 if( pos != aud->curr_pos ) {
4122 if( abs(pos-aud->curr_pos) > 1 )
4123 printf("audio%d pad %jd %jd (%jd)\n", aud->idx, pos, aud->curr_pos, pos-aud->curr_pos);
4124 index_state->pad_data(ch, nch, aud->curr_pos);
// decode, resample and append the samples to the index
4126 while( (ret=aud->decode_frame(frame)) > 0 ) {
4127 //if( frame->channels != nch ) break;
4128 aud->init_swr(frame->channels, frame->format, frame->sample_rate);
4130 int len = aud->get_samples(samples,
4131 &frame->extended_data[0], frame->nb_samples);
4132 pos = aud->curr_pos;
// clip any samples that land before position 0
4133 if( (aud->curr_pos += len) >= 0 ) {
4135 samples += -pos * nch;
4136 len = aud->curr_pos;
4138 for( int i=0; i<nch; ++i )
4139 index_state->put_data(ch+i,nch,samples+i,len);
4146 av_frame_free(&frame);
// Merge previously built index marks into libavformat's seek index for
// this stream, converting mark numbers (frames/samples at "rate") into
// stream time_base timestamps.  Marks already covered by the format's own
// index (everything up to its last entry) are skipped, and the count is
// capped by the format's max index size.  The nb_ent>0 guard and the
// declaration of "in" are on lines elided from this view.
4150 void FFStream::load_markers(IndexMarks &marks, double rate)
4153 int64_t sz = marks.size();
4154 int max_entries = fmt_ctx->max_index_size / sizeof(AVIndexEntry) - 1;
// index-entry access API differs across ffmpeg versions
4155 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
4156 int nb_ent = avformat_index_get_entries_count(st);
4158 #if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(58,134,100)
4159 int nb_ent = st->nb_index_entries;
4161 // some formats already have an index
4163 #if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(58,134,100)
4164 AVIndexEntry *ep = &st->index_entries[nb_ent-1];
4166 #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,16,100)
4167 const AVIndexEntry *ep = avformat_index_get_entry(st, nb_ent-1);
// skip marks at or before the format index's last timestamp
4169 int64_t tstmp = ep->timestamp;
4170 if( nudge != AV_NOPTS_VALUE ) tstmp -= nudge;
4171 double secs = ffmpeg->to_secs(tstmp, st->time_base);
4172 int64_t no = secs * rate;
4173 while( in < sz && marks[in].no <= no ) ++in;
// add an evenly spread subset of the remaining marks
4175 int64_t len = sz - in;
4176 int64_t count = max_entries - nb_ent;
4177 if( count > len ) count = len;
4178 for( int i=0; i<count; ++i ) {
4179 int k = in + i * len / count;
4180 int64_t no = marks[k].no, pos = marks[k].pos;
4181 double secs = (double)no / rate;
4182 int64_t tstmp = secs * st->time_base.den / st->time_base.num;
4183 if( nudge != AV_NOPTS_VALUE ) tstmp += nudge;
4184 av_add_index_entry(st, pos, tstmp, 0, 0, AVINDEX_KEYFRAME);
4190 * 1) if the format context has a timecode
4191 * return fmt_ctx->timecode - 0
4192 * 2) if the layer/channel has a timecode
4193 * return st->timecode - (start_time-nudge)
4194 * 3) find the 1st program with stream, find 1st program video stream,
4195 * if video stream has a timecode, return st->timecode - (start_time-nudge)
4196 * 4) find timecode in any stream, return st->timecode
4197 * 5) read 100 packets, save ofs=pkt.pts*st->time_base - st->nudge:
4198 * decode frame for video stream of 1st program
4199 * if frame->timecode has a timecode, return frame->timecode - ofs
4200 * if side_data has gop timecode, return gop->timecode - ofs
4201 * if side_data has smpte timecode, return smpte->timecode - ofs
4202 * 6) if the filename/url scans *date_time.ext, return date_time
4203 * 7) if stat works on the filename/url, return mtime
4204 * 8) return -1 failure
// Find the starting timecode for a layer/channel, trying the sources in
// the order documented in the comment block above this function (format
// metadata, stream metadata, program video stream, any stream, decoded
// frame/GOP/SMPTE side data, filename date pattern, file mtime).  Returns
// seconds, or -1 when nothing usable is found.  Many braces, guards and
// fall-through lines are elided from this view.
4206 double FFMPEG::get_initial_timecode(int data_type, int channel, double frame_rate)
4208 AVRational rate = check_frame_rate(0, frame_rate);
4209 if( !rate.num ) return -1;
4210 // format context timecode
4211 AVDictionaryEntry *tc = av_dict_get(fmt_ctx->metadata, "timecode", 0, 0);
4212 if( tc ) return ff_get_timecode(tc->value, rate, 0);
4214 if( open_decoder() ) return -1;
// resolve the requested layer/channel to a stream
4217 int codec_type = -1, fidx = -1;
4218 switch( data_type ) {
4220 codec_type = AVMEDIA_TYPE_AUDIO;
4221 int aidx = astrm_index[channel].st_idx;
4222 FFAudioStream *aud = ffaudio[aidx];
// broadcast-wave style: time_reference counts samples since midnight
4226 AVDictionaryEntry *tref = av_dict_get(fmt_ctx->metadata, "time_reference", 0, 0);
4227 if( tref && aud && aud->sample_rate )
4228 return strtod(tref->value, 0) / aud->sample_rate;
4231 codec_type = AVMEDIA_TYPE_VIDEO;
4232 int vidx = vstrm_index[channel].st_idx;
4233 FFVideoStream *vid = ffvideo[vidx];
4239 if( codec_type < 0 ) return -1;
// stream's own metadata timecode
4241 tc = av_dict_get(st->metadata, "timecode", 0, 0);
4244 // find first program which references this stream
4246 for( int i=0, m=fmt_ctx->nb_programs; pidx<0 && i<m; ++i ) {
4247 AVProgram *pgrm = fmt_ctx->programs[i];
4248 for( int j=0, n=pgrm->nb_stream_indexes; j<n; ++j ) {
4249 int st_idx = pgrm->stream_index[j];
4250 if( st_idx == fidx ) { pidx = i; break; }
// prefer the program's first video stream for timecode lookup
4255 AVProgram *pgrm = fmt_ctx->programs[pidx];
4256 for( int j=0, n=pgrm->nb_stream_indexes; j<n; ++j ) {
4257 int st_idx = pgrm->stream_index[j];
4258 AVStream *tst = fmt_ctx->streams[st_idx];
4259 if( !tst ) continue;
4260 if( tst->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
4261 st = tst; fidx = st_idx;
// no program: fall back to first video stream anywhere
4267 for( int i=0, n=fmt_ctx->nb_streams; i<n; ++i ) {
4268 AVStream *tst = fmt_ctx->streams[i];
4269 if( !tst ) continue;
4270 if( tst->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
4277 tc = av_dict_get(st->metadata, "timecode", 0, 0);
4281 // any timecode, includes -data- streams
4282 for( int i=0, n=fmt_ctx->nb_streams; i<n; ++i ) {
4283 AVStream *tst = fmt_ctx->streams[i];
4284 if( !tst ) continue;
4285 if( (tc = av_dict_get(tst->metadata, "timecode", 0, 0)) ) {
// for a video stream, refine rate and nudge from the ff stream tables
4292 if( st && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
4293 if( st->r_frame_rate.num && st->r_frame_rate.den )
4294 rate = st->r_frame_rate;
4295 nudge = st->start_time;
4296 for( int i=0; i<ffvideo.size(); ++i ) {
4297 if( ffvideo[i]->st == st ) {
4298 nudge = ffvideo[i]->nudge;
4304 if( tc ) { // return timecode
4305 double secs = st->start_time == AV_NOPTS_VALUE ? 0 :
4306 to_secs(st->start_time - nudge, st->time_base);
4307 return ff_get_timecode(tc->value, rate, secs);
4310 if( !st || fidx < 0 ) return -1;
// decode a few packets looking for per-frame timecode side data
4313 AVCodecContext *av_ctx = activate_decoder(st);
4315 fprintf(stderr,"activate_decoder failed\n");
4318 avCodecContext avctx(av_ctx); // auto deletes
4319 if( avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
4320 avctx->framerate.num && avctx->framerate.den )
4321 rate = avctx->framerate;
4323 avPacket pkt; // auto deletes
4324 avFrame frame; // auto deletes
4326 fprintf(stderr,"av_frame_alloc failed\n");
4330 int64_t max_packets = 100;
4331 char tcbuf[AV_TIMECODE_STR_SIZE];
4333 for( int64_t count=0; count<max_packets; ++count ) {
4334 av_packet_unref(pkt);
4335 pkt->data = 0; pkt->size = 0;
4337 int ret = av_read_frame(fmt_ctx, pkt);
4339 if( ret == AVERROR_EOF ) break;
4340 if( ++errs > 100 ) {
4341 fprintf(stderr,"over 100 read_frame errs\n");
4346 if( !pkt->data ) continue;
4347 int i = pkt->stream_index;
4348 if( i != fidx ) continue;
4349 int64_t tstmp = pkt->pts;
4350 if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt->dts;
4351 double secs = to_secs(tstmp - nudge, st->time_base);
4352 ret = avcodec_send_packet(avctx, pkt);
4353 if( ret < 0 ) return -1;
// inspect decoded frames: metadata timecode, then GOP/SMPTE side data
4355 while( (ret = avcodec_receive_frame(avctx, frame)) >= 0 ) {
4356 if( (tc = av_dict_get(frame->metadata, "timecode", 0, 0)) )
4357 return ff_get_timecode(tc->value, rate, secs);
4358 int k = frame->nb_side_data;
4359 AVFrameSideData *side_data = 0;
4361 side_data = frame->side_data[k];
4362 switch( side_data->type ) {
4363 case AV_FRAME_DATA_GOP_TIMECODE: {
4364 int64_t data = *(int64_t *)side_data->data;
4365 int sz = sizeof(data);
4366 if( side_data->size >= sz ) {
4367 av_timecode_make_mpeg_tc_string(tcbuf, data);
4368 return ff_get_timecode(tcbuf, rate, secs);
4371 case AV_FRAME_DATA_S12M_TIMECODE: {
// data[0] holds the count of following timecode words
4372 uint32_t *data = (uint32_t *)side_data->data;
4373 int n = data[0], sz = (n+1)*sizeof(*data);
4374 if( side_data->size >= sz ) {
4375 av_timecode_make_smpte_tc_string(tcbuf, data[n], 0);
4376 return ff_get_timecode(tcbuf, rate, secs);
// try a *date_time.ext pattern in the filename
4385 char *path = fmt_ctx->url;
4386 char *bp = strrchr(path, '/');
4387 if( !bp ) bp = path; else ++bp;
4388 char *cp = strrchr(bp, '.');
4389 if( cp && (cp-=(8+1+6)) >= bp ) {
4391 int year,mon,day, hour,min,sec, frm=0;
4392 if( sscanf(cp,"%4d%2d%2d%[_-]%2d%2d%2d",
4393 &year,&mon,&day, sep, &hour,&min,&sec) == 7 ) {
4395 // year>=1970,mon=1..12,day=1..31, hour=0..23,min=0..59,sec=0..60
4396 if( (ch=='_' || ch=='-' ) &&
4397 year >= 1970 && mon>=1 && mon<=12 && day>=1 && day<=31 &&
4398 hour>=0 && hour<24 && min>=0 && min<60 && sec>=0 && sec<=60 ) {
4399 sprintf(tcbuf,"%d:%02d:%02d:%02d", hour,min,sec, frm);
4400 return ff_get_timecode(tcbuf, rate, 0);
// last resort: local mtime of the file
4405 if( stat(path, &tst) >= 0 ) {
4406 time_t t = (time_t)tst.st_mtim.tv_sec;
4408 localtime_r(&t, &tm);
4409 int64_t us = tst.st_mtim.tv_nsec / 1000;
4410 int frm = us/1000000. * frame_rate;
4411 sprintf(tcbuf,"%d:%02d:%02d:%02d", tm.tm_hour, tm.tm_min, tm.tm_sec, frm);
4412 return ff_get_timecode(tcbuf, rate, 0);
// Parse an "HH:MM:SS:FF"-style timecode string at the given frame rate and
// return it as seconds with pos subtracted, clamped at 0.  The AVTimecode
// declaration, the failure return and the final return are on lines elided
// from this view.
4417 double FFMPEG::ff_get_timecode(char *str, AVRational rate, double pos)
4420 if( av_timecode_init_from_string(&tc, rate, str, fmt_ctx) )
4422 double secs = (double)tc.start / tc.fps - pos;
4423 if( secs < 0 ) secs = 0;
// Convenience wrapper: open path with a scratch decoder and return its
// initial timecode for the given data type/channel/rate, or -1 on failure.
// The local FFMPEG instance construction is on a line elided from this view.
4427 double FFMPEG::get_timecode(const char *path, int data_type, int channel, double rate)
4430 if( ffmpeg.init_decoder(path) ) return -1;
4431 return ffmpeg.get_initial_timecode(data_type, channel, rate);