break; }
case AVMEDIA_TYPE_AUDIO: {
s->coding_type = bd_coding_type(codec_id);
- s->format = bd_audio_format(st->codecpar->channels);
+ s->format = bd_audio_format(st->codecpar->ch_layout.nb_channels);
s->rate = bd_audio_rate(st->codecpar->sample_rate);
strcpy((char*)s->lang, "eng");
break; }
swr_ichs = ichs; swr_ifmt = ifmt; swr_irate = irate;
if( ichs == channels && ifmt == AV_SAMPLE_FMT_FLT && irate == sample_rate )
return;
- uint64_t ilayout = av_get_default_channel_layout(ichs);
- if( !ilayout ) ilayout = ((uint64_t)1<<ichs) - 1;
- uint64_t olayout = av_get_default_channel_layout(channels);
- if( !olayout ) olayout = ((uint64_t)1<<channels) - 1;
- resample_context = swr_alloc_set_opts(NULL,
- olayout, AV_SAMPLE_FMT_FLT, sample_rate,
- ilayout, (AVSampleFormat)ifmt, irate,
+ AVChannelLayout ilayout, olayout;
+ av_channel_layout_default(&ilayout, ichs);
+ av_channel_layout_default(&olayout, channels);
+
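+ // swr_alloc_set_opts2() takes AVChannelLayout pointers and returns the context through its first argument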
+ swr_alloc_set_opts2(&resample_context,
+ &olayout, AV_SAMPLE_FMT_FLT, sample_rate,
+ &ilayout, (AVSampleFormat)ifmt, irate,
0, NULL);
if( resample_context )
swr_init(resample_context);
int64_t FFAudioStream::load_buffer(double ** const sp, int len)
{
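+ // the deprecated channels field is gone from AVCodecParameters in FFmpeg 7.0, hence the version guard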
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61,3,100)
+ reserve(len+1, st->codecpar->ch_layout.nb_channels);
+#else
reserve(len+1, st->codecpar->channels);
+#endif
for( int ch=0; ch<nch; ++ch )
write(sp[ch], len, ch);
return put_inp(len);
{
frame->nb_samples = frame_sz;
frame->format = avctx->sample_fmt;
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61,3,100)
+ av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+#else
frame->channel_layout = avctx->channel_layout;
+#endif
frame->sample_rate = avctx->sample_rate;
int ret = av_frame_get_buffer(frame, 0);
if (ret < 0)
while( ret>=0 && !flushed && curr_pos<end_pos && --i>=0 ) {
ret = read_frame(frame);
if( ret > 0 && frame->nb_samples > 0 ) {
- init_swr(frame->channels, frame->format, frame->sample_rate);
+ init_swr(frame->ch_layout.nb_channels, frame->format, frame->sample_rate);
load_history(&frame->extended_data[0], frame->nb_samples);
curr_pos += frame->nb_samples;
}
ret = vid->create_filter(opt_video_filter);
break; }
case AVMEDIA_TYPE_AUDIO: {
- if( avpar->channels < 1 ) continue;
+ if( avpar->ch_layout.nb_channels < 1 ) continue;
if( avpar->sample_rate < 1 ) continue;
has_audio = 1;
int aidx = ffaudio.size();
FFAudioStream *aud = new FFAudioStream(this, st, aidx, i);
ffaudio.append(aud);
aud->channel0 = astrm_index.size();
- aud->channels = avpar->channels;
+ aud->channels = avpar->ch_layout.nb_channels;
for( int ch=0; ch<aud->channels; ++ch )
astrm_index.append(ffidx(aidx, ch));
aud->sample_rate = avpar->sample_rate;
FFAudioStream *aud = new FFAudioStream(this, st, aidx, fidx);
aud->avctx = ctx; ffaudio.append(aud); fst = aud;
aud->sample_rate = asset->sample_rate;
- ctx->channels = aud->channels = asset->channels;
+ ctx->ch_layout.nb_channels = aud->channels = asset->channels;
for( int ch=0; ch<aud->channels; ++ch )
astrm_index.append(ffidx(aidx, ch));
- ctx->channel_layout = av_get_default_channel_layout(ctx->channels);
+ AVChannelLayout ch_layout;
+ av_channel_layout_default(&ch_layout, ctx->ch_layout.nb_channels);
+ av_channel_layout_copy(&ctx->ch_layout, &ch_layout);
ctx->sample_rate = check_sample_rate(codec, asset->sample_rate);
if( !ctx->sample_rate ) {
eprintf(_("check_sample_rate failed %s\n"), filename);
if( sample_fmt == AV_SAMPLE_FMT_NONE )
sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_S16;
ctx->sample_fmt = sample_fmt;
- uint64_t layout = av_get_default_channel_layout(ctx->channels);
- aud->resample_context = swr_alloc_set_opts(NULL,
- layout, ctx->sample_fmt, aud->sample_rate,
- layout, AV_SAMPLE_FMT_FLT, ctx->sample_rate,
+ AVChannelLayout layout;
+ av_channel_layout_default(&layout, ctx->ch_layout.nb_channels);
+ swr_alloc_set_opts2(&aud->resample_context,
+ &layout, ctx->sample_fmt, aud->sample_rate,
+ &layout, AV_SAMPLE_FMT_FLT, ctx->sample_rate,
0, NULL);
swr_init(aud->resample_context);
aud->writing = -1;
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
st->time_base.num, st->time_base.den, avpar->sample_rate,
- av_get_sample_fmt_name(sample_fmt), avpar->channel_layout);
+ av_get_sample_fmt_name(sample_fmt), avpar->ch_layout.u.mask);
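+ // note: ch_layout.u.mask is only meaningful for AV_CHANNEL_ORDER_NATIVE layouts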
if( ret >= 0 ) {
filt_ctx = 0;
ret = insert_filter("abuffer", args, "in");
AV_OPT_SEARCH_CHILDREN);
if( ret >= 0 )
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
- (uint8_t*)&avpar->channel_layout,
- sizeof(avpar->channel_layout), AV_OPT_SEARCH_CHILDREN);
+ (uint8_t*)&avpar->ch_layout.u.mask,
+ sizeof(avpar->ch_layout.u.mask), AV_OPT_SEARCH_CHILDREN);
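+ // note: this passes the raw mask to the binary "channel_layouts" option; newer buffersinks also accept a textual "ch_layouts" option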
if( ret >= 0 )
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
(uint8_t*)&sample_rate, sizeof(sample_rate),
}
while( (ret=aud->decode_frame(frame)) > 0 ) {
//if( frame->channels != nch ) break;
- aud->init_swr(frame->channels, frame->format, frame->sample_rate);
+ aud->init_swr(frame->ch_layout.nb_channels, frame->format, frame->sample_rate);
float *samples;
int len = aud->get_samples(samples,
&frame->extended_data[0], frame->nb_samples);
int channels = asset->channels;
int sample_rate = asset->sample_rate;
int64_t layout = get_channel_layout(channels);
+ AVChannelLayout ch_layout;
+ av_channel_layout_default(&ch_layout, channels);
+ if( !ch_layout.nb_channels )
+ printf("FileAC3::open_file: av_channel_layout_default failed!\n");
int bitrate = asset->ac3_bitrate * 1000;
av_init_packet(&avpkt);
codec_context = avcodec_alloc_context3(codec);
codec_context->bit_rate = bitrate;
codec_context->sample_rate = sample_rate;
- codec_context->channels = channels;
- codec_context->channel_layout = layout;
+ codec_context->ch_layout.nb_channels = channels;
+ codec_context->ch_layout.u.mask = layout;
+ av_channel_layout_copy(&codec_context->ch_layout, &ch_layout);
codec_context->sample_fmt = codec->sample_fmts[0];
- resample_context = swr_alloc_set_opts(NULL,
- layout, codec_context->sample_fmt, sample_rate,
- layout, AV_SAMPLE_FMT_S16, sample_rate,
+ SwrContext *tmp_resample_context = NULL;
+ int ret = swr_alloc_set_opts2(&tmp_resample_context,
+ &ch_layout, codec_context->sample_fmt, sample_rate,
+ &ch_layout, AV_SAMPLE_FMT_S16, sample_rate,
0, NULL);
- swr_init(resample_context);
+ if( ret < 0 ) printf("FileAC3::open_file: swr_alloc_set_opts2 error: %d\n", ret);
+ if( tmp_resample_context ) {
+ resample_context = tmp_resample_context;
+ if(swr_init(resample_context))
+ {
+ eprintf(_("FileAC3::open_file failed to init swr.\n"));
+ result = 1;
+ }
+ }
if(avcodec_open2(codec_context, codec, 0))
{
eprintf(_("FileAC3::open_file failed to open codec.\n"));
result = 1;
}
+ av_channel_layout_uninit(&ch_layout);
}
}
AVFrame *frame = av_frame_alloc();
frame->nb_samples = frame_size;
frame->format = avctx->sample_fmt;
- frame->channel_layout = avctx->channel_layout;
+ av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
frame->sample_rate = avctx->sample_rate;
+
ret = av_frame_get_buffer(frame, 0);
if( ret >= 0 ) {
const uint8_t *samples = (uint8_t *)temp_raw +
ret = swr_convert(resample_context,
(uint8_t **)frame->extended_data, frame_size,
&samples, frame_size);
- }
+ } else { printf("FileAC3: av_frame_get_buffer failed!\n"); }
if( ret >= 0 ) {
frame->pts = avctx->sample_rate && avctx->time_base.num ?
file->get_audio_position() : AV_NOPTS_VALUE ;
case AV_OPT_TYPE_SAMPLE_FMT: cp = N_("<sample_fmt>"); break;
case AV_OPT_TYPE_DURATION: cp = N_("<duration>"); break;
case AV_OPT_TYPE_COLOR: cp = N_("<color>"); break;
- case AV_OPT_TYPE_CHANNEL_LAYOUT: cp = N_("<channel_layout>"); break;
+ case AV_OPT_TYPE_CHLAYOUT: cp = N_("<channel_layout>"); break;
case AV_OPT_TYPE_BOOL: cp = N_("<bool>"); break;
default: cp = N_("<undef>"); break;
}
case AV_OPT_TYPE_SAMPLE_FMT: cp = "<sample_fmt>"; break;
case AV_OPT_TYPE_DURATION: cp = "<duration>"; break;
case AV_OPT_TYPE_COLOR: cp = "<color>"; break;
- case AV_OPT_TYPE_CHANNEL_LAYOUT: cp = "<channel_layout>"; break;
+ case AV_OPT_TYPE_CHLAYOUT: cp = "<channel_layout>"; break;
default: cp = "<undef>"; break;
}
return sprintf(rp, "%s", cp);
bool PluginFClient::is_audio(const AVFilter *fp)
{
if( !fp->outputs ) return 0;
-#if LIBAVFILTER_VERSION_MINOR > 2 && LIBAVFILTER_VERSION_MAJOR > 7
+#if LIBAVFILTER_VERSION_MAJOR > 8
if( avfilter_filter_pad_count(fp, 1) > 1 ) return 0;
#else
if( avfilter_pad_count(fp->outputs) > 1 ) return 0;
if( !avfilter_pad_get_name(fp->outputs, 0) ) return 0;
if( avfilter_pad_get_type(fp->outputs, 0) != AVMEDIA_TYPE_AUDIO ) return 0;
if( !fp->inputs ) return 1;
-#if LIBAVFILTER_VERSION_MINOR > 2 && LIBAVFILTER_VERSION_MAJOR > 7
+#if LIBAVFILTER_VERSION_MAJOR > 8
if( avfilter_filter_pad_count(fp, 0) > 1 ) return 0;
#else
if( avfilter_pad_count(fp->inputs) > 1 ) return 0;
bool PluginFClient::is_video(const AVFilter *fp)
{
if( !fp->outputs ) return 0;
-#if LIBAVFILTER_VERSION_MINOR > 2 && LIBAVFILTER_VERSION_MAJOR > 7
+#if LIBAVFILTER_VERSION_MAJOR > 8
if( avfilter_filter_pad_count(fp, 1) > 1 ) return 0;
#else
if( avfilter_pad_count(fp->outputs) > 1 ) return 0;
if( !avfilter_pad_get_name(fp->outputs, 0) ) return 0;
if( avfilter_pad_get_type(fp->outputs, 0) != AVMEDIA_TYPE_VIDEO ) return 0;
if( !fp->inputs ) return 1;
-#if LIBAVFILTER_VERSION_MINOR > 2 && LIBAVFILTER_VERSION_MAJOR > 7
+#if LIBAVFILTER_VERSION_MAJOR > 8
if( avfilter_filter_pad_count(fp, 0) > 1 ) return 0;
#else
if( avfilter_pad_count(fp->inputs) > 1 ) return 0;
}
AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
int channels = PluginClient::total_in_buffers;
- uint64_t layout = (((uint64_t)1)<<channels) - 1;
+ AVChannelLayout layout;
+
+ av_channel_layout_default(&layout, channels);
+
+ char chLayoutDescription[128];
+ av_channel_layout_describe(&layout, chLayoutDescription, sizeof(chLayoutDescription));
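+ // av_channel_layout_describe() yields the textual layout name handed to abuffer/abuffersink below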
+
AVFilterGraph *graph = !ffilt ? 0 : ffilt->graph;
int ret = !graph ? -1 : 0;
if( ret >= 0 && ffilt->filter->inputs ) {
char args[BCTEXTLEN];
snprintf(args, sizeof(args),
- "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx",
- 1, sample_rate, sample_rate, av_get_sample_fmt_name(sample_fmt), layout);
+ "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s:channels=%i",
+ 1, sample_rate, sample_rate, av_get_sample_fmt_name(sample_fmt), chLayoutDescription, channels);
ret = avfilter_graph_create_filter(&fsrc, avfilter_get_by_name("abuffer"),
"in", args, NULL, graph);
+ if( ret < 0 ) printf("create abuffer filter failed: %d\n", ret);
}
if( ret >= 0 )
ret = avfilter_graph_create_filter(&fsink, avfilter_get_by_name("abuffersink"),
ret = av_opt_set_bin(fsink, "sample_fmts",
(uint8_t*)&sample_fmt, sizeof(sample_fmt), AV_OPT_SEARCH_CHILDREN);
if( ret >= 0 )
- ret = av_opt_set_bin(fsink, "channel_layouts",
- (uint8_t*)&layout, sizeof(layout), AV_OPT_SEARCH_CHILDREN);
+ ret = av_opt_set(fsink, "ch_layouts",
+ chLayoutDescription, AV_OPT_SEARCH_CHILDREN);
if( ret >= 0 )
ret = av_opt_set_bin(fsink, "sample_rates",
(uint8_t*)&sample_rate, sizeof(sample_rate), AV_OPT_SEARCH_CHILDREN);
if( ret >= 0 ) {
in_channels = get_inchannels();
out_channels = get_outchannels();
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59,24,100)
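+ // copy the filter output link's layout; start from a zero-initialized layout since av_channel_layout_copy() uninits its destination first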
+ AVChannelLayout in_ch_layout = {};
+ AVFilterContext *fctx = ffilt->fctx;
+ AVFilterLink **links = !fctx->nb_outputs ? 0 : fctx->outputs;
+ if( links ) av_channel_layout_copy(&in_ch_layout, &links[0]->ch_layout);
+#endif
frame->nb_samples = size;
frame->format = AV_SAMPLE_FMT_FLTP;
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61,3,100)
+ av_channel_layout_copy(&frame->ch_layout, &in_ch_layout);
+#else
frame->channel_layout = (1<<in_channels)-1;
+#endif
frame->sample_rate = sample_rate;
frame->pts = filter_position;
}
[ . ])
PKG_3RD([ffmpeg],[yes],
- [ffmpeg-6.1],
+ [ffmpeg-7.0],
[ libavutil/libavutil.a \
libavcodec/libavcodec.a \
libpostproc/libpostproc.a \
#bilateral_cuda ###Function not implemented
#bwdif_cuda ###Function not implemented
#colorspace_cuda ###Function not implemented
+# these do not work in 7.0
+#aap ###Input/output error
+#fsync ###No such file or directory
+#tiltandshift ###Resource temporarily unavailable
https://cinelerra-gg.org/download/CinelerraGG_Manual.pdf
http://g-raffa.eu/Cinelerra/HOWTO/basics.html
.
+2024 May changes of note:
+ FFmpeg has been updated from 6.1 to 7.0.
+ ChromaKey and ChromaKeyHSV have improved menus/options.
+ Mjpegtools upgraded from 2.1.0 to 2.2.1.
2024 January changes of note:
X265 library has been updated to snapshot of 17-12-2023.
Libsndfile is now at version 1.2.2.
ChromaKeyColor *color;
- ChromaKeyThreshold *threshold;
ChromaKeyFText *threshold_text;
ChromaKeyFSlider *threshold_slider;
ChromaKeyClr *threshold_Clr;
ChromaKeyUseValue *use_value;
ChromaKeyUseColorPicker *use_colorpicker;
- ChromaKeySlope *slope;
ChromaKeyFText *slope_text;
ChromaKeyFSlider *slope_slider;
ChromaKeyClr *slope_Clr;
http://downloads.xiph.org/releases/vorbis/libvorbis-1.3.7.tar.xz
http://downloads.xiph.org/releases/ogg/libogg-1.3.5.tar.gz
http://downloads.xiph.org/releases/theora/libtheora-1.1.1.tar.bz2
+# Added 0.7.4 in at least 2016; no new updates; not in HV or CV
+https://repology.org/project/a52dec/information
https://sourceforge.net/projects/lame/files/latest/download?source=directory = 3.100
https://download.osgeo.org/libtiff/tiff-4.6.0.tar.xz
https://sourceforge.net/projects/libuuid/files/latest/download?source=directory - 1.0.3
https://code.videolan.org/videolan/x264/-/tree/stable/x264-stable.tar.gz (Jan. 2023 version r3106)
https://bitbucket.org/multicoreware/x265_git/downloads/x265_3.5.tar.gz (snapshot 17122023)
-https://ffmpeg.org/releases/ffmpeg-6.1.tar.bz2
+https://ffmpeg.org/releases/ffmpeg-7.0.tar.xz
https://github.com/webmproject/libvpx/archive/v1.13.1.tar.gz
https://code.videolan.org/videolan/dav1d/-/archive/0.5.1/dav1d-0.5.1.tar.gz
https://github.com/swh/ladspa/releases/tag/v0.4.17, plugin.org.uk
--- /dev/null
+--- a/fftools/cmdutils.c
++++ b/fftools/cmdutils.c
+@@ -60,7 +60,7 @@
+ AVDictionary *swr_opts;
+ AVDictionary *format_opts, *codec_opts;
+
+-int hide_banner = 0;
++int hide_banner = 1;
+
+ void uninit_opts(void)
+ {
--- /dev/null
+--- a/libavutil/hwcontext_cuda.c
++++ b/libavutil/hwcontext_cuda.c
+@@ -363,11 +363,13 @@
+ hwctx->internal->cuda_device));
+ if (ret < 0)
+ return ret;
++#if 0
+ } else if (flags & AV_CUDA_USE_CURRENT_CONTEXT) {
+ ret = CHECK_CU(cu->cuCtxGetCurrent(&hwctx->cuda_ctx));
+ if (ret < 0)
+ return ret;
+ av_log(device_ctx, AV_LOG_INFO, "Using current CUDA context.\n");
++#endif
+ } else {
+ ret = CHECK_CU(cu->cuCtxCreate(&hwctx->cuda_ctx, desired_flags,
+ hwctx->internal->cuda_device));
--- /dev/null
+--- a/libavformat/mpegtsenc.c
++++ b/libavformat/mpegtsenc.c
+@@ -89,9 +89,11 @@
+ int64_t pat_period; /* PAT/PMT period in PCR time base */
+ int64_t nit_period; /* NIT period in PCR time base */
+ int nb_services;
+- int64_t first_pcr;
+ int first_dts_checked;
+- int64_t next_pcr;
++ int64_t pcr_pos, pcr;
++ int64_t first_pcr, next_pcr;
++ int64_t delay;
++ int pcr_stream_pid;
+ int mux_rate; ///< set to 1 when VBR
+ int pes_payload_size;
+ int64_t total_size;
+@@ -258,7 +260,7 @@
+ int data_st_warning;
+
+ int64_t pcr_period; /* PCR period in PCR time base */
+- int64_t last_pcr;
++ int64_t pcr_timer;
+
+ /* For Opus */
+ int opus_queued_samples;
+@@ -959,18 +961,18 @@
+ return 0;
+ }
+
+-static int64_t get_pcr(const MpegTSWrite *ts)
++static int64_t get_pcr(const MpegTSWrite *ts, AVIOContext *pb)
+ {
+- return av_rescale(ts->total_size + 11, 8 * PCR_TIME_BASE, ts->mux_rate) +
+- ts->first_pcr;
++ int64_t pos = avio_tell(pb) + 11;
++ return ts->pcr + (ts->mux_rate == 1 ? (pos - ts->pcr_pos) * 8 :
++ av_rescale(pos - ts->pcr_pos, 8 * PCR_TIME_BASE, ts->mux_rate));
+ }
+
+ static void write_packet(AVFormatContext *s, const uint8_t *packet)
+ {
+ MpegTSWrite *ts = s->priv_data;
+ if (ts->m2ts_mode) {
+- int64_t pcr = get_pcr(s->priv_data);
+- uint32_t tp_extra_header = pcr % 0x3fffffff;
++ uint32_t tp_extra_header = get_pcr(ts, s->pb) % 0x3fffffff;
+ tp_extra_header = AV_RB32(&tp_extra_header);
+ avio_write(s->pb, (unsigned char *) &tp_extra_header,
+ sizeof(tp_extra_header));
+@@ -1056,9 +1058,6 @@
+ else
+ ts_st->pcr_period = 1;
+ }
+-
+- // output a PCR as soon as possible
+- ts_st->last_pcr = ts->first_pcr - ts_st->pcr_period;
+ }
+
+ static void select_pcr_streams(AVFormatContext *s)
+@@ -1121,6 +1120,7 @@
+
+ if (s->max_delay < 0) /* Not set by the caller */
+ s->max_delay = 0;
++ ts->delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE);
+
+ // round up to a whole number of TS packets
+ ts->pes_payload_size = (ts->pes_payload_size + 14 + 183) / 184 * 184 - 14;
+@@ -1180,7 +1180,9 @@
+ /* MPEG pid values < 16 are reserved. Applications which set st->id in
+ * this range are assigned a calculated pid. */
+ if (st->id < 16) {
+- if (ts->m2ts_mode) {
++ if (ts->start_pid >= 0)
++ ts_st->pid = ts->start_pid + i;
++ else if (ts->m2ts_mode) {
+ switch (st->codecpar->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ ts_st->pid = ts->m2ts_video_pid++;
+@@ -1207,9 +1209,9 @@
+ av_log(s, AV_LOG_ERROR, "Cannot automatically assign PID for stream %d\n", st->index);
+ return AVERROR(EINVAL);
+ }
+- } else {
+- ts_st->pid = ts->start_pid + i;
+ }
++ else
++ ts_st->pid = START_PID + i;
+ } else {
+ ts_st->pid = st->id;
+ }
+@@ -1277,9 +1279,14 @@
+ ts->last_pat_ts = AV_NOPTS_VALUE;
+ ts->last_sdt_ts = AV_NOPTS_VALUE;
+ ts->last_nit_ts = AV_NOPTS_VALUE;
+- ts->pat_period = av_rescale(ts->pat_period_us, PCR_TIME_BASE, AV_TIME_BASE);
+- ts->sdt_period = av_rescale(ts->sdt_period_us, PCR_TIME_BASE, AV_TIME_BASE);
+- ts->nit_period = av_rescale(ts->nit_period_us, PCR_TIME_BASE, AV_TIME_BASE);
++ ts->pat_period = ts->pat_period_us < 0 ? -1 :
++ av_rescale(ts->pat_period_us, PCR_TIME_BASE, AV_TIME_BASE);
++ ts->sdt_period = ts->sdt_period_us < 0 ? -1 :
++ av_rescale(ts->sdt_period_us, PCR_TIME_BASE, AV_TIME_BASE);
++ ts->nit_period = ts->nit_period_us < 0 ? -1 :
++ av_rescale(ts->nit_period_us, PCR_TIME_BASE, AV_TIME_BASE);
++ ts->pcr = 0;
++ ts->pcr_pos = 0;
+
+ /* assign provider name */
+ provider = av_dict_get(s->metadata, "service_provider", NULL, 0);
+@@ -1295,8 +1302,8 @@
+ av_log(s, AV_LOG_VERBOSE, "muxrate %d, ", ts->mux_rate);
+ av_log(s, AV_LOG_VERBOSE,
+ "sdt every %"PRId64" ms, pat/pmt every %"PRId64" ms",
+- av_rescale(ts->sdt_period, 1000, PCR_TIME_BASE),
+- av_rescale(ts->pat_period, 1000, PCR_TIME_BASE));
++ ts->sdt_period < 0 ? -1 : av_rescale(ts->sdt_period, 1000, PCR_TIME_BASE),
++ ts->pat_period < 0 ? -1 : av_rescale(ts->pat_period, 1000, PCR_TIME_BASE));
+ if (ts->flags & MPEGTS_FLAG_NIT)
+ av_log(s, AV_LOG_VERBOSE, ", nit every %"PRId64" ms", av_rescale(ts->nit_period, 1000, PCR_TIME_BASE));
+ av_log(s, AV_LOG_VERBOSE, "\n");
+@@ -1305,36 +1312,40 @@
+ }
+
+ /* send SDT, NIT, PAT and PMT tables regularly */
+-static void retransmit_si_info(AVFormatContext *s, int force_pat, int force_sdt, int force_nit, int64_t pcr)
++static void retransmit_si_info(AVFormatContext *s, int force_pat, int force_sdt, int force_nit)
+ {
+ MpegTSWrite *ts = s->priv_data;
+ int i;
+
+- if ((pcr != AV_NOPTS_VALUE && ts->last_sdt_ts == AV_NOPTS_VALUE) ||
+- (pcr != AV_NOPTS_VALUE && pcr - ts->last_sdt_ts >= ts->sdt_period) ||
+- force_sdt
+- ) {
+- if (pcr != AV_NOPTS_VALUE)
+- ts->last_sdt_ts = FFMAX(pcr, ts->last_sdt_ts);
+- mpegts_write_sdt(s);
+- }
+- if ((pcr != AV_NOPTS_VALUE && ts->last_pat_ts == AV_NOPTS_VALUE) ||
+- (pcr != AV_NOPTS_VALUE && pcr - ts->last_pat_ts >= ts->pat_period) ||
+- force_pat) {
+- if (pcr != AV_NOPTS_VALUE)
+- ts->last_pat_ts = FFMAX(pcr, ts->last_pat_ts);
+- mpegts_write_pat(s);
+- for (i = 0; i < ts->nb_services; i++)
+- mpegts_write_pmt(s, ts->services[i]);
+- }
+- if ((pcr != AV_NOPTS_VALUE && ts->last_nit_ts == AV_NOPTS_VALUE) ||
+- (pcr != AV_NOPTS_VALUE && pcr - ts->last_nit_ts >= ts->nit_period) ||
+- force_nit
+- ) {
+- if (pcr != AV_NOPTS_VALUE)
+- ts->last_nit_ts = FFMAX(pcr, ts->last_nit_ts);
++ if (ts->sdt_period >= 0) {
++ int64_t pcr = get_pcr(ts, s->pb);
++ if (ts->last_sdt_ts == AV_NOPTS_VALUE || pcr >= ts->last_sdt_ts + ts->sdt_period)
++ force_sdt = 1;
++ if (force_sdt) {
++ ts->last_sdt_ts = pcr;
++ mpegts_write_sdt(s);
++ }
++ }
++ if (ts->pat_period >= 0) {
++ int64_t pcr = get_pcr(ts, s->pb);
++ if (ts->last_pat_ts == AV_NOPTS_VALUE || pcr >= ts->last_pat_ts + ts->pat_period)
++ force_pat = 1;
++ if (force_pat) {
++ ts->last_pat_ts = pcr;
++ mpegts_write_pat(s);
++ for (i = 0; i < ts->nb_services; i++)
++ mpegts_write_pmt(s, ts->services[i]);
++ }
++ }
++ if (ts->nit_period >= 0) {
++ int64_t pcr = get_pcr(ts, s->pb);
++ if (ts->last_nit_ts == AV_NOPTS_VALUE || pcr >= ts->last_nit_ts + ts->nit_period)
++ force_nit = 1;
++ if (force_nit) {
++ ts->last_nit_ts = pcr;
+ if (ts->flags & MPEGTS_FLAG_NIT)
+ mpegts_write_nit(s);
++ }
+ }
+ }
+
+@@ -1371,25 +1382,29 @@
+ static void mpegts_insert_pcr_only(AVFormatContext *s, AVStream *st)
+ {
+ MpegTSWrite *ts = s->priv_data;
+- MpegTSWriteStream *ts_st = st->priv_data;
++ int64_t pcr = get_pcr(ts, s->pb);
++ MpegTSWriteStream *ts_st = st ? st->priv_data : 0;
++ uint32_t pcr_pid = ts_st ? ts_st->pid : ts->pcr_stream_pid;
+ uint8_t *q;
+ uint8_t buf[TS_PACKET_SIZE];
+
+ q = buf;
+ *q++ = 0x47;
+- *q++ = ts_st->pid >> 8;
+- *q++ = ts_st->pid;
+- *q++ = 0x20 | ts_st->cc; /* Adaptation only */
++ *q++ = pcr_pid >> 8;
++ *q++ = pcr_pid;
++ uint32_t flags = 0x20; /* Adaptation only */
+ /* Continuity Count field does not increment (see 13818-1 section 2.4.3.3) */
++ if(ts_st) flags |= ts_st->cc;
++ *q++ = flags;
+ *q++ = TS_PACKET_SIZE - 5; /* Adaptation Field Length */
+ *q++ = 0x10; /* Adaptation flags: PCR present */
+- if (ts_st->discontinuity) {
++ if (ts_st && ts_st->discontinuity) {
+ q[-1] |= 0x80;
+ ts_st->discontinuity = 0;
+ }
+
+ /* PCR coded into 6 bytes */
+- q += write_pcr_bits(q, get_pcr(ts));
++ q += write_pcr_bits(q, pcr);
+
+ /* stuffing bytes */
+ memset(q, 0xFF, TS_PACKET_SIZE - (q - buf));
+@@ -1490,9 +1505,9 @@
+ int afc_len, stuffing_len;
+ int is_dvb_subtitle = (st->codecpar->codec_id == AV_CODEC_ID_DVB_SUBTITLE);
+ int is_dvb_teletext = (st->codecpar->codec_id == AV_CODEC_ID_DVB_TELETEXT);
+- int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE);
+ int force_pat = st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && key && !ts_st->prev_payload_key;
+ int force_sdt = 0;
++ int64_t pcr;
+ int force_nit = 0;
+
+ av_assert0(ts_st->payload != buf || st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO);
+@@ -1509,21 +1524,19 @@
+
+ is_start = 1;
+ while (payload_size > 0) {
+- int64_t pcr = AV_NOPTS_VALUE;
+- if (ts->mux_rate > 1)
+- pcr = get_pcr(ts);
+- else if (dts != AV_NOPTS_VALUE)
+- pcr = (dts - delay) * 300;
+-
+- retransmit_si_info(s, force_pat, force_sdt, force_nit, pcr);
+- force_pat = 0;
+- force_sdt = 0;
+- force_nit = 0;
++ // add 11, pcr references the last byte of program clock reference base
++ ts->pcr_pos = avio_tell(s->pb) + 11;
++ pcr = ts->pcr = ts->mux_rate != 1 ?
++ av_rescale(ts->pcr_pos, 8 * PCR_TIME_BASE, ts->mux_rate) :
++ (dts == AV_NOPTS_VALUE ? 0 : (dts - ts->delay) * 300);
++ if (force_pat || force_sdt || force_nit) {
++ retransmit_si_info(s, force_pat, force_sdt, force_nit);
++ force_pat = force_sdt = force_nit = 0;
++ }
+
+ write_pcr = 0;
+ if (ts->mux_rate > 1) {
+ /* Send PCR packets for all PCR streams if needed */
+- pcr = get_pcr(ts);
+ if (pcr >= ts->next_pcr) {
+ int64_t next_pcr = INT64_MAX;
+ for (int i = 0; i < s->nb_streams; i++) {
+@@ -1533,36 +1546,43 @@
+ AVStream *st2 = s->streams[st2_index];
+ MpegTSWriteStream *ts_st2 = st2->priv_data;
+ if (ts_st2->pcr_period) {
+- if (pcr - ts_st2->last_pcr >= ts_st2->pcr_period) {
+- ts_st2->last_pcr = FFMAX(pcr - ts_st2->pcr_period, ts_st2->last_pcr + ts_st2->pcr_period);
+- if (st2 != st) {
++ if (pcr >= ts_st2->pcr_timer) {
++ ts_st2->pcr_timer = pcr + ts_st2->pcr_period;
++ if (st2 != st) {
+ mpegts_insert_pcr_only(s, st2);
+- pcr = get_pcr(ts);
+ } else {
+ write_pcr = 1;
+ }
+ }
+- next_pcr = FFMIN(next_pcr, ts_st2->last_pcr + ts_st2->pcr_period);
++ next_pcr = FFMIN(next_pcr, ts_st2->pcr_timer);
+ }
+ }
+ ts->next_pcr = next_pcr;
+ }
+- if (dts != AV_NOPTS_VALUE && (dts - pcr / 300) > delay) {
+- /* pcr insert gets priority over null packet insert */
+- if (write_pcr)
+- mpegts_insert_pcr_only(s, st);
+- else
+- mpegts_insert_null_packet(s);
+- /* recalculate write_pcr and possibly retransmit si_info */
+- continue;
+- }
+- } else if (ts_st->pcr_period && pcr != AV_NOPTS_VALUE) {
+- if (pcr - ts_st->last_pcr >= ts_st->pcr_period && is_start) {
+- ts_st->last_pcr = FFMAX(pcr - ts_st->pcr_period, ts_st->last_pcr + ts_st->pcr_period);
++ }
++ else if (ts_st->pcr_period) {
++ if (pcr >= ts_st->pcr_timer) {
++ ts_st->pcr_timer = pcr + ts_st->pcr_period;
+ write_pcr = 1;
+ }
+ }
+
++ if (write_pcr && ts->pcr_stream_pid >= 0) {
++ mpegts_insert_pcr_only(s, 0);
++ continue;
++ }
++
++ if (ts->mux_rate > 1 && dts != AV_NOPTS_VALUE &&
++ (dts - pcr / 300) > ts->delay) {
++ /* pcr insert gets priority over null packet insert */
++ if (write_pcr)
++ mpegts_insert_pcr_only(s, st);
++ else
++ mpegts_insert_null_packet(s);
++ /* recalculate write_pcr and possibly retransmit si_info */
++ continue;
++ }
++
+ /* prepare packet header */
+ q = buf;
+ *q++ = 0x47;
+@@ -1592,7 +1612,6 @@
+ if (write_pcr) {
+ set_af_flag(buf, 0x10);
+ q = get_ts_payload_start(buf);
+- // add 11, pcr references the last byte of program clock reference base
+ if (dts != AV_NOPTS_VALUE && dts < pcr / 300)
+ av_log(s, AV_LOG_WARNING, "dts < pcr, TS is invalid\n");
+ extend_af(buf, write_pcr_bits(q, pcr));
+@@ -1864,8 +1883,8 @@
+ uint8_t *data = NULL;
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSWriteStream *ts_st = st->priv_data;
+- const int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE) * 2;
+- const int64_t max_audio_delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE) / 2;
++ const int64_t delay_ticks2 = ts->delay * 2;
++ const int64_t max_audio_delay = ts->delay / 2;
+ int64_t dts = pkt->dts, pts = pkt->pts;
+ int opus_samples = 0;
+ size_t side_data_size;
+@@ -1885,9 +1904,9 @@
+
+ if (ts->copyts < 1) {
+ if (pts != AV_NOPTS_VALUE)
+- pts += delay;
++ pts += delay_ticks2;
+ if (dts != AV_NOPTS_VALUE)
+- dts += delay;
++ dts += delay_ticks2;
+ }
+
+ if (!ts_st->first_timestamp_checked && (pts == AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)) {
+@@ -2354,8 +2373,10 @@
+ 0, AV_OPT_TYPE_CONST, { .i64 = MPEGTS_SERVICE_TYPE_HEVC_DIGITAL_HDTV }, 0x01, 0xff, ENC, .unit = "mpegts_service_type" },
+ { "mpegts_pmt_start_pid", "Set the first pid of the PMT.",
+ OFFSET(pmt_start_pid), AV_OPT_TYPE_INT, { .i64 = 0x1000 }, FIRST_OTHER_PID, LAST_OTHER_PID, ENC },
++ { "mpegts_pcr_stream_pid", "create seperate PCR stream on this pid.",
++ OFFSET(pcr_stream_pid), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 0x1f00, ENC },
+ { "mpegts_start_pid", "Set the first pid.",
+- OFFSET(start_pid), AV_OPT_TYPE_INT, { .i64 = 0x0100 }, FIRST_OTHER_PID, LAST_OTHER_PID, ENC },
++ OFFSET(start_pid), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, LAST_OTHER_PID, ENC },
+ { "mpegts_m2ts_mode", "Enable m2ts mode.", OFFSET(m2ts_mode), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, ENC },
+ { "muxrate", NULL, OFFSET(mux_rate), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, ENC },
+ { "pes_payload_size", "Minimum PES packet payload in bytes",
+@@ -2381,10 +2402,10 @@
+ OFFSET(omit_video_pes_length), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, ENC },
+ { "pcr_period", "PCR retransmission time in milliseconds",
+ OFFSET(pcr_period_ms), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, ENC },
+- { "pat_period", "PAT/PMT retransmission time limit in seconds",
++ { "pat_period", "PAT/PMT retransmission time limit in ms, -1 no pat",
+ OFFSET(pat_period_us), AV_OPT_TYPE_DURATION, { .i64 = PAT_RETRANS_TIME * 1000LL }, 0, INT64_MAX, ENC },
+- { "sdt_period", "SDT retransmission time limit in seconds",
+- OFFSET(sdt_period_us), AV_OPT_TYPE_DURATION, { .i64 = SDT_RETRANS_TIME * 1000LL }, 0, INT64_MAX, ENC },
++ { "sdt_period", "SDT retransmission time limit in ms, -1 no sdt",
++ OFFSET(sdt_period_us), AV_OPT_TYPE_INT64, { .i64 = SDT_RETRANS_TIME * 1000LL }, -1, INT64_MAX, ENC },
+ { "nit_period", "NIT retransmission time limit in seconds",
+ OFFSET(nit_period_us), AV_OPT_TYPE_DURATION, { .i64 = NIT_RETRANS_TIME * 1000LL }, 0, INT64_MAX, ENC },
+ { NULL },
+--- a/libavformat/mpegts.h
++++ b/libavformat/mpegts.h
+@@ -64,6 +64,7 @@
+ /* PID from 0x1FFC to 0x1FFE may be assigned as needed to PMT, elementary
+ * streams and other data tables */
+ #define NULL_PID 0x1FFF /* Null packet (used for fixed bandwidth padding) */
++#define START_PID 0x0400
+
+ /* m2ts pids */
+ #define M2TS_PMT_PID 0x0100
+--- a/libavformat/bluray.c
++++ b/libavformat/bluray.c
+@@ -27,7 +27,7 @@
+ #include "libavutil/opt.h"
+
+ #define BLURAY_PROTO_PREFIX "bluray:"
+-#define MIN_PLAYLIST_LENGTH 180 /* 3 min */
++#define MIN_PLAYLIST_LENGTH 0
+
+ typedef struct {
+ const AVClass *class;
+
+--- a/doc/muxers.texi
++++ b/doc/muxers.texi
+@@ -2920,7 +2920,8 @@
+ Maximum time in seconds between PAT/PMT tables. Default is @code{0.1}.
+
+ @item sdt_period @var{duration}
+-Maximum time in seconds between SDT tables. Default is @code{0.5}.
++Maximum time in seconds between SDT tables. Default is @code{0.5}. Regardless
++of this setting no SDT is written in m2ts mode.
+
+ @item nit_period @var{duration}
+ Maximum time in seconds between NIT tables. Default is @code{0.5}.
--- /dev/null
+--- a/libavformat/avformat.h
++++ b/libavformat/avformat.h
+@@ -499,6 +499,9 @@
+ The user or muxer can override this through
+ AVFormatContext.avoid_negative_ts
+ */
++#define AVFMT_SEEK_NOSTREAMS 0x80000 /**< Stream index ignored by seek,
++ or some streams fail to seek
++ */
+
+ #define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */
+
+@@ -562,7 +565,8 @@
+ /**
+ * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,
+ * AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,
+- * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
++ * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS,
++ * AVFMT_SEEK_NOSTREAMS
+ */
+ int flags;
+
+--- a/libavformat/dv.c
++++ b/libavformat/dv.c
+@@ -713,6 +713,7 @@
+ const FFInputFormat ff_dv_demuxer = {
+ .p.name = "dv",
+ .p.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
++ .p.flags = AVFMT_SEEK_NOSTREAMS,
+ .p.extensions = "dv,dif",
+ .priv_data_size = sizeof(RawDVContext),
+ .read_probe = dv_probe,
+
+--- a/libavformat/matroskadec.c
++++ b/libavformat/matroskadec.c
+@@ -4794,6 +4794,7 @@
+ const FFInputFormat ff_webm_dash_manifest_demuxer = {
+ .p.name = "webm_dash_manifest",
+ .p.long_name = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
++ .p.flags = AVFMT_SEEK_NOSTREAMS,
+ .p.priv_class = &webm_dash_class,
+ .priv_data_size = sizeof(MatroskaDemuxContext),
+ .flags_internal = FF_INFMT_FLAG_INIT_CLEANUP,
+@@ -4806,6 +4807,7 @@
+ const FFInputFormat ff_matroska_demuxer = {
+ .p.name = "matroska,webm",
+ .p.long_name = NULL_IF_CONFIG_SMALL("Matroska / WebM"),
++ .p.flags = AVFMT_SEEK_NOSTREAMS,
+ .p.extensions = "mkv,mk3d,mka,mks,webm",
+ .p.mime_type = "audio/webm,audio/x-matroska,video/webm,video/x-matroska",
+ .priv_data_size = sizeof(MatroskaDemuxContext),
+
+--- a/libavformat/seek.c
++++ b/libavformat/seek.c
+@@ -605,6 +605,13 @@
+ return seek_frame_byte(s, stream_index, timestamp, flags);
+ }
+
++ if (stream_index != -1 && (s->iformat->flags & AVFMT_SEEK_NOSTREAMS)) {
++ timestamp = av_rescale_q(timestamp,
++ s->streams[stream_index]->time_base,
++ AV_TIME_BASE_Q);
++ stream_index = -1;
++ }
++
+ if (stream_index < 0) {
+ stream_index = av_find_default_stream_index(s);
+ if (stream_index < 0)
--- /dev/null
+--- a/libavformat/avidec.c
++++ b/libavformat/avidec.c
+@@ -2020,6 +2020,7 @@
+ .p.name = "avi",
+ .p.long_name = NULL_IF_CONFIG_SMALL("AVI (Audio Video Interleaved)"),
+ .p.extensions = "avi",
++ .p.flags = AVFMT_SEEK_NOSTREAMS,
+ .p.priv_class = &demuxer_class,
+ .priv_data_size = sizeof(AVIContext),
+ .flags_internal = FF_INFMT_FLAG_INIT_CLEANUP,
--- /dev/null
+--- a/libavfilter/formats.c
++++ b/libavfilter/formats.c
+@@ -110,11 +110,13 @@
+ possibly causing a lossy conversion elsewhere in the graph.
+ To avoid that, pretend that there are no common formats to force the
+ insertion of a conversion filter. */
+- if (type == AVMEDIA_TYPE_VIDEO)
++ if (type == AVMEDIA_TYPE_VIDEO) {
+ for (i = 0; i < a->nb_formats; i++) {
+ const AVPixFmtDescriptor *const adesc = av_pix_fmt_desc_get(a->formats[i]);
++ if( !adesc ) continue;
+ for (j = 0; j < b->nb_formats; j++) {
+ const AVPixFmtDescriptor *bdesc = av_pix_fmt_desc_get(b->formats[j]);
++ if( !bdesc ) continue;
+ alpha2 |= adesc->flags & bdesc->flags & AV_PIX_FMT_FLAG_ALPHA;
+ chroma2|= adesc->nb_components > 1 && bdesc->nb_components > 1;
+ if (a->formats[i] == b->formats[j]) {
+@@ -123,6 +125,7 @@
+ }
+ }
+ }
++ }
+
+ // If chroma or alpha can be lost through merging then do not merge
+ if (alpha2 > alpha1 || chroma2 > chroma1)
--- /dev/null
+--- a/libavcodec/vdpau_mpeg12.c
++++ b/libavcodec/vdpau_mpeg12.c
+@@ -117,6 +117,7 @@
+ .frame_priv_data_size = sizeof(struct vdpau_picture_context),
+ .init = vdpau_mpeg1_init,
+ .uninit = ff_vdpau_common_uninit,
++ .frame_params = ff_vdpau_common_frame_params,
+ .priv_data_size = sizeof(VDPAUContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+ };
--- /dev/null
+--- a/libavcodec/h263dec.c
++++ b/libavcodec/h263dec.c
+@@ -623,7 +623,7 @@
+ if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4)
+ ff_mpeg4_frame_end(avctx, buf, buf_size);
+
+- if (!s->divx_packed && avctx->hwaccel)
++ if (s->divx_packed && avctx->hwaccel)
+ ff_thread_finish_setup(avctx);
+
+ av_assert1(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
--- /dev/null
+--- a/libavformat/mpegenc.c
++++ b/libavformat/mpegenc.c
+@@ -987,9 +987,9 @@
+ PacketDesc *pkt_desc;
+
+ while ((pkt_desc = stream->predecode_packet) &&
++ pkt_desc != stream->premux_packet &&
+ scr > pkt_desc->dts) { // FIXME: > vs >=
+- if (stream->buffer_index < pkt_desc->size ||
+- stream->predecode_packet == stream->premux_packet) {
++ if (stream->buffer_index < pkt_desc->size) {
+ av_log(ctx, AV_LOG_ERROR,
+ "buffer underflow st=%d bufi=%d size=%d\n",
+ i, stream->buffer_index, pkt_desc->size);
--- /dev/null
+--- a/libavutil/hwcontext_vdpau.c
++++ b/libavutil/hwcontext_vdpau.c
+@@ -47,6 +47,11 @@
+ { 0, AV_PIX_FMT_NONE, },
+ };
+
++static const VDPAUPixFmtMap pix_fmts_420j[] = {
++ { VDP_YCBCR_FORMAT_YV12, AV_PIX_FMT_YUVJ420P },
++ { 0, AV_PIX_FMT_NONE, },
++};
++
+ static const VDPAUPixFmtMap pix_fmts_422[] = {
+ { VDP_YCBCR_FORMAT_NV12, AV_PIX_FMT_NV16 },
+ { VDP_YCBCR_FORMAT_YV12, AV_PIX_FMT_YUV422P },
+@@ -71,6 +76,7 @@
+ const VDPAUPixFmtMap *map;
+ } vdpau_pix_fmts[] = {
+ { VDP_CHROMA_TYPE_420, AV_PIX_FMT_YUV420P, pix_fmts_420 },
++ { VDP_CHROMA_TYPE_420, AV_PIX_FMT_YUVJ420P, pix_fmts_420j },
+ { VDP_CHROMA_TYPE_422, AV_PIX_FMT_YUV422P, pix_fmts_422 },
+ { VDP_CHROMA_TYPE_444, AV_PIX_FMT_YUV444P, pix_fmts_444 },
+ #ifdef VDP_YCBCR_FORMAT_P016
--- /dev/null
+--- a/libavcodec/encode.c
++++ b/libavcodec/encode.c
+@@ -320,7 +320,7 @@
+ }
+
+ if (!frame->buf[0]) {
+- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
++ if (avci->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
+ avci->frame_thread_encoder))
+ return AVERROR_EOF;
+
+@@ -339,8 +339,10 @@
+ ret = ff_encode_encode_cb(avctx, avpkt, frame, &got_packet);
+ }
+
+- if (avci->draining && !got_packet)
++ if (avci->draining && !got_packet) {
++ fflush(stderr);
+ avci->draining_done = 1;
++ }
+
+ return ret;
+ }
+@@ -515,10 +517,16 @@
+ if (avci->draining)
+ return AVERROR_EOF;
+
+- if (avci->buffer_frame->buf[0])
++ if (avci->buffer_frame->buf[0]) {
++ if (!frame) {
++ fflush(stderr);
++ av_frame_unref(avci->buffer_frame);
++ }
+ return AVERROR(EAGAIN);
++ }
+
+ if (!frame) {
++ fflush(stderr);
+ avci->draining = 1;
+ } else {
+ ret = encode_send_frame_internal(avctx, frame);
--- /dev/null
+--- a/libavcodec/pcm-dvdenc.c
++++ b/libavcodec/pcm-dvdenc.c
+@@ -38,6 +38,12 @@
+ int quant, freq, frame_size;
+
+ switch (avctx->sample_rate) {
++ case 32000:
++ freq = 3;
++ break;
++ case 44100:
++ freq = 2;
++ break;
+ case 48000:
+ freq = 0;
+ break;
+@@ -181,7 +187,7 @@
+ .priv_data_size = sizeof(PCMDVDContext),
+ .init = pcm_dvd_encode_init,
+ FF_CODEC_ENCODE_CB(pcm_dvd_encode_frame),
+- .p.supported_samplerates = (const int[]) { 48000, 96000, 0},
++ .p.supported_samplerates = (const int[]) { 32000, 44100, 48000, 96000, 0},
+ .p.ch_layouts = (const AVChannelLayout[]) { AV_CHANNEL_LAYOUT_MONO,
+ AV_CHANNEL_LAYOUT_STEREO,
+ AV_CHANNEL_LAYOUT_5POINT1,
--- /dev/null
+--- a/libavcodec/wrapped_avframe.c
++++ b/libavcodec/wrapped_avframe.c
+@@ -33,6 +33,38 @@
+ #include "libavutil/buffer.h"
+ #include "libavutil/pixdesc.h"
+
++
++
++static const enum AVPixelFormat pix_fmts_all[] = {
++ AV_PIX_FMT_YUV411P,
++ AV_PIX_FMT_YUV420P,
++ AV_PIX_FMT_YUVJ420P,
++ AV_PIX_FMT_YUV422P,
++ AV_PIX_FMT_YUVJ422P,
++ AV_PIX_FMT_YUV444P,
++ AV_PIX_FMT_YUVJ444P,
++ AV_PIX_FMT_YUV420P10,
++ AV_PIX_FMT_YUV422P10,
++ AV_PIX_FMT_YUV444P10,
++ AV_PIX_FMT_YUV420P12,
++ AV_PIX_FMT_YUV422P12,
++ AV_PIX_FMT_YUV444P12,
++ AV_PIX_FMT_YUV420P14,
++ AV_PIX_FMT_YUV422P14,
++ AV_PIX_FMT_YUV444P14,
++ AV_PIX_FMT_YUV420P16,
++ AV_PIX_FMT_YUV422P16,
++ AV_PIX_FMT_YUV444P16,
++ AV_PIX_FMT_GRAY8,
++ AV_PIX_FMT_GRAY9,
++ AV_PIX_FMT_GRAY10,
++ AV_PIX_FMT_GRAY12,
++ AV_PIX_FMT_GRAY16,
++ AV_PIX_FMT_NONE
++};
++
++
++
+ static void wrapped_avframe_release_buffer(void *unused, uint8_t *data)
+ {
+ AVFrame *frame = (AVFrame *)data;
+@@ -111,6 +143,7 @@
+ .p.id = AV_CODEC_ID_WRAPPED_AVFRAME,
+ .p.capabilities = AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
+ FF_CODEC_ENCODE_CB(wrapped_avframe_encode),
++ .p.pix_fmts = pix_fmts_all,
+ };
+
+ const FFCodec ff_wrapped_avframe_decoder = {
--- /dev/null
+--- a/libavformat/yuv4mpegenc.c
++++ b/libavformat/yuv4mpegenc.c
+@@ -268,7 +268,7 @@
+ av_log(s, AV_LOG_ERROR, "'%s' is not an official yuv4mpegpipe pixel format. "
+ "Use '-strict -1' to encode to this pixel format.\n",
+ av_get_pix_fmt_name(s->streams[0]->codecpar->format));
+- return AVERROR(EINVAL);
++ //return AVERROR(EINVAL);
+ }
+ av_log(s, AV_LOG_WARNING, "Warning: generating non standard YUV stream. "
+ "Mjpegtools will not work.\n");