#include <stdarg.h>
#include <fcntl.h>
#include <limits.h>
+#include <ctype.h>
+
// workarounds (CentOS)
#include <lzma.h>
#ifndef INT64_MAX
#define AUDIO_INBUF_SIZE 0x10000
#define VIDEO_REFILL_THRESH 0
#define AUDIO_REFILL_THRESH 0x1000
+#define AUDIO_MIN_FRAME_SZ 128
Mutex FFMPEG::fflock("FFMPEG::fflock");
{
inp = outp = bfr;
hpos = 0;
+ memset(bfr, 0, lmt-bfr);
}
// Reposition the read pointer 'ofs' samples behind the write pointer in
// the interleaved circular history buffer (nch floats per sample).
void FFAudioStream::iseek(int64_t ofs)
{
+// clamp to the history actually written and to the buffer capacity
+ if( ofs > hpos ) ofs = hpos;
+ if( ofs > sz ) ofs = sz;
outp = inp - ofs*nch;
// wrap around if we stepped back past the start of the ring
if( outp < bfr ) outp += sz*nch;
}
need_packet = 1;
frame = fframe = 0;
bsfc = 0;
+ stats_fp = 0;
+ stats_filename = 0;
+ stats_in = 0;
+ pass = 0;
}
FFStream::~FFStream()
if( frame ) av_frame_free(&frame);
if( fframe ) av_frame_free(&fframe);
delete frm_lock;
+ if( stats_fp ) fclose(stats_fp);
+ if( stats_in ) av_freep(&stats_in);
+ delete [] stats_filename;
}
void FFStream::ff_lock(const char *cp)
if( decoder->capabilities & AV_CODEC_CAP_DR1 )
avctx->flags |= CODEC_FLAG_EMU_EDGE;
avcodec_parameters_to_context(avctx, st->codecpar);
+ if( !av_dict_get(copts, "threads", NULL, 0) )
+ avctx->thread_count = ffmpeg->ff_cpus();
ret = avcodec_open2(avctx, decoder, &copts);
}
if( ret >= 0 ) {
// Push a decoded frame into the filter graph's buffer source pad.
// NOTE: flags==0 (AV_BUFFERSRC_FLAG_KEEP_REF removed by this hunk), so on
// success the frame's buffer references are taken over by the filter graph
// and the caller must not rely on the frame's contents afterwards.
// Returns >=0 on success, the negative libav error code on failure.
int FFStream::load_filter(AVFrame *frame)
{
- int ret = av_buffersrc_add_frame_flags(buffersrc_ctx,
- frame, AV_BUFFERSRC_FLAG_KEEP_REF);
- if( ret < 0 ) {
- av_frame_unref(frame);
+ int ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0);
+ if( ret < 0 )
eprintf(_("av_buffersrc_add_frame_flags failed\n"));
- }
return ret;
}
int FFStream::read_frame(AVFrame *frame)
{
+ av_frame_unref(frame);
if( !filter_graph || !buffersrc_ctx || !buffersink_ctx )
return decode(frame);
if( !fframe && !(fframe=av_frame_alloc()) ) {
ret = write_packet(opkt);
if( ret < 0 ) break;
++pkts;
+ if( frame && stats_fp ) {
+ ret = write_stats_file();
+ if( ret < 0 ) break;
+ }
}
ff_err(ret, "FFStream::encode_frame: encode failed\n");
return -1;
if( writing < 0 )
return -1;
int ret = encode_frame(0);
+ if( ret >= 0 && stats_fp ) {
+ ret = write_stats_file();
+ close_stats_file();
+ }
if( ret < 0 )
ff_err(ret, "FFStream::flush");
return ret >= 0 ? 0 : 1;
}
+
+// Open the two-pass rate-control log (pass 1) for writing.
+// Returns 0 on success, AVERROR(errno) if the file cannot be created.
+int FFStream::open_stats_file()
+{
+ stats_fp = fopen(stats_filename,"w");
+ return stats_fp ? 0 : AVERROR(errno);
+}
+
+// Close the pass-1 stats log if it is open.  Idempotent; always returns 0.
+int FFStream::close_stats_file()
+{
+ if( stats_fp ) {
+ fclose(stats_fp); stats_fp = 0;
+ }
+ return 0;
+}
+
+// Load the whole pass-1 stats log into memory for a pass-2 encode and hand
+// it to the codec via avctx->stats_in.  The av_malloc'd buffer is kept in
+// stats_in and remains owned by this FFStream (freed in the destructor).
+// Returns 0 on success, otherwise a negative AVERROR code.
+int FFStream::read_stats_file()
+{
+ int64_t len = 0; struct stat stats_st;
+ int fd = open(stats_filename, O_RDONLY);
+ int ret = fd >= 0 ? 0: ENOENT;
+ if( !ret && fstat(fd, &stats_st) )
+ ret = EINVAL;
+ if( !ret ) {
+ len = stats_st.st_size;
+// +1 for the NUL terminator appended below
+ stats_in = (char *)av_malloc(len+1);
+ if( !stats_in )
+ ret = ENOMEM;
+ }
+// asking for len+1 bytes also detects a file that grew: EOF => exactly len read
+ if( !ret && read(fd, stats_in, len+1) != len )
+ ret = EIO;
+ if( !ret ) {
+ stats_in[len] = 0;
+ avctx->stats_in = stats_in;
+ }
+ if( fd >= 0 )
+ close(fd);
+ return !ret ? 0 : AVERROR(ret);
+}
+
+// Append the codec's latest pass-1 statistics (avctx->stats_out) to the log.
+// Returns the number of bytes written (>=0) or AVERROR(errno) on a short
+// write; callers only test for ret < 0.
+int FFStream::write_stats_file()
+{
+ int ret = 0;
+ if( avctx->stats_out && (ret=strlen(avctx->stats_out)) > 0 ) {
+ int len = fwrite(avctx->stats_out, 1, ret, stats_fp);
+ if( ret != len )
+ ff_err(ret = AVERROR(errno), "FFStream::write_stats_file");
+ }
+ return ret;
+}
+
+// Prepare two-pass stats i/o according to the 'pass' bitmask:
+//   pass & 1: pass-1 encode — open the log for writing
+//   pass & 2: pass-2 encode — read the pass-1 log into the codec
+// NOTE(review): with pass==3 a failed read is logged but then masked when
+// the subsequent open succeeds (ret is reassigned) — confirm intended.
+int FFStream::init_stats_file()
+{
+ int ret = 0;
+ if( (pass & 2) && (ret = read_stats_file()) < 0 )
+ ff_err(ret, "stat file read: %s", stats_filename);
+ if( (pass & 1) && (ret=open_stats_file()) < 0 )
+ ff_err(ret, "stat file open: %s", stats_filename);
+ return ret >= 0 ? 0 : ret;
+}
+
int FFStream::seek(int64_t no, double rate)
{
- int64_t tstmp = -INT64_MAX+1;
// default ffmpeg native seek
int npkts = 1;
int64_t pos = no, pkt_pos = -1;
npkts = MAX_RETRY;
}
}
- if( pos > 0 && st->time_base.num > 0 ) {
- double secs = pos / rate;
- tstmp = secs * st->time_base.den / st->time_base.num;
- if( nudge != AV_NOPTS_VALUE ) tstmp += nudge;
- }
+ if( pos == curr_pos ) return 0;
+ double secs = pos < 0 ? 0. : pos / rate;
+ AVRational time_base = st->time_base;
+ int64_t tstmp = time_base.num > 0 ? secs * time_base.den/time_base.num : 0;
+ if( !tstmp ) {
+ if( st->nb_index_entries > 0 ) tstmp = st->index_entries[0].timestamp;
+ else if( st->start_time != AV_NOPTS_VALUE ) tstmp = st->start_time;
+ else if( st->first_dts != AV_NOPTS_VALUE ) tstmp = st->first_dts;
+ else tstmp = INT64_MIN+1;
+ }
+ else if( nudge != AV_NOPTS_VALUE ) tstmp += nudge;
+ int idx = st->index;
+#if 0
+// seek all streams using the default timebase.
+// this is how ffmpeg and ffplay work. stream seeks are less tested.
+ tstmp = av_rescale_q(tstmp, time_base, AV_TIME_BASE_Q);
+ idx = -1;
+#endif
+
avcodec_flush_buffers(avctx);
avformat_flush(fmt_ctx);
#if 0
seek = pkt_pos;
flags = AVSEEK_FLAG_BYTE;
}
- int ret = avformat_seek_file(fmt_ctx, st->index, -INT64_MAX, seek, INT64_MAX, flags);
+ int ret = avformat_seek_file(fmt_ctx, st->index, -INT64_MAX, seek, INT64_MAX, flags);
#else
- int ret = av_seek_frame(fmt_ctx, st->index, tstmp, AVSEEK_FLAG_ANY);
+// finds the first index frame below the target time
+ int flags = AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY;
+ int ret = av_seek_frame(fmt_ctx, idx, tstmp, flags);
#endif
int retry = MAX_RETRY;
while( ret >= 0 ) {
}
}
if( ret < 0 ) {
-printf("** seek fail %ld, %ld\n", pos, tstmp);
+printf("** seek fail %jd, %jd\n", pos, tstmp);
seeked = need_packet = 0;
- st_eof(flushed=1);
+ st_eof(flushed=1);
return -1;
}
//printf("seeked pos = %ld, %ld\n", pos, tstmp);
channel0 = channels = 0;
sample_rate = 0;
mbsz = 0;
+ frame_sz = AUDIO_MIN_FRAME_SZ;
length = 0;
resample_context = 0;
+ swr_ichs = swr_ifmt = swr_irate = 0;
aud_bfr_sz = 0;
aud_bfr = 0;
delete [] bfr;
}
+// Lazily (re)configure the libswresample context that converts decoder
+// output (ichs channels, sample format ifmt, rate irate) to this stream's
+// interleaved float output (channels / AV_SAMPLE_FMT_FLT / sample_rate).
+// Reuses the existing context when the input parameters are unchanged, and
+// leaves resample_context NULL when input already matches the output
+// (pass-through, no conversion needed).
+void FFAudioStream::init_swr(int ichs, int ifmt, int irate)
+{
+ if( resample_context ) {
+ if( swr_ichs == ichs && swr_ifmt == ifmt && swr_irate == irate )
+ return;
+ swr_free(&resample_context);
+ }
+ swr_ichs = ichs; swr_ifmt = ifmt; swr_irate = irate;
+ if( ichs == channels && ifmt == AV_SAMPLE_FMT_FLT && irate == sample_rate )
+ return;
+// fall back to a dense mask of the first n speakers when ffmpeg has no
+// default layout for this channel count
+ uint64_t ilayout = av_get_default_channel_layout(ichs);
+ if( !ilayout ) ilayout = ((uint64_t)1<<ichs) - 1;
+ uint64_t olayout = av_get_default_channel_layout(channels);
+ if( !olayout ) olayout = ((uint64_t)1<<channels) - 1;
+ resample_context = swr_alloc_set_opts(NULL,
+ olayout, AV_SAMPLE_FMT_FLT, sample_rate,
+ ilayout, (AVSampleFormat)ifmt, irate,
+ 0, NULL);
+// NOTE(review): swr_init result is unchecked — a failed init leaves an
+// unusable context; confirm downstream conversion tolerates this
+ if( resample_context )
+ swr_init(resample_context);
+}
+
int FFAudioStream::get_samples(float *&samples, uint8_t **data, int len)
{
samples = *(float **)data;
// First-use setup for audio encoding: choose the encoder frame size, then
// defer to the base-class activation.  Returns the 'writing' state.
int FFAudioStream::encode_activate()
{
if( writing >= 0 ) return writing;
+// codec never opened: mark the stream as not writable instead of crashing
+ if( !avctx->codec ) return writing = 0;
// variable-frame-size codecs accept any chunk; use a large one
frame_sz = avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
10000 : avctx->frame_size;
return FFStream::encode_activate();
}
if( mbsz < len ) mbsz = len;
int64_t end_pos = pos + len;
- int ret = 0;
- for( int i=0; ret>=0 && !flushed && curr_pos<end_pos && i<MAX_RETRY; ++i ) {
+ int ret = 0, i = len / frame_sz + MAX_RETRY;
+ while( ret>=0 && !flushed && curr_pos<end_pos && --i>=0 ) {
ret = read_frame(frame);
- if( ret > 0 ) {
+ if( ret > 0 && frame->nb_samples > 0 ) {
+ init_swr(frame->channels, frame->format, frame->sample_rate);
load_history(&frame->extended_data[0], frame->nb_samples);
curr_pos += frame->nb_samples;
}
int FFAudioStream::audio_seek(int64_t pos)
{
- if( decode_activate() < 0 ) return -1;
+ if( decode_activate() <= 0 ) return -1;
if( !st->codecpar ) return -1;
if( in_history(pos) ) return 0;
if( pos == curr_pos ) return 0;
return FFStream::encode_frame(frame);
}
+// Audio-specific packet writer; currently just forwards to the base class
+// (exists so audio and video overrides stay parallel).
+int FFAudioStream::write_packet(FFPacket &pkt)
+{
+ return FFStream::write_packet(pkt);
+}
+
void FFAudioStream::load_markers()
{
IndexState *index_state = ffmpeg->file_base->asset->index_state;
fprintf(stderr, "FFVideoStream::load: av_frame_alloc failed\n");
return -1;
}
- for( int i=0; ret>=0 && !flushed && curr_pos<=pos && i<MAX_RETRY; ++i ) {
+ int i = MAX_RETRY + pos - curr_pos;
+ while( ret>=0 && !flushed && curr_pos<=pos && --i>=0 ) {
ret = read_frame(frame);
if( ret > 0 ) ++curr_pos;
}
int FFVideoStream::video_seek(int64_t pos)
{
- if( decode_activate() < 0 ) return -1;
+ if( decode_activate() <= 0 ) return -1;
if( !st->codecpar ) return -1;
if( pos == curr_pos-1 && !seeked ) return 0;
// if close enough, just read up to current
return FFStream::encode_frame(frame);
}
+// Video packet writer: for constant-frame-rate muxers (no AVFMT_VARIABLE_FPS)
+// stamp each packet with a duration of one time_base tick before writing.
+int FFVideoStream::write_packet(FFPacket &pkt)
+{
+ if( !(ffmpeg->fmt_ctx->oformat->flags & AVFMT_VARIABLE_FPS) )
+ pkt->duration = 1;
+ return FFStream::write_packet(pkt);
+}
+
AVPixelFormat FFVideoConvert::color_model_to_pix_fmt(int color_model)
{
switch( color_model ) {
case BC_RGB161616: return AV_PIX_FMT_RGB48LE;
case BC_RGBA16161616: return AV_PIX_FMT_RGBA64LE;
case BC_AYUV16161616: return AV_PIX_FMT_AYUV64LE;
+ case BC_GBRP: return AV_PIX_FMT_GBRP;
default: break;
}
case AV_PIX_FMT_RGB48LE: return BC_RGB161616;
case AV_PIX_FMT_RGBA64LE: return BC_RGBA16161616;
case AV_PIX_FMT_AYUV64LE: return BC_AYUV16161616;
+ case AV_PIX_FMT_GBRP: return BC_GBRP;
default: break;
}
estimated = 1;
}
}
- if( estimated )
+ static int notified = 0;
+ if( !notified && estimated ) {
+ notified = 1;
printf("FFMPEG::open_decoder: some stream times estimated\n");
+ }
ff_lock("FFMPEG::open_decoder");
int ret = 0, bad_time = 0;
aud->sample_rate = avpar->sample_rate;
double secs = to_secs(st->duration, st->time_base);
aud->length = secs * aud->sample_rate;
- if( avpar->format != AV_SAMPLE_FMT_FLT ) {
- uint64_t layout = av_get_default_channel_layout(avpar->channels);
- if( !layout ) layout = ((uint64_t)1<<aud->channels) - 1;
- AVSampleFormat sample_format = (AVSampleFormat)avpar->format;
- aud->resample_context = swr_alloc_set_opts(NULL,
- layout, AV_SAMPLE_FMT_FLT, avpar->sample_rate,
- layout, sample_format, avpar->sample_rate,
- 0, NULL);
- swr_init(aud->resample_context);
- }
+ aud->init_swr(aud->channels, avpar->format, aud->sample_rate);
aud->nudge = st->start_time;
aud->reading = -1;
if( opt_audio_filter )
sprintf(arg, "%d", asset->ff_audio_bitrate);
av_dict_set(&sopts, "b", arg, 0);
}
+ else if( asset->ff_audio_quality >= 0 ) {
+ ctx->global_quality = asset->ff_audio_quality * FF_QP2LAMBDA;
+ ctx->qmin = ctx->qmax = asset->ff_audio_quality;
+ ctx->mb_lmin = ctx->qmin * FF_QP2LAMBDA;
+ ctx->mb_lmax = ctx->qmax * FF_QP2LAMBDA;
+ ctx->flags |= CODEC_FLAG_QSCALE;
+ char arg[BCSTRLEN];
+ av_dict_set(&sopts, "flags", "+qscale", 0);
+ sprintf(arg, "%d", asset->ff_audio_quality);
+ av_dict_set(&sopts, "qscale", arg, 0);
+ sprintf(arg, "%d", ctx->global_quality);
+ av_dict_set(&sopts, "global_quality", arg, 0);
+ }
int aidx = ffaudio.size();
int fidx = aidx + ffvideo.size();
FFAudioStream *aud = new FFAudioStream(this, st, aidx, fidx);
vstrm_index.append(ffidx(vidx, 0));
vid->avctx = ctx; ffvideo.append(vid); fst = vid;
vid->width = asset->width;
- ctx->width = (vid->width+3) & ~3;
vid->height = asset->height;
- ctx->height = (vid->height+3) & ~3;
vid->frame_rate = asset->frame_rate;
+ AVPixelFormat pix_fmt = codec->pix_fmts ?
+ codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
+ AVDictionaryEntry *tag = av_dict_get(sopts, "cin_pix_fmt", NULL, 0);
+ if( tag != 0 ) {
+ int avfmt = av_get_pix_fmt(tag->value);
+ if( avfmt < 0 ) {
+ eprintf(_("cin_pix_fmt unknown = %s\n"), tag->value);
+ ret = 1;
+ break;
+ }
+ pix_fmt = (AVPixelFormat)avfmt;
+ }
+ ctx->pix_fmt = pix_fmt;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ int mask_w = (1<<desc->log2_chroma_w)-1;
+ ctx->width = (vid->width+mask_w) & ~mask_w;
+ int mask_h = (1<<desc->log2_chroma_h)-1;
+ ctx->height = (vid->height+mask_h) & ~mask_h;
ctx->sample_aspect_ratio = to_sample_aspect_ratio(asset);
- ctx->pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
AVRational frame_rate = check_frame_rate(codec, vid->frame_rate);
if( !frame_rate.num || !frame_rate.den ) {
eprintf(_("check_frame_rate failed %s\n"), filename);
ret = 1;
break;
}
+ av_reduce(&frame_rate.num, &frame_rate.den,
+ frame_rate.num, frame_rate.den, INT_MAX);
+ ctx->framerate = (AVRational) { frame_rate.num, frame_rate.den };
ctx->time_base = (AVRational) { frame_rate.den, frame_rate.num };
+ st->avg_frame_rate = frame_rate;
st->time_base = ctx->time_base;
vid->writing = -1;
vid->interlaced = asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
eprintf(_("not audio/video, %s:%s\n"), codec_name, filename);
ret = 1;
}
+
+ if( ctx ) {
+ AVDictionaryEntry *tag;
+ if( (tag=av_dict_get(sopts, "cin_stats_filename", NULL, 0)) != 0 ) {
+ char suffix[BCSTRLEN]; sprintf(suffix,"-%d.log",fst->fidx);
+ fst->stats_filename = cstrcat(2, tag->value, suffix);
+ }
+ if( (tag=av_dict_get(sopts, "flags", NULL, 0)) != 0 ) {
+ int pass = fst->pass;
+ char *cp = tag->value;
+ while( *cp ) {
+ int ch = *cp++, pfx = ch=='-' ? -1 : ch=='+' ? 1 : 0;
+ if( !isalnum(!pfx ? ch : (ch=*cp++)) ) continue;
+ char id[BCSTRLEN], *bp = id, *ep = bp+sizeof(id)-1;
+ for( *bp++=ch; isalnum(ch=*cp); ++cp )
+ if( bp < ep ) *bp++ = ch;
+ *bp = 0;
+ if( !strcmp(id, "pass1") ) {
+ pass = pfx<0 ? (pass&~1) : pfx>0 ? (pass|1) : 1;
+ }
+ else if( !strcmp(id, "pass2") ) {
+ pass = pfx<0 ? (pass&~2) : pfx>0 ? (pass|2) : 2;
+ }
+ }
+ if( (fst->pass=pass) ) {
+ if( pass & 1 ) ctx->flags |= AV_CODEC_FLAG_PASS1;
+ if( pass & 2 ) ctx->flags |= AV_CODEC_FLAG_PASS2;
+ }
+ }
+ }
}
if( !ret ) {
if( fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER )
ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
+ if( fst->stats_filename && (ret=fst->init_stats_file()) )
+ eprintf(_("error: stats file = %s\n"), fst->stats_filename);
+ }
+ if( !ret ) {
av_dict_set(&sopts, "cin_bitrate", 0, 0);
av_dict_set(&sopts, "cin_quality", 0, 0);
+ if( !av_dict_get(sopts, "threads", NULL, 0) )
+ ctx->thread_count = ff_cpus();
ret = avcodec_open2(ctx, codec, &sopts);
if( ret >= 0 ) {
ret = avcodec_parameters_from_context(st->codecpar, ctx);
if( ret < 0 )
fprintf(stderr, "Could not copy the stream parameters\n");
}
+ if( ret >= 0 ) {
+_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+ ret = avcodec_copy_context(st->codec, ctx);
+_Pragma("GCC diagnostic warning \"-Wdeprecated-declarations\"")
+ if( ret < 0 )
+ fprintf(stderr, "Could not copy the stream context\n");
+ }
if( ret < 0 ) {
ff_err(ret,"FFMPEG::open_encoder");
eprintf(_("open failed %s:%s\n"), codec_name, filename);
if( st->start_time == AV_NOPTS_VALUE ) continue;
int vidx = ffvideo.size();
while( --vidx >= 0 && ffvideo[vidx]->fidx != i );
- if( vidx >= 0 && ffvideo[vidx]->nudge != AV_NOPTS_VALUE ) continue;
+ if( vidx < 0 ) continue;
+ if( ffvideo[vidx]->nudge != AV_NOPTS_VALUE ) continue;
if( vstart_time < st->start_time )
vstart_time = st->start_time;
break; }
if( st->start_time == AV_NOPTS_VALUE ) continue;
int aidx = ffaudio.size();
while( --aidx >= 0 && ffaudio[aidx]->fidx != i );
- if( aidx >= 0 && ffaudio[aidx]->nudge != AV_NOPTS_VALUE ) continue;
+ if( aidx < 0 ) continue;
+ if( ffaudio[aidx]->frame_sz < avpar->frame_size )
+ ffaudio[aidx]->frame_sz = avpar->frame_size;
+ if( ffaudio[aidx]->nudge != AV_NOPTS_VALUE ) continue;
if( astart_time < st->start_time )
astart_time = st->start_time;
break; }
return ffvideo[stream]->st->id;
}
+// Returns 1 when the video stream is tagged with MPEG (limited/studio)
+// color range, 0 for full range or unspecified.
+int FFMPEG::ff_video_mpeg_color_range(int stream)
+{
+ return ffvideo[stream]->st->codecpar->color_range == AVCOL_RANGE_MPEG ? 1 : 0;
+}
int FFMPEG::ff_cpus()
{
}
if( ret >= 0 ) {
avcodec_parameters_to_context(avctx, st->codecpar);
+ if( !av_dict_get(copts, "threads", NULL, 0) )
+ avctx->thread_count = ff_cpus();
ret = avcodec_open2(avctx, decoder, &copts);
}
av_dict_free(&copts);
- if( ret < 0 ) {
- fprintf(stderr,"FFMPEG::scan: ");
- fprintf(stderr,_("codec open failed\n"));
- continue;
- }
- AVCodecParameters *avpar = st->codecpar;
- switch( avpar->codec_type ) {
- case AVMEDIA_TYPE_VIDEO: {
- int vidx = ffvideo.size();
- while( --vidx>=0 && ffvideo[vidx]->fidx != i );
- if( vidx < 0 ) break;
- ffvideo[vidx]->avctx = avctx;
- break; }
- case AVMEDIA_TYPE_AUDIO: {
- int aidx = ffaudio.size();
- while( --aidx>=0 && ffaudio[aidx]->fidx != i );
- if( aidx < 0 ) continue;
- ffaudio[aidx]->avctx = avctx;
- break; }
- default: break;
+ if( ret >= 0 ) {
+ AVCodecParameters *avpar = st->codecpar;
+ switch( avpar->codec_type ) {
+ case AVMEDIA_TYPE_VIDEO: {
+ int vidx = ffvideo.size();
+ while( --vidx>=0 && ffvideo[vidx]->fidx != i );
+ if( vidx < 0 ) break;
+ ffvideo[vidx]->avctx = avctx;
+ continue; }
+ case AVMEDIA_TYPE_AUDIO: {
+ int aidx = ffaudio.size();
+ while( --aidx>=0 && ffaudio[aidx]->fidx != i );
+ if( aidx < 0 ) break;
+ ffaudio[aidx]->avctx = avctx;
+ continue; }
+ default: break;
+ }
}
+ fprintf(stderr,"FFMPEG::scan: ");
+ fprintf(stderr,_("codec open failed\n"));
+ avcodec_free_context(&avctx);
}
decode_activate();
while( --vidx>=0 && ffvideo[vidx]->fidx != i );
if( vidx < 0 ) break;
FFVideoStream *vid = ffvideo[vidx];
+ if( !vid->avctx ) break;
int64_t tstmp = pkt.dts;
if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.pts;
if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
while( --aidx>=0 && ffaudio[aidx]->fidx != i );
if( aidx < 0 ) break;
FFAudioStream *aud = ffaudio[aidx];
+ if( !aud->avctx ) break;
int64_t tstmp = pkt.pts;
if( tstmp == AV_NOPTS_VALUE ) tstmp = pkt.dts;
if( tstmp != AV_NOPTS_VALUE && (pkt.flags & AV_PKT_FLAG_KEY) && pkt.pos > 0 ) {
index_state->pad_data(ch, nch, aud->curr_pos);
}
while( (ret=aud->decode_frame(frame)) > 0 ) {
- if( frame->channels != nch ) break;
+ //if( frame->channels != nch ) break;
+ aud->init_swr(frame->channels, frame->format, frame->sample_rate);
float *samples;
int len = aud->get_samples(samples,
&frame->extended_data[0], frame->nb_samples);