From 6a85ddeaab7b4a87cffb57f105b7a5a96a6e2ff4 Mon Sep 17 00:00:00 2001 From: Good Guy Date: Wed, 5 Aug 2020 18:46:51 -0600 Subject: [PATCH] rework ffmpeg a/v filter setup, rework ffmpeg hw decode with vid filters, rework ffmpeg frm auto-rotate, rework ydiff for rgb/yuv 16bit depth --- cinelerra-5.1/cinelerra/ffmpeg.C | 348 +++++++++++++++++++++---------- cinelerra-5.1/cinelerra/ffmpeg.h | 20 +- cinelerra-5.1/cinelerra/ydiff.C | 97 +++++++-- 3 files changed, 329 insertions(+), 136 deletions(-) diff --git a/cinelerra-5.1/cinelerra/ffmpeg.C b/cinelerra-5.1/cinelerra/ffmpeg.C index ba0e0a34..b3915cfc 100644 --- a/cinelerra-5.1/cinelerra/ffmpeg.C +++ b/cinelerra-5.1/cinelerra/ffmpeg.C @@ -263,6 +263,8 @@ FFStream::FFStream(FFMPEG *ffmpeg, AVStream *st, int fidx) fmt_ctx = 0; avctx = 0; filter_graph = 0; + filt_ctx = 0; + filt_id = 0; buffersrc_ctx = 0; buffersink_ctx = 0; frm_count = 0; @@ -1059,10 +1061,12 @@ FFVideoStream::FFVideoStream(FFMPEG *ffmpeg, AVStream *strm, int idx, int fidx) top_field_first = 0; color_space = -1; color_range = -1; + fconvert_ctx = 0; } FFVideoStream::~FFVideoStream() { + if( fconvert_ctx ) sws_freeContext(fconvert_ctx); } AVHWDeviceType FFVideoStream::decode_hw_activate() @@ -1261,6 +1265,78 @@ int FFVideoStream::init_frame(AVFrame *picture) return ret; } +int FFVideoStream::convert_hw_frame(AVFrame *ifrm, AVFrame *ofrm) +{ + AVPixelFormat ifmt = (AVPixelFormat)ifrm->format; + AVPixelFormat ofmt = (AVPixelFormat)st->codecpar->format; + ofrm->width = ifrm->width; + ofrm->height = ifrm->height; + ofrm->format = ofmt; + int ret = av_frame_get_buffer(ofrm, 32); + if( ret < 0 ) { + ff_err(ret, "FFVideoStream::convert_hw_frame:" + " av_frame_get_buffer failed\n"); + return -1; + } + fconvert_ctx = sws_getCachedContext(fconvert_ctx, + ifrm->width, ifrm->height, ifmt, + ofrm->width, ofrm->height, ofmt, + SWS_POINT, NULL, NULL, NULL); + if( !fconvert_ctx ) { + ff_err(AVERROR(EINVAL), "FFVideoStream::convert_hw_frame:" + " sws_getCachedContext() failed\n"); + return -1; + } + int codec_range = st->codecpar->color_range; + int codec_space = st->codecpar->color_space; + const int *codec_table = sws_getCoefficients(codec_space); + int *inv_table, *table, src_range, dst_range; + int brightness, contrast, saturation; + if( !sws_getColorspaceDetails(fconvert_ctx, + &inv_table, &src_range, &table, &dst_range, + &brightness, &contrast, &saturation) ) { + if( src_range != codec_range || dst_range != codec_range || + inv_table != codec_table || table != codec_table ) + sws_setColorspaceDetails(fconvert_ctx, + codec_table, codec_range, codec_table, codec_range, + brightness, contrast, saturation); + } + ret = sws_scale(fconvert_ctx, + ifrm->data, ifrm->linesize, 0, ifrm->height, + ofrm->data, ofrm->linesize); + if( ret < 0 ) { + ff_err(ret, "FFVideoStream::convert_hw_frame:" + " sws_scale() failed\nfile: %s\n", + ffmpeg->fmt_ctx->url); + return -1; + } + return 0; +} + +int FFVideoStream::load_filter(AVFrame *frame) +{ + AVPixelFormat pix_fmt = (AVPixelFormat)frame->format; + if( pix_fmt == hw_pixfmt ) { + AVFrame *hw_frame = this->frame; + av_frame_unref(hw_frame); + int ret = av_hwframe_transfer_data(hw_frame, frame, 0); + if( ret < 0 ) { + eprintf(_("Error retrieving data from GPU to CPU\nfile: %s\n"), + ffmpeg->fmt_ctx->url); + return -1; + } + av_frame_unref(frame); + ret = convert_hw_frame(hw_frame, frame); + if( ret < 0 ) { + eprintf(_("Error converting data from GPU to CPU\nfile: %s\n"), + ffmpeg->fmt_ctx->url); + return -1; + } + av_frame_unref(hw_frame); + } + return 
FFStream::load_filter(frame); +} + int FFVideoStream::encode(VFrame *vframe) { if( encode_activate() <= 0 ) return -1; @@ -1660,44 +1736,6 @@ IndexMarks *FFVideoStream::get_markers() return !index_state ? 0 : index_state->video_markers[idx]; } -double FFVideoStream::get_rotation_angle() -{ - int size = 0; - int *matrix = (int*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size); - int len = size/sizeof(*matrix); - if( !matrix || len < 5 ) return 0; - const double s = 1/65536.; - double theta = (!matrix[0] && !matrix[3]) || (!matrix[1] && !matrix[4]) ? 0 : - atan2( s*matrix[1] / hypot(s*matrix[1], s*matrix[4]), - s*matrix[0] / hypot(s*matrix[0], s*matrix[3])) * 180/M_PI; - return theta; -} - -void FFVideoStream::flip() -{ - transpose = 0; - if( !ffmpeg->file_base ) return; - double theta = get_rotation_angle(), tolerance = 1; - if( fabs(theta-0) < tolerance ) return; - if( fabs(theta-90) < tolerance ) { - create_filter("transpose=clock", st->codecpar); - transpose = 1; - } - else if( fabs(theta-180) < tolerance ) { - create_filter("hflip", st->codecpar); - create_filter("vflip", st->codecpar); - } - else if (fabs(theta-270) < tolerance ) { - create_filter("transpose=cclock", st->codecpar); - transpose = 1; - } - else { - char rotate[BCSTRLEN]; - sprintf(rotate, "rotate=%f", theta*M_PI/180.); - create_filter(rotate, st->codecpar); - } -} - FFMPEG::FFMPEG(FileBase *file_base) { @@ -2533,10 +2571,7 @@ int FFMPEG::open_decoder() vid->aspect_ratio = (double)st->sample_aspect_ratio.num / st->sample_aspect_ratio.den; vid->nudge = st->start_time; vid->reading = -1; - if( opt_video_filter ) - ret = vid->create_filter(opt_video_filter, avpar); - if( file_base && file_base->file->preferences->auto_rotate ) - vid->flip(); + ret = vid->create_filter(opt_video_filter); break; } case AVMEDIA_TYPE_AUDIO: { if( avpar->channels < 1 ) continue; @@ -2555,8 +2590,7 @@ int FFMPEG::open_decoder() aud->init_swr(aud->channels, avpar->format, aud->sample_rate); aud->nudge = st->start_time; aud->reading = -1; - if( opt_audio_filter ) - ret = aud->create_filter(opt_audio_filter, avpar); + ret = aud->create_filter(opt_audio_filter); break; } default: break; } @@ -3447,22 +3481,72 @@ Preferences *FFMPEG::ff_prefs() return !file_base ? 0 : file_base->file->preferences; } -int FFVideoStream::create_filter(const char *filter_spec, AVCodecParameters *avpar) +double FFVideoStream::get_rotation_angle() { + int size = 0; + int *matrix = (int*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size); + int len = size/sizeof(*matrix); + if( !matrix || len < 5 ) return 0; + const double s = 1/65536.; + double theta = (!matrix[0] && !matrix[3]) || (!matrix[1] && !matrix[4]) ? 
0 : + atan2( s*matrix[1] / hypot(s*matrix[1], s*matrix[4]), + s*matrix[0] / hypot(s*matrix[0], s*matrix[3])) * 180/M_PI; + return theta; +} + +int FFVideoStream::flip(double theta) +{ + int ret = 0; + transpose = 0; + Preferences *preferences = ffmpeg->ff_prefs(); + if( !preferences || !preferences->auto_rotate ) return ret; + double tolerance = 1; + if( fabs(theta-0) < tolerance ) return ret; + if( (theta=fmod(theta, 360)) < 0 ) theta += 360; + if( fabs(theta-90) < tolerance ) { + if( (ret = insert_filter("transpose", "clock")) < 0 ) + return ret; + transpose = 1; + } + else if( fabs(theta-180) < tolerance ) { + if( (ret=insert_filter("hflip", 0)) < 0 ) + return ret; + if( (ret=insert_filter("vflip", 0)) < 0 ) + return ret; + } + else if (fabs(theta-270) < tolerance ) { + if( (ret=insert_filter("transpose", "cclock")) < 0 ) + return ret; + transpose = 1; + } + else { + char angle[BCSTRLEN]; + sprintf(angle, "%f", theta*M_PI/180.); + if( (ret=insert_filter("rotate", angle)) < 0 ) + return ret; + } + return 1; +} + +int FFVideoStream::create_filter(const char *filter_spec) +{ + double theta = get_rotation_angle(); + if( !theta && !filter_spec ) + return 0; avfilter_register_all(); - const char *sp = filter_spec; - char filter_name[BCSTRLEN], *np = filter_name; - int i = sizeof(filter_name); - while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++; - *np = 0; - const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name); - if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) { - ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec); - return -1; + if( filter_spec ) { + const char *sp = filter_spec; + char filter_name[BCSTRLEN], *np = filter_name; + int i = sizeof(filter_name); + while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++; + *np = 0; + const AVFilter *filter = !filter_name[0] ? 
0 : avfilter_get_by_name(filter_name); + if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_VIDEO ) { + ff_err(AVERROR(EINVAL), "FFVideoStream::create_filter: %s\n", filter_spec); + return -1; + } } - filter_graph = avfilter_graph_alloc(); - const AVFilter *buffersrc = avfilter_get_by_name("buffer"); - const AVFilter *buffersink = avfilter_get_by_name("buffersink"); + AVCodecParameters *avpar = st->codecpar; int sa_num = avpar->sample_aspect_ratio.num; if( !sa_num ) sa_num = 1; int sa_den = avpar->sample_aspect_ratio.den; @@ -3474,51 +3558,66 @@ int FFVideoStream::create_filter(const char *filter_spec, AVCodecParameters *avp "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", avpar->width, avpar->height, (int)pix_fmt, st->time_base.num, st->time_base.den, sa_num, sa_den); + if( ret >= 0 ) { + filt_ctx = 0; + ret = insert_filter("buffer", args, "in"); + buffersrc_ctx = filt_ctx; + } if( ret >= 0 ) - ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", - args, NULL, filter_graph); - if( ret >= 0 ) - ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", - NULL, NULL, filter_graph); - if( ret >= 0 ) + ret = flip(theta); + AVFilterContext *fsrc = filt_ctx; + if( ret >= 0 ) { + filt_ctx = 0; + ret = insert_filter("buffersink", 0, "out"); + buffersink_ctx = filt_ctx; + } + if( ret >= 0 ) { ret = av_opt_set_bin(buffersink_ctx, "pix_fmts", (uint8_t*)&pix_fmt, sizeof(pix_fmt), AV_OPT_SEARCH_CHILDREN); - if( ret < 0 ) - ff_err(ret, "FFVideoStream::create_filter"); + } + if( ret >= 0 ) + ret = config_filters(filter_spec, fsrc); else - ret = FFStream::create_filter(filter_spec); + ff_err(ret, "FFVideoStream::create_filter"); return ret >= 0 ? 0 : -1; } -int FFAudioStream::create_filter(const char *filter_spec, AVCodecParameters *avpar) +int FFAudioStream::create_filter(const char *filter_spec) { + if( !filter_spec ) + return 0; avfilter_register_all(); - const char *sp = filter_spec; - char filter_name[BCSTRLEN], *np = filter_name; - int i = sizeof(filter_name); - while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++; - *np = 0; - const AVFilter *filter = !filter_name[0] ? 0 : avfilter_get_by_name(filter_name); - if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) { - ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec); - return -1; + if( filter_spec ) { + const char *sp = filter_spec; + char filter_name[BCSTRLEN], *np = filter_name; + int i = sizeof(filter_name); + while( --i>=0 && *sp!=0 && !strchr(" \t:=,",*sp) ) *np++ = *sp++; + *np = 0; + const AVFilter *filter = !filter_name[0] ? 
0 : avfilter_get_by_name(filter_name); + if( !filter || avfilter_pad_get_type(filter->inputs,0) != AVMEDIA_TYPE_AUDIO ) { + ff_err(AVERROR(EINVAL), "FFAudioStream::create_filter: %s\n", filter_spec); + return -1; + } } - filter_graph = avfilter_graph_alloc(); - const AVFilter *buffersrc = avfilter_get_by_name("abuffer"); - const AVFilter *buffersink = avfilter_get_by_name("abuffersink"); int ret = 0; char args[BCTEXTLEN]; + AVCodecParameters *avpar = st->codecpar; AVSampleFormat sample_fmt = (AVSampleFormat)avpar->format; snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%jx", st->time_base.num, st->time_base.den, avpar->sample_rate, av_get_sample_fmt_name(sample_fmt), avpar->channel_layout); - if( ret >= 0 ) - ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", - args, NULL, filter_graph); - if( ret >= 0 ) - ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", - NULL, NULL, filter_graph); + if( ret >= 0 ) { + filt_ctx = 0; + ret = insert_filter("abuffer", args, "in"); + buffersrc_ctx = filt_ctx; + } + AVFilterContext *fsrc = filt_ctx; + if( ret >= 0 ) { + filt_ctx = 0; + ret = insert_filter("abuffersink", 0, "out"); + buffersink_ctx = filt_ctx; + } if( ret >= 0 ) ret = av_opt_set_bin(buffersink_ctx, "sample_fmts", (uint8_t*)&sample_fmt, sizeof(sample_fmt), @@ -3531,42 +3630,75 @@ int FFAudioStream::create_filter(const char *filter_spec, AVCodecParameters *avp ret = av_opt_set_bin(buffersink_ctx, "sample_rates", (uint8_t*)&sample_rate, sizeof(sample_rate), AV_OPT_SEARCH_CHILDREN); - if( ret < 0 ) - ff_err(ret, "FFAudioStream::create_filter"); + if( ret >= 0 ) + ret = config_filters(filter_spec, fsrc); else - ret = FFStream::create_filter(filter_spec); + ff_err(ret, "FFAudioStream::create_filter"); return ret >= 0 ? 0 : -1; } -int FFStream::create_filter(const char *filter_spec) +int FFStream::insert_filter(const char *name, const char *arg, const char *inst_name) { - /* Endpoints for the filter graph. */ - AVFilterInOut *outputs = avfilter_inout_alloc(); - outputs->name = av_strdup("in"); - outputs->filter_ctx = buffersrc_ctx; - outputs->pad_idx = 0; - outputs->next = 0; - - AVFilterInOut *inputs = avfilter_inout_alloc(); - inputs->name = av_strdup("out"); - inputs->filter_ctx = buffersink_ctx; - inputs->pad_idx = 0; - inputs->next = 0; - - int ret = !outputs->name || !inputs->name ? -1 : 0; + const AVFilter *filter = avfilter_get_by_name(name); + if( !filter ) return -1; + char filt_inst[BCSTRLEN]; + if( !inst_name ) { + snprintf(filt_inst, sizeof(filt_inst), "%s_%d", name, ++filt_id); + inst_name = filt_inst; + } + if( !filter_graph ) + filter_graph = avfilter_graph_alloc(); + AVFilterContext *fctx = 0; + int ret = avfilter_graph_create_filter(&fctx, + filter, inst_name, arg, NULL, filter_graph); + if( ret >= 0 && filt_ctx ) + ret = avfilter_link(filt_ctx, 0, fctx, 0); if( ret >= 0 ) - ret = avfilter_graph_parse_ptr(filter_graph, filter_spec, - &inputs, &outputs, NULL); + filt_ctx = fctx; + else + avfilter_free(fctx); + return ret; +} + +int FFStream::config_filters(const char *filter_spec, AVFilterContext *fsrc) +{ + int ret = 0; + AVFilterContext *fsink = buffersink_ctx; + if( filter_spec ) { + /* Endpoints for the filter graph. 
*/ + AVFilterInOut *outputs = avfilter_inout_alloc(); + AVFilterInOut *inputs = avfilter_inout_alloc(); + if( !inputs || !outputs ) ret = -1; + if( ret >= 0 ) { + outputs->filter_ctx = fsrc; + outputs->pad_idx = 0; + outputs->next = 0; + if( !(outputs->name = av_strdup(fsrc->name)) ) ret = -1; + } + if( ret >= 0 ) { + inputs->filter_ctx = fsink; + inputs->pad_idx = 0; + inputs->next = 0; + if( !(inputs->name = av_strdup(fsink->name)) ) ret = -1; + } + if( ret >= 0 ) { + int len = strlen(fsrc->name)+2 + strlen(filter_spec) + 1; + char spec[len]; sprintf(spec, "[%s]%s", fsrc->name, filter_spec); + ret = avfilter_graph_parse_ptr(filter_graph, spec, + &inputs, &outputs, NULL); + } + avfilter_inout_free(&inputs); + avfilter_inout_free(&outputs); + } + else + ret = avfilter_link(fsrc, 0, fsink, 0); if( ret >= 0 ) ret = avfilter_graph_config(filter_graph, NULL); - if( ret < 0 ) { ff_err(ret, "FFStream::create_filter"); avfilter_graph_free(&filter_graph); filter_graph = 0; } - avfilter_inout_free(&inputs); - avfilter_inout_free(&outputs); return ret; } diff --git a/cinelerra-5.1/cinelerra/ffmpeg.h b/cinelerra-5.1/cinelerra/ffmpeg.h index 18516d36..1a514e1d 100644 --- a/cinelerra-5.1/cinelerra/ffmpeg.h +++ b/cinelerra-5.1/cinelerra/ffmpeg.h @@ -99,11 +99,12 @@ public: virtual int decode_frame(AVFrame *frame) = 0; virtual int encode_frame(AVFrame *frame) = 0; virtual int init_frame(AVFrame *frame) = 0; - virtual int create_filter(const char *filter_spec, AVCodecParameters *avpar) = 0; + virtual int create_filter(const char *filter_spec) = 0; virtual void load_markers() = 0; virtual IndexMarks *get_markers() = 0; - int create_filter(const char *filter_spec); - int load_filter(AVFrame *frame); + int insert_filter(const char *name, const char *arg, const char *inst_name=0); + int config_filters(const char *filter_spec, AVFilterContext *fsrc); + virtual int load_filter(AVFrame *frame); int read_filter(AVFrame *frame); int read_frame(AVFrame *frame); int open_stats_file(); @@ -117,8 +118,10 @@ public: AVFormatContext *fmt_ctx; AVCodecContext *avctx; - AVFilterContext *buffersink_ctx; + AVFilterContext *filt_ctx; + int filt_id; AVFilterContext *buffersrc_ctx; + AVFilterContext *buffersink_ctx; AVFilterGraph *filter_graph; AVFrame *frame, *fframe; AVFrame *probe_frame; @@ -177,7 +180,7 @@ public: int load_history(uint8_t **data, int len); int decode_frame(AVFrame *frame); int encode_frame(AVFrame *frame); - int create_filter(const char *filter_spec, AVCodecParameters *avpar); + int create_filter(const char *filter_spec); void load_markers(); IndexMarks *get_markers(); @@ -247,7 +250,7 @@ public: AVHWDeviceType encode_hw_activate(const char *hw_dev); int encode_hw_write(FFrame *picture); int encode_frame(AVFrame *frame); - int create_filter(const char *filter_spec, AVCodecParameters *avpar); + int create_filter(const char *filter_spec); void load_markers(); IndexMarks *get_markers(); @@ -256,8 +259,10 @@ public: int video_seek(int64_t pos); int encode(VFrame *vframe); int drain(); + int convert_hw_frame(AVFrame *ifrm, AVFrame *ofrm); + int load_filter(AVFrame *frame); double get_rotation_angle(); - void flip(); + int flip(double theta); int idx; double frame_rate; @@ -268,6 +273,7 @@ public: int interlaced; int top_field_first; int color_space, color_range; + struct SwsContext *fconvert_ctx; }; class FFCodecRemap diff --git a/cinelerra-5.1/cinelerra/ydiff.C b/cinelerra-5.1/cinelerra/ydiff.C index 5b8f548c..900a8f6f 100644 --- a/cinelerra-5.1/cinelerra/ydiff.C +++ b/cinelerra-5.1/cinelerra/ydiff.C @@ -333,8 
+333,6 @@ int ffcmpr::open_decoder(const char *filename, int vid_no) { struct stat fst; if( stat(filename, &fst) ) return 1; - - av_log_set_level(AV_LOG_VERBOSE); fmt_ctx = 0; AVDictionary *fopts = 0; av_register_all(); @@ -416,7 +414,21 @@ AVFrame *ffcmpr::read_frame() return 0; } -static int diff_frame(AVFrame *afrm, AVFrame *bfrm, gg_ximage *ximg, int w, int h) +static inline int get_depth(AVPixelFormat pix_fmt) +{ + int depth = 0; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt); + if( desc ) { + for( int i=desc->nb_components; --i>=0; ) { + int bits = desc->comp[i].depth; + if( depth < bits ) depth = bits; + } + } + return depth; +} + +static int diff_frame(AVFrame *afrm, AVFrame *bfrm, + gg_ximage *ximg, int w, int h, int s, int a1) { int n = 0, m = 0; uint8_t *arow = afrm->data[0]; @@ -429,11 +441,15 @@ static int diff_frame(AVFrame *afrm, AVFrame *bfrm, gg_ximage *ximg, int w, int uint32_t *lsb = ximg->lsb; for( int y=h; --y>=0; arow+=asz, brow+=bsz, frow+=fsz ) { - uint8_t *ap = arow, *bp = brow, *fp = frow; + uint16_t *ap = (uint16_t*)arow + a1; + uint16_t *bp = (uint16_t*)brow + a1; + uint8_t *fp = frow; for( int x=rsz; --x>=0; ) { uint32_t rgb = 0; uint8_t *rp = fp; for( int i=0; i<3; ++i ) { int d = *ap++ - *bp++; + if( s > 0 ) d >>= s; + else if( s < 0 ) d <<= -s; int v = d + 128; if( v > 255 ) v = 255; else if( v < 0 ) v = 0; @@ -446,7 +462,7 @@ static int diff_frame(AVFrame *afrm, AVFrame *bfrm, gg_ximage *ximg, int w, int for( int i=3; --i>=0; ) *rp++ = rgb>>(8*i); else for( int i=0; i<3; ++i ) *rp++ = rgb>>(8*i); - fp += bpp; + ++ap; ++bp; fp += bpp; } } int sz = h*rsz; @@ -457,7 +473,6 @@ static int diff_frame(AVFrame *afrm, AVFrame *bfrm, gg_ximage *ximg, int w, int int main(int ac, char **av) { - int ret; setbuf(stdout,NULL); XInitThreads(); Display *display = XOpenDisplay(getenv("DISPLAY")); @@ -465,18 +480,45 @@ int main(int ac, char **av) fprintf(stderr,"Unable to open display\n"); exit(1); } + if( ac < 3 ) { + printf("usage: %s a.fmt b.fmt \n" + " a = src media, b = src media, frm0 = a/b skew\n" + " s = shift <0:lt, =0:none(dft), >0:rt\n" + " env var GG_LOG_LEVEL=q/f/e/v/d/\n", av[0]); + exit(1); + } + const char *cp = getenv("GG_LOG_LEVEL"); + if( cp ) { + int lvl = -1; + switch( *cp ) { + case 'q': lvl = AV_LOG_QUIET; break; + case 'f': lvl = AV_LOG_FATAL; break; + case 'e': lvl = AV_LOG_ERROR; break; + case 'v': lvl = AV_LOG_VERBOSE; break; + case 'd': lvl = AV_LOG_DEBUG; break; + case '0'...'9': lvl = atoi(cp); break; + } + if( lvl >= 0 ) + av_log_set_level(lvl); + } ffcmpr a, b; if( a.open_decoder(av[1],0) ) return 1; if( b.open_decoder(av[2],0) ) return 1; + int64_t err = 0; + int frm_no = 0; + int frm0 = ac>3 ? atoi(av[3]) : 0; + int s = ac>4 ? atoi(av[4]) : 0; + printf("file a:%s\n", av[1]); printf(" id 0x%06x:", a.ctx->codec_id); const AVCodecDescriptor *adesc = avcodec_descriptor_get(a.ctx->codec_id); printf(" video %s\n", adesc ? adesc->name : " (unkn)"); printf(" %dx%d %5.2f", a.width, a.height, a.frame_rate); const char *apix = av_get_pix_fmt_name(a.pix_fmt); - printf(" pix %s\n", apix ? apix : "(unkn)"); + int ad = get_depth(a.pix_fmt); + printf(" pix %s, depth=%d\n", apix ? apix : "(unkn)", ad); printf("file b:%s\n", av[2]); printf(" id 0x%06x:", b.ctx->codec_id); @@ -484,7 +526,14 @@ int main(int ac, char **av) printf(" video %s\n", bdesc ? bdesc->name : " (unkn)"); printf(" %dx%d %5.2f", b.width, b.height, b.frame_rate); const char *bpix = av_get_pix_fmt_name(b.pix_fmt); - printf(" pix %s\n", bpix ? 
bpix : "(unkn)"); + int bd = get_depth(b.pix_fmt); + printf(" pix %s, depth=%d\n", bpix ? bpix : "(unkn)", bd); + int d = ad>bd ? ad : bd; + s = 16-d + s; + int lsb = s, msb = lsb + 7; + if( lsb < 0 ) lsb = 0; + if( msb > 15 ) msb = 15; + printf("shift: %d, msb..lsb: %d..%d of uint16\n", s, msb, lsb); // if( a.ctx->codec_id != b.ctx->codec_id ) { printf("codec mismatch\n"); return 1;} if( a.width != b.width ) { printf("width mismatch\n"); return 1;} @@ -493,35 +542,41 @@ int main(int ac, char **av) // if( a.pix_fmt != b.pix_fmt ) { printf("format mismatch\n"); return 1;} signal(SIGINT,sigint); + const AVPixFmtDescriptor *afmt = av_pix_fmt_desc_get(a.pix_fmt); + AVPixelFormat a_pix_fmt = afmt->flags & AV_PIX_FMT_FLAG_RGB ? + AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_AYUV64LE ; + const AVPixFmtDescriptor *bfmt = av_pix_fmt_desc_get(b.pix_fmt); + AVPixelFormat b_pix_fmt = bfmt->flags & AV_PIX_FMT_FLAG_RGB ? + AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_AYUV64LE ; + if( a_pix_fmt != b_pix_fmt ) { + printf(" a/b yuv/rgb mismatched, using a = %s\n", apix); + b_pix_fmt = a_pix_fmt; + } + int a1 = a_pix_fmt == AV_PIX_FMT_AYUV64LE ? 1 : 0; // alpha 1st chan struct SwsContext *a_cvt = sws_getCachedContext(0, a.width, a.height, a.pix_fmt, - a.width, a.height, AV_PIX_FMT_RGB24, SWS_POINT, 0, 0, 0); + a.width, a.height, a_pix_fmt, SWS_POINT, 0, 0, 0); struct SwsContext *b_cvt = sws_getCachedContext(0, b.width, b.height, b.pix_fmt, - b.width, b.height, AV_PIX_FMT_RGB24, SWS_POINT, 0, 0, 0); + b.width, b.height, b_pix_fmt, SWS_POINT, 0, 0, 0); if( !a_cvt || !b_cvt ) { printf("sws_getCachedContext() failed\n"); - return 1; + exit(1); } AVFrame *afrm = av_frame_alloc(); av_image_alloc(afrm->data, afrm->linesize, - a.width, a.height, AV_PIX_FMT_RGB24, 1); + a.width, a.height, a_pix_fmt, 1); AVFrame *bfrm = av_frame_alloc(); av_image_alloc(bfrm->data, bfrm->linesize, - b.width, b.height, AV_PIX_FMT_RGB24, 1); + b.width, b.height, b_pix_fmt, 1); { gg_window gw(display, 10,10, a.width,a.height); gw.show(); gg_thread thr(gw, 1); thr.start(); - int64_t err = 0; - int frm_no = 0; - - if( ac>3 && (ret=atoi(av[3])) ) { - while( ret > 0 ) { a.read_frame(); --ret; } - while( ret < 0 ) { b.read_frame(); ++ret; } - } + while( frm0 > 0 ) { a.read_frame(); --frm0; } + while( frm0 < 0 ) { b.read_frame(); ++frm0; } while( !done ) { AVFrame *ap = a.read_frame(); @@ -534,7 +589,7 @@ int main(int ac, char **av) bfrm->data, bfrm->linesize); thr.draw_lock(); gg_ximage *fimg = thr.next_img(); - ret = diff_frame(afrm, bfrm, fimg, ap->width, ap->height); + int ret = diff_frame(afrm, bfrm, fimg, ap->width, ap->height, s, a1); thr.post(fimg); err += ret; ++frm_no; printf(" %d\n",frm_no); -- 2.26.2