#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define OFFSET(x) offsetof(XPSNRContext, x)
/* spatial activity: downsampled high-pass, evaluated on every second sample */
static uint64_t highds(const int x_act, const int y_act, const int w_act, const int h_act,
                       const int16_t *o_m0, const int o)
{
    uint64_t sa_act = 0;

    for (int y = y_act; y < h_act; y += 2) {
        for (int x = x_act; x < w_act; x += 2) {
            const int f = 12 * ((int)o_m0[ y   *o + x  ] + (int)o_m0[ y   *o + x+1] + (int)o_m0[(y+1)*o + x  ] + (int)o_m0[(y+1)*o + x+1])
                         - 3 * ((int)o_m0[(y-1)*o + x  ] + (int)o_m0[(y-1)*o + x+1] + (int)o_m0[(y+2)*o + x  ] + (int)o_m0[(y+2)*o + x+1])
                         - 3 * ((int)o_m0[ y   *o + x-1] + (int)o_m0[ y   *o + x+2] + (int)o_m0[(y+1)*o + x-1] + (int)o_m0[(y+1)*o + x+2])
                         - 2 * ((int)o_m0[(y-1)*o + x-1] + (int)o_m0[(y-1)*o + x+2] + (int)o_m0[(y+2)*o + x-1] + (int)o_m0[(y+2)*o + x+2])
                             - ((int)o_m0[(y-2)*o + x-1] + (int)o_m0[(y-2)*o + x  ] + (int)o_m0[(y-2)*o + x+1] + (int)o_m0[(y-2)*o + x+2]
                              + (int)o_m0[(y+3)*o + x-1] + (int)o_m0[(y+3)*o + x  ] + (int)o_m0[(y+3)*o + x+1] + (int)o_m0[(y+3)*o + x+2]
                              + (int)o_m0[(y-1)*o + x-2] + (int)o_m0[ y   *o + x-2] + (int)o_m0[(y+1)*o + x-2] + (int)o_m0[(y+2)*o + x-2]
                              + (int)o_m0[(y-1)*o + x+3] + (int)o_m0[ y   *o + x+3] + (int)o_m0[(y+1)*o + x+3] + (int)o_m0[(y+2)*o + x+3]);
            sa_act += (uint64_t) abs(f); /* accumulate the absolute high-pass response */
        }
    }
    return sa_act;
}
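Read as a 2D convolution, the loop in highds() evaluates the zero-mean high-pass kernel below at every second sample position (a sketch derived directly from the coefficients above; rows run from y-2 to y+3, columns from x-2 to x+3):

\[
H_{\mathrm{ds}} =
\begin{pmatrix}
 0 & -1 & -1 & -1 & -1 &  0\\
-1 & -2 & -3 & -3 & -2 & -1\\
-1 & -3 & 12 & 12 & -3 & -1\\
-1 & -3 & 12 & 12 & -3 & -1\\
-1 & -2 & -3 & -3 & -2 & -1\\
 0 & -1 & -1 & -1 & -1 &  0
\end{pmatrix},
\qquad \sum_{i,j} H_{\mathrm{ds}}(i,j) = 0 .
\]

Because the coefficients sum to zero, flat image regions contribute nothing to sa_act; only structured, high-frequency content raises the spatial activity of a block.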
/* temporal activity: first-order frame difference on 2x2 sample sums */
static uint64_t diff1st(const uint32_t w_act, const uint32_t h_act,
                        const int16_t *o_m0, int16_t *o_m1, const int o)
{
    uint64_t ta_act = 0;

    for (uint32_t y = 0; y < h_act; y += 2) {
        for (uint32_t x = 0; x < w_act; x += 2) {
            const int t = (int)o_m0[y*o + x] + (int)o_m0[y*o + x+1] + (int)o_m0[(y+1)*o + x] + (int)o_m0[(y+1)*o + x+1]
                       - ((int)o_m1[y*o + x] + (int)o_m1[y*o + x+1] + (int)o_m1[(y+1)*o + x] + (int)o_m1[(y+1)*o + x+1]);
            ta_act += (uint64_t) abs(t);
            o_m1[y*o + x  ] = o_m0[y*o + x  ]; o_m1[(y+1)*o + x  ] = o_m0[(y+1)*o + x  ]; /* update the t-1 buffer */
            o_m1[y*o + x+1] = o_m0[y*o + x+1]; o_m1[(y+1)*o + x+1] = o_m0[(y+1)*o + x+1];
        }
    }
    return ta_act;
}
/* temporal activity: second-order frame difference on 2x2 sample sums */
static uint64_t diff2nd(const uint32_t w_act, const uint32_t h_act,
                        const int16_t *o_m0, int16_t *o_m1, int16_t *o_m2, const int o)
{
    uint64_t ta_act = 0;

    for (uint32_t y = 0; y < h_act; y += 2) {
        for (uint32_t x = 0; x < w_act; x += 2) {
            const int t = (int)o_m0[y*o + x] + (int)o_m0[y*o + x+1] + (int)o_m0[(y+1)*o + x] + (int)o_m0[(y+1)*o + x+1]
                   - 2 * ((int)o_m1[y*o + x] + (int)o_m1[y*o + x+1] + (int)o_m1[(y+1)*o + x] + (int)o_m1[(y+1)*o + x+1])
                        + (int)o_m2[y*o + x] + (int)o_m2[y*o + x+1] + (int)o_m2[(y+1)*o + x] + (int)o_m2[(y+1)*o + x+1];
            ta_act += (uint64_t) abs(t);
            o_m2[y*o + x  ] = o_m1[y*o + x  ]; o_m2[(y+1)*o + x  ] = o_m1[(y+1)*o + x  ]; /* shift the history buffers */
            o_m2[y*o + x+1] = o_m1[y*o + x+1]; o_m2[(y+1)*o + x+1] = o_m1[(y+1)*o + x+1];
            o_m1[y*o + x  ] = o_m0[y*o + x  ]; o_m1[(y+1)*o + x  ] = o_m0[(y+1)*o + x  ];
            o_m1[y*o + x+1] = o_m0[y*o + x+1]; o_m1[(y+1)*o + x+1] = o_m0[(y+1)*o + x+1];
        }
    }
    return ta_act;
}
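Writing s_t(x, y) for the sum of the 2x2 samples at position (x, y) of frame t, the two routines above amount to first- and second-order backward differences of those block sums (a restatement of the loops, not an additional computation):

\[
\mathrm{diff1st}:\; ta = \sum_{x,y}\bigl|s_t(x,y) - s_{t-1}(x,y)\bigr|, \qquad
\mathrm{diff2nd}:\; ta = \sum_{x,y}\bigl|s_t(x,y) - 2\,s_{t-1}(x,y) + s_{t-2}(x,y)\bigr| .
\]

Both routines also shift the history planes (o_m2 <- o_m1 <- o_m0) so that the next call sees the current frame as its t-1 reference; calc_squared_error_and_weight() below selects the first-order variant for frame rates under 32 fps and the second-order one otherwise.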
/* SSE of one block, accumulated line by line via the PSNR DSP routine */
static uint64_t calc_squared_error(XPSNRContext const *s,
                                   const int16_t *blk_org, const uint32_t stride_org,
                                   const int16_t *blk_rec, const uint32_t stride_rec,
                                   const uint32_t block_width, const uint32_t block_height)
{
    uint64_t sse = 0;

    for (uint32_t y = 0; y < block_height; y++) {
        sse += s->pdsp.sse_line((const uint8_t *) blk_org, (const uint8_t *) blk_rec, (int) block_width);
        blk_org += stride_org;
        blk_rec += stride_rec;
    }
    return sse;
}
/* SSE of one block plus its visual-activity measure (spatial and temporal) */
static double calc_squared_error_and_weight(XPSNRContext const *s,
                                            const int16_t *pic_org, const uint32_t stride_org,
                                            int16_t *pic_org_m1, int16_t *pic_org_m2,
                                            const int16_t *pic_rec, const uint32_t stride_rec,
                                            const uint32_t offset_x, const uint32_t offset_y,
                                            const uint32_t block_width, const uint32_t block_height,
                                            const uint32_t bit_depth, const uint32_t int_frame_rate, double *ms_act)
{
    const int o = (int) stride_org;
    const int r = (int) stride_rec;
    const int16_t *o_m0 = pic_org    + offset_y * o + offset_x;
    int16_t       *o_m1 = pic_org_m1 + offset_y * o + offset_x;
    int16_t       *o_m2 = pic_org_m2 + offset_y * o + offset_x;
    const int16_t *r_m0 = pic_rec    + offset_y * r + offset_x;
    const int b_val = (s->plane_width[0] * s->plane_height[0] > 2048 * 1152 ? 2 : 1); /* downsampling for > HD resolution */
    const int x_act = (offset_x > 0 ? 0 : b_val);
    const int y_act = (offset_y > 0 ? 0 : b_val);
    const int w_act = (offset_x + block_width  < (uint32_t) s->plane_width [0] ? (int) block_width  : (int) block_width  - b_val);
    const int h_act = (offset_y + block_height < (uint32_t) s->plane_height[0] ? (int) block_height : (int) block_height - b_val);
    const double sse = (double) calc_squared_error(s, o_m0, stride_org, r_m0, stride_rec,
                                                   block_width, block_height);
    uint64_t sa_act = 0; /* spatial abs. activity sum */
    uint64_t ta_act = 0; /* temporal abs. activity sum */

    if (w_act <= x_act || h_act <= y_act) /* block is too small for an activity measure */
        return sse;

    if (b_val > 1) { /* highly simplified high-pass, with pixel gaps */
        sa_act = s->dsp.highds_func ? s->dsp.highds_func(x_act, y_act, w_act, h_act, o_m0, o)
                                    : highds(x_act, y_act, w_act, h_act, o_m0, o);
    } else { /* up to HD resolution: full high-pass, without pixel gaps */
        for (int y = y_act; y < h_act; y++) {
            for (int x = x_act; x < w_act; x++) {
                const int f = 12 * (int)o_m0[y*o + x] - 2 * ((int)o_m0[y*o + x-1] + (int)o_m0[y*o + x+1] + (int)o_m0[(y-1)*o + x] + (int)o_m0[(y+1)*o + x])
                                 - ((int)o_m0[(y-1)*o + x-1] + (int)o_m0[(y-1)*o + x+1] + (int)o_m0[(y+1)*o + x-1] + (int)o_m0[(y+1)*o + x+1]);
                sa_act += (uint64_t) abs(f);
            }
        }
    }

    /* mean spatial activity of the block */
    *ms_act = (double) sa_act / ((double) (w_act - x_act) * (double) (h_act - y_act));

    if (b_val > 1) { /* highly simplified temporal differences, with pixel gaps */
        if (int_frame_rate < 32) /* first-order diff for low frame rates */
            ta_act = s->dsp.diff1st_func(block_width, block_height, o_m0, o_m1, o);
        else                     /* second-order diff for high frame rates */
            ta_act = s->dsp.diff2nd_func(block_width, block_height, o_m0, o_m1, o_m2, o);
    } else { /* up to HD resolution: full-sample temporal differences */
        if (int_frame_rate < 32) { /* first-order diff */
            for (uint32_t y = 0; y < block_height; y++) {
                for (uint32_t x = 0; x < block_width; x++) {
                    const int t = (int)o_m0[y * o + x] - (int)o_m1[y * o + x];

                    ta_act += (uint64_t) abs(t);
                    o_m1[y * o + x] = o_m0[y * o + x];
                }
            }
        } else { /* second-order diff */
            for (uint32_t y = 0; y < block_height; y++) {
                for (uint32_t x = 0; x < block_width; x++) {
                    const int t = (int)o_m0[y * o + x] - 2 * (int)o_m1[y * o + x] + (int)o_m2[y * o + x];

                    ta_act += (uint64_t) abs(t);
                    o_m2[y * o + x] = o_m1[y * o + x];
                    o_m1[y * o + x] = o_m0[y * o + x];
                }
            }
        }
    }

    /* add the mean temporal activity of the block */
    *ms_act += (double) ta_act / ((double) block_width * (double) block_height);

    /* lower limit on the activity, depending on the bit depth */
    if (*ms_act < (double) (1 << (bit_depth - 6)))
        *ms_act = (double) (1 << (bit_depth - 6));

    return sse;
}
static inline double get_avg_xpsnr(const double sqrt_wsse_val, const double sum_xpsnr_val,
                                   const uint32_t image_width, const uint32_t image_height,
                                   const uint64_t max_error_64, const uint64_t num_frames_64)
{
    if (num_frames_64 == 0)
        return INFINITY;

    if (sqrt_wsse_val >= (double) num_frames_64) { /* average the distortion, then convert to dB */
        const double avg_dist = sqrt_wsse_val / (double) num_frames_64;
        const uint64_t num64 = (uint64_t) image_width * (uint64_t) image_height * max_error_64;

        return 10.0 * log10((double) num64 / ((double) avg_dist * (double) avg_dist));
    }

    return sum_xpsnr_val / (double) num_frames_64; /* average of the per-frame XPSNR values */
}
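Spelled out, the aggregation implemented by get_avg_xpsnr() is (W, H: picture size, E_max: squared peak sample error, N: frame count, D: accumulated sum of per-frame sqrt(WSSE) values):

\[
\mathrm{XPSNR}_{\mathrm{avg}} =
\begin{cases}
10\,\log_{10}\dfrac{W \cdot H \cdot E_{\max}}{(D/N)^{2}}, & D \ge N,\\[1ex]
\dfrac{1}{N}\displaystyle\sum_{n=1}^{N}\mathrm{XPSNR}_{n}, & \text{otherwise.}
\end{cases}
\]

The preferred path averages the weighted distortion before converting to dB; the arithmetic mean of the per-frame values is only used as a fallback when the accumulated distortion is below one per frame.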
/* weighted SSE of one picture, per color component */
static int get_wsse(AVFilterContext *ctx, int16_t **org, int16_t **org_m1, int16_t **org_m2,
                    int16_t **rec, uint64_t *const wsse64)
{
    XPSNRContext *const s = ctx->priv;
    const uint32_t w = s->plane_width [0]; /* luma plane width */
    const uint32_t h = s->plane_height[0]; /* luma plane height */
    const double   r = (double)(w * h) / (3840.0 * 2160.0); /* picture size relative to UHD */
    const uint32_t b = FFMAX(0, 4 * (int32_t) (32.0 * sqrt(r) + 0.5)); /* block size, a multiple of 4 */
    const uint32_t w_blk = (w + b - 1) / b; /* luma width in units of blocks */
    const double avg_act = sqrt(16.0 * (double) (1 << (2 * s->depth - 9)) / sqrt(FFMAX(0.00001, r))); /* picture-level normalization */
    const int *stride_org = (s->bpp == 1 ? s->plane_width : s->line_sizes);
    uint32_t x, y, idx_blk = 0;
    double *const sse_luma = s->sse_luma;
    double *const weights  = s->weights;
    int c;

    if (!wsse64 || (s->depth < 6) || (s->depth > 16) || (s->num_comps <= 0) ||
        (s->num_comps > 3) || (w == 0) || (h == 0)) {
        av_log(ctx, AV_LOG_ERROR, "Error in XPSNR routine: invalid picture size or bit depth.\n");
        return AVERROR(EINVAL);
    }
    if (!weights || (b >= 4 && !sse_luma)) /* the per-block arrays must be available */
        return AVERROR(ENOMEM);

    { /* luma: per-block SSE and visual-activity weights */
        const int16_t *p_org = org[0];
        const uint32_t s_org = stride_org[0] / s->bpp;
        const int16_t *p_rec = rec[0];
        const uint32_t s_rec = s->plane_width[0];
        int16_t *p_org_m1 = org_m1[0];
        int16_t *p_org_m2 = org_m2[0];
        double wsse_luma  = 0.0;

        for (y = 0; y < h; y += b) { /* calculate the block SSEs and activities */
            const uint32_t block_height = (y + b > h ? h - y : b);

            for (x = 0; x < w; x += b, idx_blk++) {
                const uint32_t block_width = (x + b > w ? w - x : b);
                double ms_act = 1.0, ms_act_prev = 0.0;

                sse_luma[idx_blk] = calc_squared_error_and_weight(s, p_org, s_org,
                                                                  p_org_m1, p_org_m2,
                                                                  p_rec, s_rec, x, y,
                                                                  block_width, block_height,
                                                                  s->depth, s->frame_rate, &ms_act);
                weights[idx_blk] = 1.0 / sqrt(ms_act);

                if (w * h <= 640 * 480) { /* in-line limiting of the weights for small pictures */
                    ms_act_prev = (idx_blk > 1 ? weights[idx_blk - 2] : 0);
                    if (idx_blk > w_blk) /* not in the first block row */
                        ms_act_prev = FFMAX(ms_act_prev, weights[idx_blk - 1 - w_blk]);
                    if ((idx_blk > 0) && (weights[idx_blk - 1] > ms_act_prev))
                        weights[idx_blk - 1] = ms_act_prev;
                    if ((x + b >= w) && (y + b >= h) && (idx_blk > w_blk)) { /* last block in picture */
                        if (weights[idx_blk] > ms_act_prev)
                            weights[idx_blk] = ms_act_prev;
                    }
                }
            } /* for x */
        } /* for y */

        for (y = idx_blk = 0; y < h; y += b) { /* sum up the weighted block SSEs */
            for (x = 0; x < w; x += b, idx_blk++)
                wsse_luma += sse_luma[idx_blk] * weights[idx_blk];
        }
        wsse64[0] = (wsse_luma <= 0.0 ? 0 : (uint64_t) (wsse_luma * avg_act + 0.5));
    }
    for (c = 0; c < s->num_comps; c++) { /* now the chroma components */
        const int16_t *p_org = org[c];
        const uint32_t s_org = stride_org[c] / s->bpp;
        const int16_t *p_rec = rec[c];
        const uint32_t s_rec = s->plane_width [c];
        const uint32_t w_pln = s->plane_width [c];
        const uint32_t h_pln = s->plane_height[c];
        const uint32_t bx = (b * w_pln) / w;
        const uint32_t by = (b * h_pln) / h; /* block size adapted to chroma subsampling */
        double wsse_chroma = 0.0;

        if (c == 0) /* the luma WSSE was already computed above */
            continue;

        for (y = idx_blk = 0; y < h_pln; y += by) {
            const uint32_t block_height = (y + by > h_pln ? h_pln - y : by);

            for (x = 0; x < w_pln; x += bx, idx_blk++) {
                const uint32_t block_width = (x + bx > w_pln ? w_pln - x : bx);

                wsse_chroma += (double) calc_squared_error(s, p_org + y * s_org + x, s_org,
                                                           p_rec + y * s_rec + x, s_rec,
                                                           block_width, block_height) * weights[idx_blk];
            }
        }
        wsse64[c] = (wsse_chroma <= 0.0 ? 0 : (uint64_t) (wsse_chroma * avg_act + 0.5));
    }

    return 0;
}
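In short, for every luma block k the routine computes a visual-activity weight and accumulates a picture-level weighted SSE (this restates the code above; act_pic corresponds to the avg_act constant, BD to s->depth):

\[
w_k = \frac{1}{\sqrt{\mathrm{msAct}_k}}, \qquad
\mathrm{WSSE}_{\mathrm{luma}} = \mathrm{act}_{\mathrm{pic}} \sum_{k} w_k\,\mathrm{SSE}_k, \qquad
\mathrm{act}_{\mathrm{pic}} = \sqrt{\frac{16 \cdot 2^{\,2\mathrm{BD}-9}}{\sqrt{\max\!\left(10^{-5},\; \frac{W H}{3840 \cdot 2160}\right)}}} .
\]

The chroma planes reuse the luma weights w_k on co-located, subsampling-adjusted blocks, so only plain block SSEs need to be computed for them.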
static int do_xpsnr(FFFrameSync *fs)
{
    AVFilterContext  *ctx = fs->parent;
    XPSNRContext *const s = ctx->priv;
    const uint32_t w = s->plane_width [0]; /* luma plane width */
    const uint32_t h = s->plane_height[0]; /* luma plane height */
    const uint32_t b = FFMAX(0, 4 * (int32_t) (32.0 * sqrt((double) (w * h) / (3840.0 * 2160.0)) + 0.5)); /* block size */
    const uint32_t w_blk = (w + b - 1) / b; /* luma width in units of blocks */
    const uint32_t h_blk = (h + b - 1) / b; /* luma height in units of blocks */
    AVFrame *master, *ref = NULL;
    int16_t *porg   [3];
    int16_t *porg_m1[3];
    int16_t *porg_m2[3];
    int16_t *prec   [3];
    uint64_t wsse64 [3] = {0, 0, 0};
    double cur_xpsnr[3];
    AVDictionary **metadata;
    int c, ret_value;

    if ((ret_value = ff_framesync_dualinput_get(fs, &master, &ref)) < 0)
        return ret_value;
    if (ctx->is_disabled || !ref)
        return ff_filter_frame(ctx->outputs[0], master);

    metadata = &master->metadata;

    if (!s->sse_luma) /* per-block arrays, sized from the block grid above */
        s->sse_luma = av_malloc_array(w_blk * h_blk, sizeof(*s->sse_luma));
    if (!s->weights)
        s->weights  = av_malloc_array(w_blk * h_blk, sizeof(*s->weights));

    for (c = 0; c < s->num_comps; c++) { /* allocate the temporal original buffers */
        const int stride_org_bpp = (s->bpp == 1 ? s->plane_width[c] : s->line_sizes[c] / s->bpp);

        if (!s->buf_org_m1[c])
            s->buf_org_m1[c] = av_buffer_allocz(stride_org_bpp * s->plane_height[c] * sizeof(int16_t));
        if (!s->buf_org_m2[c])
            s->buf_org_m2[c] = av_buffer_allocz(stride_org_bpp * s->plane_height[c] * sizeof(int16_t));

        porg_m1[c] = (int16_t *) s->buf_org_m1[c]->data;
        porg_m2[c] = (int16_t *) s->buf_org_m2[c]->data;
    }

    if (s->bpp == 1) { /* 8-bit input: widen both pictures to 16-bit work planes */
        for (c = 0; c < s->num_comps; c++) {
            const int m = s->line_sizes[c];   /* master stride */
            const int r = ref->linesize[c];   /* reference stride */
            const int o = s->plane_width[c];  /* work-plane stride */

            if (!s->buf_org[c])
                s->buf_org[c] = av_buffer_allocz(s->plane_width[c] * s->plane_height[c] * sizeof(int16_t));
            if (!s->buf_rec[c])
                s->buf_rec[c] = av_buffer_allocz(s->plane_width[c] * s->plane_height[c] * sizeof(int16_t));

            porg[c] = (int16_t *) s->buf_org[c]->data;
            prec[c] = (int16_t *) s->buf_rec[c]->data;

            for (int y = 0; y < s->plane_height[c]; y++) {
                for (int x = 0; x < s->plane_width[c]; x++) {
                    porg[c][y * o + x] = (int16_t) master->data[c][y * m + x];
                    prec[c][y * o + x] = (int16_t)    ref->data[c][y * r + x];
                }
            }
        }
    } else { /* 10-16 bit input: the frame data is already 16 bit wide */
        for (c = 0; c < s->num_comps; c++) {
            porg[c] = (int16_t *) master->data[c];
            prec[c] = (int16_t *)    ref->data[c];
        }
    }

    ret_value = get_wsse(ctx, (int16_t **) &porg, (int16_t **) &porg_m1, (int16_t **) &porg_m2,
                         (int16_t **) &prec, wsse64);
    if (ret_value < 0)
        return ret_value;

    for (c = 0; c < s->num_comps; c++) { /* update the perceptually weighted statistics */
        const double sqrt_wsse = sqrt((double) wsse64[c]);

        cur_xpsnr[c] = get_avg_xpsnr(sqrt_wsse, 0.0,
                                     s->plane_width[c], s->plane_height[c],
                                     s->max_error_64, 1 /* single frame */);
        s->sum_wdist [c] += sqrt_wsse;
        s->sum_xpsnr [c] += cur_xpsnr[c];
        s->and_is_inf[c] &= isinf(cur_xpsnr[c]);
    }
    s->num_frames_64++;

    for (int j = 0; j < s->num_comps; j++) { /* export the per-frame values as metadata */
        int c = s->is_rgb ? s->rgba_map[j] : j;
        set_meta(metadata, "lavfi.xpsnr.xpsnr.", s->comps[j], cur_xpsnr[c]);
    }

    if (s->stats_file) { /* print the frame number and the component-wise values */
        fprintf(s->stats_file, "n: %4"PRId64"", s->num_frames_64);

        for (c = 0; c < s->num_comps; c++)
            fprintf(s->stats_file, "  XPSNR %c: %3.4f", s->comps[c], cur_xpsnr[c]);
        fprintf(s->stats_file, "\n");
    }

    return ff_filter_frame(ctx->outputs[0], master);
}
static av_cold int init(AVFilterContext *ctx)
{
    XPSNRContext *const s = ctx->priv;
    int c;

    if (s->stats_file_str) {
        if (!strcmp(s->stats_file_str, "-")) /* "-" means stdout */
            s->stats_file = stdout;
        else
            s->stats_file = avpriv_fopen_utf8(s->stats_file_str, "w");

        if (!s->stats_file) {
            const int err = AVERROR(errno);
            char buf[128];

            av_strerror(err, buf, sizeof(buf));
            av_log(ctx, AV_LOG_ERROR, "Could not open statistics file %s: %s\n",
                   s->stats_file_str, buf);
            return err;
        }
    }

    for (c = 0; c < 3; c++) { /* initialize the XPSNR accumulators */
        s->sum_wdist [c] = 0.0;
        s->sum_xpsnr [c] = 0.0;
        s->and_is_inf[c] = 1;
    }

    s->fs.on_event = do_xpsnr;

    return 0;
}
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf)   AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf)         PF_NOALPHA(suf), PF_ALPHA(suf)
static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext  *ctx = inlink->dst;
    XPSNRContext *const s = ctx->priv;
    FilterLink       *il  = ff_filter_link(inlink);

    if ((ctx->inputs[0]->w != ctx->inputs[1]->w) ||
        (ctx->inputs[0]->h != ctx->inputs[1]->h)) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of the input videos must match.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "The input videos must be of the same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->bpp   = (desc->comp[0].depth <= 8 ? 1 : 2);
    s->depth =  desc->comp[0].depth;
    s->max_error_64  = (1 << s->depth) - 1;
    s->max_error_64 *= s->max_error_64;

    s->frame_rate = il->frame_rate.num / il->frame_rate.den;

    s->num_comps = (desc->nb_components > 3 ? 3 : desc->nb_components);

    s->is_rgb   = (ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0);
    s->comps[0] = (s->is_rgb ? 'r' : 'y');
    s->comps[1] = (s->is_rgb ? 'g' : 'u');
    s->comps[2] = (s->is_rgb ? 'b' : 'v');

    s->plane_width [0] = s->plane_width [3] = inlink->w;
    s->plane_width [1] = s->plane_width [2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->plane_height[0] = s->plane_height[3] = inlink->h;
    s->plane_height[1] = s->plane_height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);

    /* C defaults for the activity routines; arch-specific code may override these */
    s->dsp.highds_func  = highds;
    s->dsp.diff1st_func = diff1st;
    s->dsp.diff2nd_func = diff2nd;
    ff_psnr_init(&s->pdsp, 15); /* the SSE is computed on the 16-bit work planes */

    return 0;
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    XPSNRContext *const s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
        return ret;

    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    if (av_cmp_q(mainlink->time_base, ctx->inputs[1]->time_base))
        av_log(ctx, AV_LOG_WARNING, "not matching timebases found between first input: %d/%d and second input %d/%d, results may be incorrect!\n",
               ctx->inputs[0]->time_base.num, ctx->inputs[0]->time_base.den,
               ctx->inputs[1]->time_base.num, ctx->inputs[1]->time_base.den);

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    XPSNRContext *const s = ctx->priv;
    int c;

    if (s->num_frames_64 > 0) { /* print the overall XPSNR averages */
        const double xpsnr_luma = get_avg_xpsnr(s->sum_wdist[0], s->sum_xpsnr[0],
                                                s->plane_width[0], s->plane_height[0],
                                                s->max_error_64, s->num_frames_64);
        double xpsnr_min = xpsnr_luma;

        if (s->stats_file && s->stats_file != stdout) {
            fprintf(s->stats_file, "\nXPSNR average, %"PRId64" frames", s->num_frames_64);
            fprintf(s->stats_file, " %c: %3.4f", s->comps[0], xpsnr_luma);
        }

        for (c = 1; c < s->num_comps; c++) { /* chroma components */
            const double xpsnr_chroma = get_avg_xpsnr(s->sum_wdist[c], s->sum_xpsnr[c],
                                                      s->plane_width[c], s->plane_height[c],
                                                      s->max_error_64, s->num_frames_64);
            if (xpsnr_min > xpsnr_chroma)
                xpsnr_min = xpsnr_chroma;

            if (s->stats_file && s->stats_file != stdout)
                fprintf(s->stats_file, " %c: %3.4f", s->comps[c], xpsnr_chroma);
        }

        if (s->num_comps > 1) { /* with chroma, also report the minimum across components */
            if (s->stats_file && s->stats_file != stdout)
                fprintf(s->stats_file, " (minimum: %3.4f)\n", xpsnr_min);
        } else {
            if (s->stats_file && s->stats_file != stdout)
                fprintf(s->stats_file, "\n");
        }
    }

    if (s->stats_file && s->stats_file != stdout)
        fclose(s->stats_file);
    for (c = 0; c < s->num_comps; c++) { /* free the 16-bit work planes */
        av_buffer_unref(&s->buf_org[c]);
        av_buffer_unref(&s->buf_rec[c]);
    }
    for (c = 0; c < s->num_comps; c++) { /* free the temporal reference planes */
        av_buffer_unref(&s->buf_org_m1[c]);
        av_buffer_unref(&s->buf_org_m2[c]);
    }

    av_freep(&s->sse_luma);
    av_freep(&s->weights);

    ff_framesync_uninit(&s->fs);
}
const AVFilter ff_vf_xpsnr = {
    .name          = "xpsnr",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the extended perceptually weighted peak signal-to-noise ratio (XPSNR) between two video streams."),
    .preinit       = xpsnr_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .priv_size     = sizeof(XPSNRContext),
    .priv_class    = &xpsnr_class,
    FILTER_INPUTS(xpsnr_inputs),
    FILTER_OUTPUTS(xpsnr_outputs),
    FILTER_PIXFMTS_ARRAY(xpsnr_formats),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_METADATA_ONLY,
};
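For reference, a minimal invocation sketch (the file names are placeholders; the stats_file option name follows the stats_file_str handling in init() above). The first input is the distorted clip, the second the reference, and per-frame values go to a log file:

ffmpeg -i distorted.mkv -i reference.mkv -lavfi xpsnr=stats_file=xpsnr.log -f null -

The same per-frame values are exported as frame metadata under the lavfi.xpsnr.xpsnr.* keys set in do_xpsnr(), so they can also be consumed downstream (e.g. by a metadata-printing filter) instead of via the stats file.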