vf_libvmaf.c

/*
 * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Calculate the VMAF between two input videos.
 */

#include "config_components.h"

#include <libvmaf.h>

#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "filters.h"
#include "formats.h"
#include "framesync.h"
#include "video.h"

#if CONFIG_LIBVMAF_CUDA_FILTER
#include <libvmaf_cuda.h>

#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#endif

typedef struct LIBVMAFContext {
    const AVClass *class;
    FFFrameSync fs;
    char *log_path;
    char *log_fmt;
    char *pool;
    int n_threads;
    int n_subsample;
    char *model_cfg;
    char *feature_cfg;
    VmafContext *vmaf;
    VmafModel **model;
    unsigned model_cnt;
    unsigned frame_cnt;
    unsigned bpc;
#if CONFIG_LIBVMAF_CUDA_FILTER
    VmafCudaState *cu_state;
#endif
} LIBVMAFContext;

#define OFFSET(x) offsetof(LIBVMAFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption libvmaf_options[] = {
    {"log_path", "Set the file path to be used to write log.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"log_fmt", "Set the format of the log (csv, json, xml, or sub).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS},
    {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
    {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
    {"model", "Set the model to be used for computing vmaf.", OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS},
    {"feature", "Set the feature to be used for computing vmaf.", OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);

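/*
 * Usage sketch (assumes an FFmpeg build configured with --enable-libvmaf;
 * file names are illustrative):
 *
 *   ffmpeg -i distorted.mp4 -i reference.mp4 \
 *          -lavfi libvmaf=log_fmt=json:log_path=vmaf.json -f null -
 *
 * The first input is the distorted video and the second the reference; the
 * pooled score is printed at uninit() and a per-frame log is written to
 * log_path in the format chosen by log_fmt.
 */
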
static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
{
    switch (av_pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV420P16LE:
        return VMAF_PIX_FMT_YUV420P;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV422P16LE:
        return VMAF_PIX_FMT_YUV422P;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV444P16LE:
        return VMAF_PIX_FMT_YUV444P;
    default:
        return VMAF_PIX_FMT_UNKNOWN;
    }
}

static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
{
    const int bytes_per_value = bpc > 8 ? 2 : 1;
    int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc,
                                 src->width, src->height);
    if (err)
        return AVERROR(ENOMEM);

    for (unsigned i = 0; i < 3; i++) {
        uint8_t *src_data = src->data[i];
        uint8_t *dst_data = dst->data[i];
        for (unsigned j = 0; j < dst->h[i]; j++) {
            memcpy(dst_data, src_data, bytes_per_value * dst->w[i]);
            src_data += src->linesize[i];
            dst_data += dst->stride[i];
        }
    }

    return 0;
}

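/* Frame sync callback for the software path: copy each (distorted,
 * reference) pair into VmafPictures, queue them via vmaf_read_pictures(),
 * and forward the distorted frame downstream unchanged. */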
static int do_vmaf(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LIBVMAFContext *s = ctx->priv;
    VmafPicture pic_ref, pic_dist;
    AVFrame *ref, *dist;
    int err = 0;

    int ret = ff_framesync_dualinput_get(fs, &dist, &ref);
    if (ret < 0)
        return ret;
    if (ctx->is_disabled || !ref)
        return ff_filter_frame(ctx->outputs[0], dist);

    if (dist->color_range != ref->color_range) {
        av_log(ctx, AV_LOG_WARNING, "distorted and reference "
               "frames use different color ranges (%s != %s)\n",
               av_color_range_name(dist->color_range),
               av_color_range_name(ref->color_range));
    }

    err = copy_picture_data(ref, &pic_ref, s->bpc);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
        return AVERROR(ENOMEM);
    }

    err = copy_picture_data(dist, &pic_dist, s->bpc);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
        vmaf_picture_unref(&pic_ref);
        return AVERROR(ENOMEM);
    }

    err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
        return AVERROR(EINVAL);
    }

    return ff_filter_frame(ctx->outputs[0], dist);
}

static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt)
{
    AVDictionary **dict = NULL;
    char *str_copy = NULL;
    char *saveptr = NULL;
    unsigned cnt2;
    int err = 0;

    if (!str)
        return NULL;

    cnt2 = 1;
    for (char *p = str; *p; p++) {
        if (*p == '|')
            cnt2++;
    }

    dict = av_calloc(cnt2, sizeof(*dict));
    if (!dict)
        goto fail;

    str_copy = av_strdup(str);
    if (!str_copy)
        goto fail;

    *cnt = 0;
    for (unsigned i = 0; i < cnt2; i++) {
        char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr);
        if (!s)
            continue;
        err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0);
        if (err)
            goto fail;
    }

    av_free(str_copy);
    return dict;

fail:
    if (dict) {
        for (unsigned i = 0; i < *cnt; i++) {
            if (dict[i])
                av_dict_free(&dict[i]);
        }
        av_free(dict);
    }

    av_free(str_copy);
    *cnt = 0;
    return NULL;
}

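/*
 * For illustration (hypothetical input): the string
 * "name=psnr|name=float_ssim" is split on '|' into two segments, and each
 * segment is parsed as '='/':' separated pairs, yielding the dictionaries
 * [{name: psnr}, {name: float_ssim}].
 */
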
static int parse_features(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    AVDictionary **dict = NULL;
    unsigned dict_cnt;
    int err = 0;

    if (!s->feature_cfg)
        return 0;

    dict = delimited_dict_parse(s->feature_cfg, &dict_cnt);
    if (!dict) {
        av_log(ctx, AV_LOG_ERROR,
               "could not parse feature config: %s\n", s->feature_cfg);
        return AVERROR(EINVAL);
    }

    for (unsigned i = 0; i < dict_cnt; i++) {
        char *feature_name = NULL;
        VmafFeatureDictionary *feature_opts_dict = NULL;
        const AVDictionaryEntry *e = NULL;

        while (e = av_dict_iterate(dict[i], e)) {
            if (!strcmp(e->key, "name")) {
                feature_name = e->value;
                continue;
            }

            err = vmaf_feature_dictionary_set(&feature_opts_dict, e->key,
                                              e->value);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not set feature option: %s.%s=%s\n",
                       feature_name, e->key, e->value);
                goto exit;
            }
        }

        err = vmaf_use_feature(s->vmaf, feature_name, feature_opts_dict);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem during vmaf_use_feature: %s\n", feature_name);
            goto exit;
        }
    }

exit:
    for (unsigned i = 0; i < dict_cnt; i++) {
        if (dict[i])
            av_dict_free(&dict[i]);
    }
    av_free(dict);
    return err;
}

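/*
 * Example (a sketch): a feature option string such as
 *
 *   feature=name=psnr|name=float_ssim
 *
 * enables extra features alongside those required by the loaded models; any
 * additional key=value pairs in an entry are forwarded through
 * vmaf_feature_dictionary_set(). The feature names shown here are
 * illustrative; the set of valid names is defined by libvmaf.
 */
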
static int parse_models(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    AVDictionary **dict;
    unsigned dict_cnt;
    int err = 0;

    if (!s->model_cfg) return 0;

    dict_cnt = 0;
    dict = delimited_dict_parse(s->model_cfg, &dict_cnt);
    if (!dict) {
        av_log(ctx, AV_LOG_ERROR,
               "could not parse model config: %s\n", s->model_cfg);
        return AVERROR(EINVAL);
    }

    s->model_cnt = dict_cnt;
    s->model = av_calloc(s->model_cnt, sizeof(*s->model));
    if (!s->model)
        return AVERROR(ENOMEM);

    for (unsigned i = 0; i < dict_cnt; i++) {
        VmafModelConfig model_cfg = { 0 };
        const AVDictionaryEntry *e = NULL;
        char *version = NULL;
        char *path = NULL;

        while (e = av_dict_iterate(dict[i], e)) {
            if (!strcmp(e->key, "disable_clip")) {
                model_cfg.flags |= !strcmp(e->value, "true") ?
                    VMAF_MODEL_FLAG_DISABLE_CLIP : 0;
                continue;
            }

            if (!strcmp(e->key, "enable_transform")) {
                model_cfg.flags |= !strcmp(e->value, "true") ?
                    VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0;
                continue;
            }

            if (!strcmp(e->key, "name")) {
                model_cfg.name = e->value;
                continue;
            }

            if (!strcmp(e->key, "version")) {
                version = e->value;
                continue;
            }

            if (!strcmp(e->key, "path")) {
                path = e->value;
                continue;
            }
        }

        if (version) {
            err = vmaf_model_load(&s->model[i], &model_cfg, version);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not load libvmaf model with version: %s\n",
                       version);
                goto exit;
            }
        }

        if (path && !s->model[i]) {
            err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not load libvmaf model with path: %s\n",
                       path);
                goto exit;
            }
        }

        if (!s->model[i]) {
            av_log(ctx, AV_LOG_ERROR,
                   "could not load libvmaf model with config: %s\n",
                   s->model_cfg);
            goto exit;
        }

        while (e = av_dict_iterate(dict[i], e)) {
            VmafFeatureDictionary *feature_opts_dict = NULL;
            char *feature_opt = NULL;

            char *feature_name = av_strtok(e->key, ".", &feature_opt);
            if (!feature_opt)
                continue;

            err = vmaf_feature_dictionary_set(&feature_opts_dict,
                                              feature_opt, e->value);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not set feature option: %s.%s=%s\n",
                       feature_name, feature_opt, e->value);
                err = AVERROR(EINVAL);
                goto exit;
            }

            err = vmaf_model_feature_overload(s->model[i], feature_name,
                                              feature_opts_dict);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not overload feature: %s\n", feature_name);
                err = AVERROR(EINVAL);
                goto exit;
            }
        }
    }

    for (unsigned i = 0; i < s->model_cnt; i++) {
        err = vmaf_use_features_from_model(s->vmaf, s->model[i]);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem during vmaf_use_features_from_model\n");
            err = AVERROR(EINVAL);
            goto exit;
        }
    }

exit:
    for (unsigned i = 0; i < dict_cnt; i++) {
        if (dict[i])
            av_dict_free(&dict[i]);
    }
    av_free(dict);
    return err;
}

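/*
 * Example (a sketch): a model option string as received by this filter,
 *
 *   model=version=vmaf_v0.6.1:motion.motion_force_zero=true
 *
 * loads the built-in vmaf_v0.6.1 model and overloads its motion feature via
 * vmaf_model_feature_overload(); keys of the form "<feature>.<option>" are
 * split on '.' above. "path" may be used instead of "version" to load a
 * model file from disk. The option names a feature accepts are defined by
 * libvmaf, not by this filter.
 */
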
static enum VmafLogLevel log_level_map(int log_level)
{
    switch (log_level) {
    case AV_LOG_QUIET:
        return VMAF_LOG_LEVEL_NONE;
    case AV_LOG_ERROR:
        return VMAF_LOG_LEVEL_ERROR;
    case AV_LOG_WARNING:
        return VMAF_LOG_LEVEL_WARNING;
    case AV_LOG_INFO:
        return VMAF_LOG_LEVEL_INFO;
    case AV_LOG_DEBUG:
        return VMAF_LOG_LEVEL_DEBUG;
    default:
        return VMAF_LOG_LEVEL_INFO;
    }
}

static av_cold int init(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    int err = 0;

    VmafConfiguration cfg = {
        .log_level = log_level_map(av_log_get_level()),
        .n_subsample = s->n_subsample,
        .n_threads = s->n_threads,
    };

    err = vmaf_init(&s->vmaf, cfg);
    if (err)
        return AVERROR(EINVAL);

    err = parse_models(ctx);
    if (err)
        return err;

    err = parse_features(ctx);
    if (err)
        return err;

    s->fs.on_event = do_vmaf;
    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV444P10LE,
    AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV444P12LE,
    AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV444P16LE,
    AV_PIX_FMT_NONE
};

static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LIBVMAFContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc;
    int err = 0;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w) {
        av_log(ctx, AV_LOG_ERROR, "input width must match.\n");
        err |= AVERROR(EINVAL);
    }

    if (ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "input height must match.\n");
        err |= AVERROR(EINVAL);
    }

    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n");
        err |= AVERROR(EINVAL);
    }

    if (err)
        return err;

    desc = av_pix_fmt_desc_get(inlink->format);
    s->bpc = desc->comp[0].depth;

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LIBVMAFContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    FilterLink *il = ff_filter_link(mainlink);
    FilterLink *ol = ff_filter_link(outlink);
    int ret;

    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    ol->frame_rate = il->frame_rate;
    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
{
    if (log_fmt) {
        if (!strcmp(log_fmt, "xml"))
            return VMAF_OUTPUT_FORMAT_XML;
        if (!strcmp(log_fmt, "json"))
            return VMAF_OUTPUT_FORMAT_JSON;
        if (!strcmp(log_fmt, "csv"))
            return VMAF_OUTPUT_FORMAT_CSV;
        if (!strcmp(log_fmt, "sub"))
            return VMAF_OUTPUT_FORMAT_SUB;
    }

    return VMAF_OUTPUT_FORMAT_XML;
}

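/*
 * Example (a sketch): log_fmt=json together with log_path=vmaf.json writes a
 * per-frame JSON log at teardown; an unset or unrecognized log_fmt falls
 * back to XML, matching the option's "xml" default.
 */
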
static enum VmafPoolingMethod pool_method_map(const char *pool_method)
{
    if (pool_method) {
        if (!strcmp(pool_method, "min"))
            return VMAF_POOL_METHOD_MIN;
        if (!strcmp(pool_method, "mean"))
            return VMAF_POOL_METHOD_MEAN;
        if (!strcmp(pool_method, "harmonic_mean"))
            return VMAF_POOL_METHOD_HARMONIC_MEAN;
    }

    return VMAF_POOL_METHOD_MEAN;
}

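/*
 * Example (a sketch): pool=harmonic_mean selects harmonic-mean pooling of
 * the per-frame scores; when the option is unset, pool_method_map() falls
 * back to mean pooling, which uninit() below relies on.
 */
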
static av_cold void uninit(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    int err = 0;

    ff_framesync_uninit(&s->fs);

    if (!s->frame_cnt)
        goto clean_up;

    err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0);
    if (err) {
        av_log(ctx, AV_LOG_ERROR,
               "problem flushing libvmaf context.\n");
    }

    for (unsigned i = 0; i < s->model_cnt; i++) {
        double vmaf_score;
        err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool),
                                &vmaf_score, 0, s->frame_cnt - 1);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem getting pooled vmaf score.\n");
        }

        av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score);
    }

    if (s->vmaf) {
        if (s->log_path && !err)
            vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt));
    }

clean_up:
    if (s->model) {
        for (unsigned i = 0; i < s->model_cnt; i++) {
            if (s->model[i])
                vmaf_model_destroy(s->model[i]);
        }
        av_free(s->model);
    }

    if (s->vmaf)
        vmaf_close(s->vmaf);
}

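/* Note on uninit() above: calling vmaf_read_pictures() with NULL pictures
 * flushes libvmaf's internal queue, which must happen before
 * vmaf_score_pooled() is asked for the pooled result. */
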
static const AVFilterPad libvmaf_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name = "reference",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
};

static const AVFilterPad libvmaf_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_libvmaf = {
    .name = "libvmaf",
    .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit = libvmaf_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(LIBVMAFContext),
    .priv_class = &libvmaf_class,
    FILTER_INPUTS(libvmaf_inputs),
    FILTER_OUTPUTS(libvmaf_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
};

#if CONFIG_LIBVMAF_CUDA_FILTER
static const enum AVPixelFormat supported_formats[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUV444P16,
};

static int format_is_supported(enum AVPixelFormat fmt)
{
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
        if (supported_formats[i] == fmt)
            return 1;
    return 0;
}

static int config_props_cuda(AVFilterLink *outlink)
{
    int err;
    AVFilterContext *ctx = outlink->src;
    LIBVMAFContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    FilterLink *inl = ff_filter_link(inlink);
    AVHWFramesContext *frames_ctx = (AVHWFramesContext*) inl->hw_frames_ctx->data;
    AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
    CUcontext cu_ctx = device_hwctx->cuda_ctx;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frames_ctx->sw_format);

    VmafConfiguration cfg = {
        .log_level = log_level_map(av_log_get_level()),
        .n_subsample = s->n_subsample,
        .n_threads = s->n_threads,
    };

    VmafCudaPictureConfiguration cuda_pic_cfg = {
        .pic_params = {
            .bpc = desc->comp[0].depth,
            .w = inlink->w,
            .h = inlink->h,
            .pix_fmt = pix_fmt_map(frames_ctx->sw_format),
        },
        .pic_prealloc_method = VMAF_CUDA_PICTURE_PREALLOCATION_METHOD_DEVICE,
    };

    VmafCudaConfiguration cuda_cfg = {
        .cu_ctx = cu_ctx,
    };

    if (!format_is_supported(frames_ctx->sw_format)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported input format: %s\n", desc->name);
        return AVERROR(EINVAL);
    }

    err = vmaf_init(&s->vmaf, cfg);
    if (err)
        return AVERROR(EINVAL);

    err = vmaf_cuda_state_init(&s->cu_state, cuda_cfg);
    if (err)
        return AVERROR(EINVAL);

    err = vmaf_cuda_import_state(s->vmaf, s->cu_state);
    if (err)
        return AVERROR(EINVAL);

    err = vmaf_cuda_preallocate_pictures(s->vmaf, cuda_pic_cfg);
    if (err < 0)
        return err;

    err = parse_models(ctx);
    if (err)
        return err;

    err = parse_features(ctx);
    if (err)
        return err;

    return config_output(outlink);
}

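/* Fetch a preallocated VmafPicture and fill it from a CUDA hardware frame
 * with a device-to-device cuMemcpy2D; as written, the loop breaks after
 * copying plane 0 only. */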
static int copy_picture_data_cuda(VmafContext* vmaf,
                                  AVCUDADeviceContext* device_hwctx,
                                  AVFrame* src, VmafPicture* dst,
                                  enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
    CudaFunctions *cu = device_hwctx->internal->cuda_dl;

    CUDA_MEMCPY2D m = {
        .srcMemoryType = CU_MEMORYTYPE_DEVICE,
        .dstMemoryType = CU_MEMORYTYPE_DEVICE,
    };

    int err = vmaf_cuda_fetch_preallocated_picture(vmaf, dst);
    if (err)
        return AVERROR(ENOMEM);

    err = cu->cuCtxPushCurrent(device_hwctx->cuda_ctx);
    if (err)
        return AVERROR_EXTERNAL;

    for (unsigned i = 0; i < pix_desc->nb_components; i++) {
        m.srcDevice = (CUdeviceptr) src->data[i];
        m.srcPitch = src->linesize[i];
        m.dstDevice = (CUdeviceptr) dst->data[i];
        m.dstPitch = dst->stride[i];
        m.WidthInBytes = dst->w[i] * ((dst->bpc + 7) / 8);
        m.Height = dst->h[i];

        err = cu->cuMemcpy2D(&m);
        if (err)
            return AVERROR_EXTERNAL;
        break;
    }

    err = cu->cuCtxPopCurrent(NULL);
    if (err)
        return AVERROR_EXTERNAL;

    return 0;
}

static int do_vmaf_cuda(FFFrameSync* fs)
{
    AVFilterContext* ctx = fs->parent;
    LIBVMAFContext* s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    FilterLink *inl = ff_filter_link(inlink);
    AVHWFramesContext *frames_ctx = (AVHWFramesContext*) inl->hw_frames_ctx->data;
    AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
    VmafPicture pic_ref, pic_dist;
    AVFrame *ref, *dist;

    int err = 0;

    err = ff_framesync_dualinput_get(fs, &dist, &ref);
    if (err < 0)
        return err;
    if (ctx->is_disabled || !ref)
        return ff_filter_frame(ctx->outputs[0], dist);

    err = copy_picture_data_cuda(s->vmaf, device_hwctx, ref, &pic_ref,
                                 frames_ctx->sw_format);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during copy_picture_data_cuda.\n");
        return AVERROR(ENOMEM);
    }

    err = copy_picture_data_cuda(s->vmaf, device_hwctx, dist, &pic_dist,
                                 frames_ctx->sw_format);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during copy_picture_data_cuda.\n");
        return AVERROR(ENOMEM);
    }

    err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
        return AVERROR(EINVAL);
    }

    return ff_filter_frame(ctx->outputs[0], dist);
}

static av_cold int init_cuda(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    s->fs.on_event = do_vmaf_cuda;
    return 0;
}

static const AVFilterPad libvmaf_outputs_cuda[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props_cuda,
    },
};

const AVFilter ff_vf_libvmaf_cuda = {
    .name = "libvmaf_cuda",
    .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit = libvmaf_framesync_preinit,
    .init = init_cuda,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(LIBVMAFContext),
    .priv_class = &libvmaf_class,
    FILTER_INPUTS(libvmaf_inputs),
    FILTER_OUTPUTS(libvmaf_outputs_cuda),
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
#endif
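
/*
 * Usage sketch for the CUDA variant (decoder and file names are
 * illustrative assumptions):
 *
 *   ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i distorted.mp4 \
 *          -hwaccel cuda -hwaccel_output_format cuda -i reference.mp4 \
 *          -lavfi libvmaf_cuda -f null -
 *
 * Both inputs must already be CUDA frames (AV_PIX_FMT_CUDA) whose software
 * format passes format_is_supported(); this filter performs no implicit
 * hwupload.
 */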