FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "cpu.h"
24 #include "dict.h"
25 #include "frame.h"
26 #include "imgutils.h"
27 #include "mem.h"
28 #include "samplefmt.h"
29 #include "hwcontext.h"
30 
31 static const AVSideDataDescriptor sd_props[] = {
32  [AV_FRAME_DATA_PANSCAN] = { "AVPanScan" },
33  [AV_FRAME_DATA_A53_CC] = { "ATSC A53 Part 4 Closed Captions" },
34  [AV_FRAME_DATA_MATRIXENCODING] = { "AVMatrixEncoding" },
35  [AV_FRAME_DATA_DOWNMIX_INFO] = { "Metadata relevant to a downmix procedure" },
36  [AV_FRAME_DATA_AFD] = { "Active format description" },
37  [AV_FRAME_DATA_MOTION_VECTORS] = { "Motion vectors" },
38  [AV_FRAME_DATA_SKIP_SAMPLES] = { "Skip samples" },
39  [AV_FRAME_DATA_GOP_TIMECODE] = { "GOP timecode" },
40  [AV_FRAME_DATA_S12M_TIMECODE] = { "SMPTE 12-1 timecode" },
41  [AV_FRAME_DATA_DYNAMIC_HDR_PLUS] = { "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)" },
42  [AV_FRAME_DATA_DYNAMIC_HDR_VIVID] = { "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)" },
43  [AV_FRAME_DATA_REGIONS_OF_INTEREST] = { "Regions Of Interest" },
44  [AV_FRAME_DATA_VIDEO_ENC_PARAMS] = { "Video encoding parameters" },
45  [AV_FRAME_DATA_FILM_GRAIN_PARAMS] = { "Film grain parameters" },
46  [AV_FRAME_DATA_DETECTION_BBOXES] = { "Bounding boxes for object detection and classification" },
47  [AV_FRAME_DATA_DOVI_RPU_BUFFER] = { "Dolby Vision RPU Data" },
48  [AV_FRAME_DATA_DOVI_METADATA] = { "Dolby Vision Metadata" },
49  [AV_FRAME_DATA_LCEVC] = { "LCEVC NAL data" },
50  [AV_FRAME_DATA_VIEW_ID] = { "View ID" },
51  [AV_FRAME_DATA_STEREO3D] = { "Stereo 3D", AV_SIDE_DATA_PROP_GLOBAL },
52  [AV_FRAME_DATA_REPLAYGAIN] = { "AVReplayGain", AV_SIDE_DATA_PROP_GLOBAL },
53  [AV_FRAME_DATA_DISPLAYMATRIX] = { "3x3 displaymatrix", AV_SIDE_DATA_PROP_GLOBAL },
54  [AV_FRAME_DATA_AUDIO_SERVICE_TYPE] = { "Audio service type", AV_SIDE_DATA_PROP_GLOBAL },
55  [AV_FRAME_DATA_MASTERING_DISPLAY_METADATA] = { "Mastering display metadata", AV_SIDE_DATA_PROP_GLOBAL },
56  [AV_FRAME_DATA_CONTENT_LIGHT_LEVEL] = { "Content light level metadata", AV_SIDE_DATA_PROP_GLOBAL },
57  [AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT] = { "Ambient viewing environment", AV_SIDE_DATA_PROP_GLOBAL },
58  [AV_FRAME_DATA_SPHERICAL] = { "Spherical Mapping", AV_SIDE_DATA_PROP_GLOBAL },
59  [AV_FRAME_DATA_ICC_PROFILE] = { "ICC profile", AV_SIDE_DATA_PROP_GLOBAL },
60  [AV_FRAME_DATA_SEI_UNREGISTERED] = { "H.26[45] User Data Unregistered SEI message", AV_SIDE_DATA_PROP_MULTI },
61 };
62 
63 static void get_frame_defaults(AVFrame *frame)
64 {
65  memset(frame, 0, sizeof(*frame));
66 
67  frame->pts =
68  frame->pkt_dts = AV_NOPTS_VALUE;
69  frame->best_effort_timestamp = AV_NOPTS_VALUE;
70  frame->duration = 0;
71 #if FF_API_FRAME_PKT
72 FF_DISABLE_DEPRECATION_WARNINGS
73  frame->pkt_pos = -1;
74  frame->pkt_size = -1;
75 FF_ENABLE_DEPRECATION_WARNINGS
76 #endif
77  frame->time_base = (AVRational){ 0, 1 };
78  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
79  frame->format = -1; /* unknown */
80  frame->extended_data = frame->data;
81  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
82  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
83  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
84  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
85  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
86  frame->flags = 0;
87 }
88 
89 static void free_side_data(AVFrameSideData **ptr_sd)
90 {
91  AVFrameSideData *sd = *ptr_sd;
92 
93  av_buffer_unref(&sd->buf);
94  av_dict_free(&sd->metadata);
95  av_freep(ptr_sd);
96 }
97 
98 static void wipe_side_data(AVFrameSideData ***sd, int *nb_side_data)
99 {
100  for (int i = 0; i < *nb_side_data; i++) {
101  free_side_data(&((*sd)[i]));
102  }
103  *nb_side_data = 0;
104 
105  av_freep(sd);
106 }
107 
108 static void frame_side_data_wipe(AVFrame *frame)
109 {
110  wipe_side_data(&frame->side_data, &frame->nb_side_data);
111 }
112 
113 void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
114 {
115  wipe_side_data(sd, nb_sd);
116 }
117 
118 static void remove_side_data(AVFrameSideData ***sd, int *nb_side_data,
119  const enum AVFrameSideDataType type)
120 {
121  for (int i = *nb_side_data - 1; i >= 0; i--) {
122  AVFrameSideData *entry = ((*sd)[i]);
123  if (entry->type != type)
124  continue;
125 
126  free_side_data(&entry);
127 
128  ((*sd)[i]) = ((*sd)[*nb_side_data - 1]);
129  (*nb_side_data)--;
130  }
131 }
132 
133 static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd,
134  const AVFrameSideData *target)
135 {
136  for (int i = *nb_sd - 1; i >= 0; i--) {
137  AVFrameSideData *entry = ((*sd)[i]);
138  if (entry != target)
139  continue;
140 
141  free_side_data(&entry);
142 
143  ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
144  (*nb_sd)--;
145 
146  return;
147  }
148 }
149 
150 AVFrame *av_frame_alloc(void)
151 {
152  AVFrame *frame = av_malloc(sizeof(*frame));
153 
154  if (!frame)
155  return NULL;
156 
157  get_frame_defaults(frame);
158 
159  return frame;
160 }
161 
162 void av_frame_free(AVFrame **frame)
163 {
164  if (!frame || !*frame)
165  return;
166 
167  av_frame_unref(*frame);
168  av_freep(frame);
169 }
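/* Illustrative sketch (not part of the original source): the intended
 * allocation/ownership pattern for the two functions above, assuming a
 * caller that links against libavutil. av_frame_alloc() only creates the
 * AVFrame struct itself; buffers are attached later (e.g. by
 * av_frame_get_buffer() or a decoder), and av_frame_free() releases both.
 *
 *     #include <libavutil/frame.h>
 *
 *     AVFrame *f = av_frame_alloc();
 *     if (!f)
 *         return AVERROR(ENOMEM);
 *     // ... use the frame ...
 *     av_frame_free(&f);   // frees data and struct, sets f to NULL
 */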
170 
171 #define ALIGN (HAVE_SIMD_ALIGN_64 ? 64 : 32)
172 
173 static int get_video_buffer(AVFrame *frame, int align)
174 {
175  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
176  int ret, padded_height, total_size;
177  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
178  ptrdiff_t linesizes[4];
179  size_t sizes[4];
180 
181  if (!desc)
182  return AVERROR(EINVAL);
183 
184  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
185  return ret;
186 
187  if (!frame->linesize[0]) {
188  if (align <= 0)
189  align = ALIGN;
190 
191  for (int i = 1; i <= align; i += i) {
192  ret = av_image_fill_linesizes(frame->linesize, frame->format,
193  FFALIGN(frame->width, i));
194  if (ret < 0)
195  return ret;
196  if (!(frame->linesize[0] & (align-1)))
197  break;
198  }
199 
200  for (int i = 0; i < 4 && frame->linesize[i]; i++)
201  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
202  }
203 
204  for (int i = 0; i < 4; i++)
205  linesizes[i] = frame->linesize[i];
206 
207  padded_height = FFALIGN(frame->height, 32);
208  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
209  padded_height, linesizes)) < 0)
210  return ret;
211 
212  total_size = 4*plane_padding;
213  for (int i = 0; i < 4; i++) {
214  if (sizes[i] > INT_MAX - total_size)
215  return AVERROR(EINVAL);
216  total_size += sizes[i];
217  }
218 
219  frame->buf[0] = av_buffer_alloc(total_size);
220  if (!frame->buf[0]) {
221  ret = AVERROR(ENOMEM);
222  goto fail;
223  }
224 
225  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
226  frame->buf[0]->data, frame->linesize)) < 0)
227  goto fail;
228 
229  for (int i = 1; i < 4; i++) {
230  if (frame->data[i])
231  frame->data[i] += i * plane_padding;
232  }
233 
234  frame->extended_data = frame->data;
235 
236  return 0;
237 fail:
238  av_frame_unref(frame);
239  return ret;
240 }
241 
242 static int get_audio_buffer(AVFrame *frame, int align)
243 {
244  int planar = av_sample_fmt_is_planar(frame->format);
245  int channels, planes;
246  int ret;
247 
248  channels = frame->ch_layout.nb_channels;
249  planes = planar ? channels : 1;
250  if (!frame->linesize[0]) {
251  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
252  frame->nb_samples, frame->format,
253  align);
254  if (ret < 0)
255  return ret;
256  }
257 
258  if (planes > AV_NUM_DATA_POINTERS) {
259  frame->extended_data = av_calloc(planes,
260  sizeof(*frame->extended_data));
261  frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
262  sizeof(*frame->extended_buf));
263  if (!frame->extended_data || !frame->extended_buf) {
264  av_freep(&frame->extended_data);
265  av_freep(&frame->extended_buf);
266  return AVERROR(ENOMEM);
267  }
268  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
269  } else
270  frame->extended_data = frame->data;
271 
272  for (int i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
273  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
274  if (!frame->buf[i]) {
275  av_frame_unref(frame);
276  return AVERROR(ENOMEM);
277  }
278  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
279  }
280  for (int i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
281  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
282  if (!frame->extended_buf[i]) {
283  av_frame_unref(frame);
284  return AVERROR(ENOMEM);
285  }
286  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
287  }
288  return 0;
289 
290 }
291 
292 int av_frame_get_buffer(AVFrame *frame, int align)
293 {
294  if (frame->format < 0)
295  return AVERROR(EINVAL);
296 
297  if (frame->width > 0 && frame->height > 0)
298  return get_video_buffer(frame, align);
299  else if (frame->nb_samples > 0 &&
300  (av_channel_layout_check(&frame->ch_layout)))
301  return get_audio_buffer(frame, align);
302 
303  return AVERROR(EINVAL);
304 }
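/* Illustrative sketch (not part of the original source): typical use of
 * av_frame_get_buffer(). The format and width/height (or nb_samples and
 * ch_layout for audio) must be set before the call; align = 0 lets the
 * function pick a suitable alignment.
 *
 *     AVFrame *f = av_frame_alloc();
 *     if (!f)
 *         return AVERROR(ENOMEM);
 *     f->format = AV_PIX_FMT_YUV420P;
 *     f->width  = 1280;
 *     f->height = 720;
 *     int ret = av_frame_get_buffer(f, 0);
 *     if (ret < 0)
 *         av_frame_free(&f);
 */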
305 
306 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
307 {
308  int ret;
309 
310 #if FF_API_FRAME_KEY
311 FF_DISABLE_DEPRECATION_WARNINGS
312  dst->key_frame = src->key_frame;
313 FF_ENABLE_DEPRECATION_WARNINGS
314 #endif
315  dst->pict_type = src->pict_type;
316  dst->sample_aspect_ratio = src->sample_aspect_ratio;
317  dst->crop_top = src->crop_top;
318  dst->crop_bottom = src->crop_bottom;
319  dst->crop_left = src->crop_left;
320  dst->crop_right = src->crop_right;
321  dst->pts = src->pts;
322  dst->duration = src->duration;
323  dst->repeat_pict = src->repeat_pict;
324 #if FF_API_INTERLACED_FRAME
325 FF_DISABLE_DEPRECATION_WARNINGS
326  dst->interlaced_frame = src->interlaced_frame;
327  dst->top_field_first = src->top_field_first;
328 FF_ENABLE_DEPRECATION_WARNINGS
329 #endif
330 #if FF_API_PALETTE_HAS_CHANGED
331 FF_DISABLE_DEPRECATION_WARNINGS
332  dst->palette_has_changed = src->palette_has_changed;
333 FF_ENABLE_DEPRECATION_WARNINGS
334 #endif
335  dst->sample_rate = src->sample_rate;
336  dst->opaque = src->opaque;
337  dst->pkt_dts = src->pkt_dts;
338 #if FF_API_FRAME_PKT
339 FF_DISABLE_DEPRECATION_WARNINGS
340  dst->pkt_pos = src->pkt_pos;
341  dst->pkt_size = src->pkt_size;
342 FF_ENABLE_DEPRECATION_WARNINGS
343 #endif
344  dst->time_base = src->time_base;
345  dst->quality = src->quality;
346  dst->best_effort_timestamp = src->best_effort_timestamp;
347  dst->flags = src->flags;
348  dst->decode_error_flags = src->decode_error_flags;
349  dst->color_primaries = src->color_primaries;
350  dst->color_trc = src->color_trc;
351  dst->colorspace = src->colorspace;
352  dst->color_range = src->color_range;
353  dst->chroma_location = src->chroma_location;
354 
355  av_dict_copy(&dst->metadata, src->metadata, 0);
356 
357  for (int i = 0; i < src->nb_side_data; i++) {
358  const AVFrameSideData *sd_src = src->side_data[i];
359  AVFrameSideData *sd_dst;
360  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
361  && (src->width != dst->width || src->height != dst->height))
362  continue;
363  if (force_copy) {
364  sd_dst = av_frame_new_side_data(dst, sd_src->type,
365  sd_src->size);
366  if (!sd_dst) {
367  frame_side_data_wipe(dst);
368  return AVERROR(ENOMEM);
369  }
370  memcpy(sd_dst->data, sd_src->data, sd_src->size);
371  } else {
372  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
373  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
374  if (!sd_dst) {
375  av_buffer_unref(&ref);
376  frame_side_data_wipe(dst);
377  return AVERROR(ENOMEM);
378  }
379  }
380  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
381  }
382 
383  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
384  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
385  return ret;
386 }
387 
388 int av_frame_ref(AVFrame *dst, const AVFrame *src)
389 {
390  int ret = 0;
391 
392  av_assert1(dst->width == 0 && dst->height == 0);
393  av_assert1(dst->ch_layout.nb_channels == 0 &&
394  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
395 
396  dst->format = src->format;
397  dst->width = src->width;
398  dst->height = src->height;
399  dst->nb_samples = src->nb_samples;
400 
401  ret = frame_copy_props(dst, src, 0);
402  if (ret < 0)
403  goto fail;
404 
405  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
406  if (ret < 0)
407  goto fail;
408 
409  /* duplicate the frame data if it's not refcounted */
410  if (!src->buf[0]) {
411  ret = av_frame_get_buffer(dst, 0);
412  if (ret < 0)
413  goto fail;
414 
415  ret = av_frame_copy(dst, src);
416  if (ret < 0)
417  goto fail;
418 
419  return 0;
420  }
421 
422  /* ref the buffers */
423  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
424  if (!src->buf[i])
425  continue;
426  dst->buf[i] = av_buffer_ref(src->buf[i]);
427  if (!dst->buf[i]) {
428  ret = AVERROR(ENOMEM);
429  goto fail;
430  }
431  }
432 
433  if (src->extended_buf) {
434  dst->extended_buf = av_calloc(src->nb_extended_buf,
435  sizeof(*dst->extended_buf));
436  if (!dst->extended_buf) {
437  ret = AVERROR(ENOMEM);
438  goto fail;
439  }
440  dst->nb_extended_buf = src->nb_extended_buf;
441 
442  for (int i = 0; i < src->nb_extended_buf; i++) {
443  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
444  if (!dst->extended_buf[i]) {
445  ret = AVERROR(ENOMEM);
446  goto fail;
447  }
448  }
449  }
450 
451  if (src->hw_frames_ctx) {
452  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
453  if (!dst->hw_frames_ctx) {
454  ret = AVERROR(ENOMEM);
455  goto fail;
456  }
457  }
458 
459  /* duplicate extended data */
460  if (src->extended_data != src->data) {
461  int ch = dst->ch_layout.nb_channels;
462 
463  if (!ch) {
464  ret = AVERROR(EINVAL);
465  goto fail;
466  }
467 
468  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
469  if (!dst->extended_data) {
470  ret = AVERROR(ENOMEM);
471  goto fail;
472  }
473  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
474  } else
475  dst->extended_data = dst->data;
476 
477  memcpy(dst->data, src->data, sizeof(src->data));
478  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
479 
480  return 0;
481 
482 fail:
483  av_frame_unref(dst);
484  return ret;
485 }
486 
487 int av_frame_replace(AVFrame *dst, const AVFrame *src)
488 {
489  int ret = 0;
490 
491  if (dst == src)
492  return AVERROR(EINVAL);
493 
494  if (!src->buf[0]) {
495  av_frame_unref(dst);
496 
497  /* duplicate the frame data if it's not refcounted */
498  if ( src->data[0] || src->data[1]
499  || src->data[2] || src->data[3])
500  return av_frame_ref(dst, src);
501 
502  ret = frame_copy_props(dst, src, 0);
503  if (ret < 0)
504  goto fail;
505  }
506 
507  dst->format = src->format;
508  dst->width = src->width;
509  dst->height = src->height;
510  dst->nb_samples = src->nb_samples;
511 
512  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
513  if (ret < 0)
514  goto fail;
515 
516  frame_side_data_wipe(dst);
517  av_dict_free(&dst->metadata);
518  ret = frame_copy_props(dst, src, 0);
519  if (ret < 0)
520  goto fail;
521 
522  /* replace the buffers */
523  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
524  ret = av_buffer_replace(&dst->buf[i], src->buf[i]);
525  if (ret < 0)
526  goto fail;
527  }
528 
529  if (src->extended_buf) {
530  if (dst->nb_extended_buf != src->nb_extended_buf) {
531  int nb_extended_buf = FFMIN(dst->nb_extended_buf, src->nb_extended_buf);
532  void *tmp;
533 
534  for (int i = nb_extended_buf; i < dst->nb_extended_buf; i++)
535  av_buffer_unref(&dst->extended_buf[i]);
536 
537  tmp = av_realloc_array(dst->extended_buf, sizeof(*dst->extended_buf),
538  src->nb_extended_buf);
539  if (!tmp) {
540  ret = AVERROR(ENOMEM);
541  goto fail;
542  }
543  dst->extended_buf = tmp;
544  dst->nb_extended_buf = src->nb_extended_buf;
545 
546  memset(&dst->extended_buf[nb_extended_buf], 0,
547  (src->nb_extended_buf - nb_extended_buf) * sizeof(*dst->extended_buf));
548  }
549 
550  for (int i = 0; i < src->nb_extended_buf; i++) {
551  ret = av_buffer_replace(&dst->extended_buf[i], src->extended_buf[i]);
552  if (ret < 0)
553  goto fail;
554  }
555  } else if (dst->extended_buf) {
556  for (int i = 0; i < dst->nb_extended_buf; i++)
557  av_buffer_unref(&dst->extended_buf[i]);
558  av_freep(&dst->extended_buf);
559  }
560 
561  ret = av_buffer_replace(&dst->hw_frames_ctx, src->hw_frames_ctx);
562  if (ret < 0)
563  goto fail;
564 
565  if (dst->extended_data != dst->data)
566  av_freep(&dst->extended_data);
567 
568  if (src->extended_data != src->data) {
569  int ch = dst->ch_layout.nb_channels;
570 
571  if (!ch) {
572  ret = AVERROR(EINVAL);
573  goto fail;
574  }
575 
576  if (ch > SIZE_MAX / sizeof(*dst->extended_data))
577  goto fail;
578 
579  dst->extended_data = av_memdup(src->extended_data, sizeof(*dst->extended_data) * ch);
580  if (!dst->extended_data) {
581  ret = AVERROR(ENOMEM);
582  goto fail;
583  }
584  } else
585  dst->extended_data = dst->data;
586 
587  memcpy(dst->data, src->data, sizeof(src->data));
588  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
589 
590  return 0;
591 
592 fail:
593  av_frame_unref(dst);
594  return ret;
595 }
596 
597 AVFrame *av_frame_clone(const AVFrame *src)
598 {
599  AVFrame *ret = av_frame_alloc();
600 
601  if (!ret)
602  return NULL;
603 
604  if (av_frame_ref(ret, src) < 0)
605  av_frame_free(&ret);
606 
607  return ret;
608 }
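/* Illustrative sketch (not part of the original source): reference-counted
 * sharing with av_frame_clone(). "src" is assumed to be an existing,
 * refcounted AVFrame; the clone gets its own AVFrame struct plus new
 * references to the same underlying AVBufferRefs, so no pixel or sample
 * data is copied.
 *
 *     AVFrame *copy = av_frame_clone(src);   // new struct + new buffer refs
 *     if (!copy)
 *         return AVERROR(ENOMEM);
 *     // ... read from copy ...
 *     av_frame_free(&copy);                  // drops only the references
 */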
609 
610 void av_frame_unref(AVFrame *frame)
611 {
612  if (!frame)
613  return;
614 
615  frame_side_data_wipe(frame);
616 
617  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
618  av_buffer_unref(&frame->buf[i]);
619  for (int i = 0; i < frame->nb_extended_buf; i++)
620  av_buffer_unref(&frame->extended_buf[i]);
621  av_freep(&frame->extended_buf);
622  av_dict_free(&frame->metadata);
623 
624  av_buffer_unref(&frame->hw_frames_ctx);
625 
626  av_buffer_unref(&frame->opaque_ref);
627  av_buffer_unref(&frame->private_ref);
628 
629  if (frame->extended_data != frame->data)
630  av_freep(&frame->extended_data);
631 
632  av_channel_layout_uninit(&frame->ch_layout);
633 
634  get_frame_defaults(frame);
635 }
636 
637 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
638 {
639  av_assert1(dst->width == 0 && dst->height == 0);
640  av_assert1(dst->ch_layout.nb_channels == 0 &&
641  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
642 
643  *dst = *src;
644  if (src->extended_data == src->data)
645  dst->extended_data = dst->data;
646  get_frame_defaults(src);
647 }
648 
649 int av_frame_is_writable(AVFrame *frame)
650 {
651  int ret = 1;
652 
653  /* assume non-refcounted frames are not writable */
654  if (!frame->buf[0])
655  return 0;
656 
657  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
658  if (frame->buf[i])
659  ret &= !!av_buffer_is_writable(frame->buf[i]);
660  for (int i = 0; i < frame->nb_extended_buf; i++)
661  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
662 
663  return ret;
664 }
665 
666 int av_frame_make_writable(AVFrame *frame)
667 {
668  AVFrame tmp;
669  int ret;
670 
671  if (av_frame_is_writable(frame))
672  return 0;
673 
674  memset(&tmp, 0, sizeof(tmp));
675  tmp.format = frame->format;
676  tmp.width = frame->width;
677  tmp.height = frame->height;
678  tmp.nb_samples = frame->nb_samples;
679  ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
680  if (ret < 0) {
681  av_frame_unref(&tmp);
682  return ret;
683  }
684 
685  if (frame->hw_frames_ctx)
686  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
687  else
688  ret = av_frame_get_buffer(&tmp, 0);
689  if (ret < 0)
690  return ret;
691 
692  ret = av_frame_copy(&tmp, frame);
693  if (ret < 0) {
694  av_frame_unref(&tmp);
695  return ret;
696  }
697 
698  ret = av_frame_copy_props(&tmp, frame);
699  if (ret < 0) {
700  av_frame_unref(&tmp);
701  return ret;
702  }
703 
704  av_frame_unref(frame);
705 
706  *frame = tmp;
707  if (tmp.data == tmp.extended_data)
708  frame->extended_data = frame->data;
709 
710  return 0;
711 }
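/* Illustrative sketch (not part of the original source): copy-on-write
 * before modifying possibly shared data. "frame" is assumed to be a
 * refcounted AVFrame that other code may still reference; after a
 * successful call its buffers are private to the caller.
 *
 *     int ret = av_frame_make_writable(frame);
 *     if (ret < 0)
 *         return ret;
 *     frame->data[0][0] = 0;   // now safe: no other reference sees this
 */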
712 
713 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
714 {
715  return frame_copy_props(dst, src, 1);
716 }
717 
718 AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane)
719 {
720  uint8_t *data;
721  int planes;
722 
723  if (frame->nb_samples) {
724  int channels = frame->ch_layout.nb_channels;
725  if (!channels)
726  return NULL;
727  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
728  } else
729  planes = 4;
730 
731  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
732  return NULL;
733  data = frame->extended_data[plane];
734 
735  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
736  AVBufferRef *buf = frame->buf[i];
737  if (data >= buf->data && data < buf->data + buf->size)
738  return buf;
739  }
740  for (int i = 0; i < frame->nb_extended_buf; i++) {
741  AVBufferRef *buf = frame->extended_buf[i];
742  if (data >= buf->data && data < buf->data + buf->size)
743  return buf;
744  }
745  return NULL;
746 }
747 
748 static AVFrameSideData *add_side_data_from_buf_ext(AVFrameSideData ***sd,
749  int *nb_sd,
750  enum AVFrameSideDataType type,
751  AVBufferRef *buf, uint8_t *data,
752  size_t size)
753 {
754  AVFrameSideData *ret, **tmp;
755 
756  // *nb_sd + 1 needs to fit into an int and a size_t.
757  if ((unsigned)*nb_sd >= FFMIN(INT_MAX, SIZE_MAX))
758  return NULL;
759 
760  tmp = av_realloc_array(*sd, sizeof(**sd), *nb_sd + 1);
761  if (!tmp)
762  return NULL;
763  *sd = tmp;
764 
765  ret = av_mallocz(sizeof(*ret));
766  if (!ret)
767  return NULL;
768 
769  ret->buf = buf;
770  ret->data = data;
771  ret->size = size;
772  ret->type = type;
773 
774  (*sd)[(*nb_sd)++] = ret;
775 
776  return ret;
777 }
778 
779 static AVFrameSideData *add_side_data_from_buf(AVFrameSideData ***sd,
780  int *nb_sd,
781  enum AVFrameSideDataType type,
782  AVBufferRef *buf)
783 {
784  if (!buf)
785  return NULL;
786 
787  return add_side_data_from_buf_ext(sd, nb_sd, type, buf, buf->data, buf->size);
788 }
789 
790 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
791  enum AVFrameSideDataType type,
792  AVBufferRef *buf)
793 {
794  return
795  add_side_data_from_buf(
796  &frame->side_data, &frame->nb_side_data, type, buf);
797 }
798 
799 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
800  enum AVFrameSideDataType type,
801  size_t size)
802 {
803  AVFrameSideData *ret;
804  AVBufferRef *buf = av_buffer_alloc(size);
805  ret = av_frame_new_side_data_from_buf(frame, type, buf);
806  if (!ret)
807  av_buffer_unref(&buf);
808  return ret;
809 }
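/* Illustrative sketch (not part of the original source): attaching side data
 * to a frame. This example uses AV_FRAME_DATA_DISPLAYMATRIX, whose payload is
 * nine int32_t values, and av_display_rotation_set() from
 * libavutil/display.h.
 *
 *     AVFrameSideData *sd =
 *         av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
 *                                9 * sizeof(int32_t));
 *     if (!sd)
 *         return AVERROR(ENOMEM);
 *     av_display_rotation_set((int32_t *)sd->data, 90.0);
 */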
810 
811 static AVFrameSideData *replace_side_data_from_buf(AVFrameSideData *dst,
812  AVBufferRef *buf, int flags)
813 {
814  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
815  return NULL;
816 
817  av_dict_free(&dst->metadata);
818  av_buffer_unref(&dst->buf);
819  dst->buf = buf;
820  dst->data = buf->data;
821  dst->size = buf->size;
822  return dst;
823 }
824 
825 AVFrameSideData *av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd,
826  enum AVFrameSideDataType type,
827  size_t size, unsigned int flags)
828 {
829  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
830  AVBufferRef *buf = av_buffer_alloc(size);
831  AVFrameSideData *ret = NULL;
832 
833  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
834  remove_side_data(sd, nb_sd, type);
835  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
836  (ret = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
837  ret = replace_side_data_from_buf(ret, buf, flags);
838  if (!ret)
839  av_buffer_unref(&buf);
840  return ret;
841  }
842 
843  ret = add_side_data_from_buf(sd, nb_sd, type, buf);
844  if (!ret)
845  av_buffer_unref(&buf);
846 
847  return ret;
848 }
849 
850 AVFrameSideData *av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd,
851  enum AVFrameSideDataType type,
852  AVBufferRef **pbuf, unsigned int flags)
853 {
854  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
855  AVFrameSideData *sd_dst = NULL;
856  AVBufferRef *buf = *pbuf;
857 
858  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
859  remove_side_data(sd, nb_sd, type);
860  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
861  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
862  sd_dst = replace_side_data_from_buf(sd_dst, buf, flags);
863  if (sd_dst)
864  *pbuf = NULL;
865  return sd_dst;
866  }
867 
868  sd_dst = add_side_data_from_buf(sd, nb_sd, type, buf);
869  if (!sd_dst)
870  return NULL;
871 
872  *pbuf = NULL;
873  return sd_dst;
874 }
875 
876 int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd,
877  const AVFrameSideData *src, unsigned int flags)
878 {
879  const AVSideDataDescriptor *desc;
880  AVBufferRef *buf = NULL;
881  AVFrameSideData *sd_dst = NULL;
882  int ret = AVERROR_BUG;
883 
884  if (!sd || !src || !nb_sd || (*nb_sd && !*sd))
885  return AVERROR(EINVAL);
886 
887  desc = av_frame_side_data_desc(src->type);
888  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
889  remove_side_data(sd, nb_sd, src->type);
890  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
891  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, src->type))) {
892  AVDictionary *dict = NULL;
893 
894  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
895  return AVERROR(EEXIST);
896 
897  ret = av_dict_copy(&dict, src->metadata, 0);
898  if (ret < 0)
899  return ret;
900 
901  ret = av_buffer_replace(&sd_dst->buf, src->buf);
902  if (ret < 0) {
903  av_dict_free(&dict);
904  return ret;
905  }
906 
907  av_dict_free(&sd_dst->metadata);
908  sd_dst->metadata = dict;
909  sd_dst->data = src->data;
910  sd_dst->size = src->size;
911  return 0;
912  }
913 
914  buf = av_buffer_ref(src->buf);
915  if (!buf)
916  return AVERROR(ENOMEM);
917 
918  sd_dst = add_side_data_from_buf_ext(sd, nb_sd, src->type, buf,
919  src->data, src->size);
920  if (!sd_dst) {
921  av_buffer_unref(&buf);
922  return AVERROR(ENOMEM);
923  }
924 
925  ret = av_dict_copy(&sd_dst->metadata, src->metadata, 0);
926  if (ret < 0) {
927  remove_side_data_by_entry(sd, nb_sd, sd_dst);
928  return ret;
929  }
930 
931  return 0;
932 }
933 
934 const AVFrameSideData *av_frame_side_data_get_c(const AVFrameSideData * const *sd,
935  const int nb_sd,
936  enum AVFrameSideDataType type)
937 {
938  for (int i = 0; i < nb_sd; i++) {
939  if (sd[i]->type == type)
940  return sd[i];
941  }
942  return NULL;
943 }
944 
945 void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd,
946  enum AVFrameSideDataType type)
947 {
948  remove_side_data(sd, nb_sd, type);
949 }
950 
951 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
952  enum AVFrameSideDataType type)
953 {
954  return (AVFrameSideData *)av_frame_side_data_get(
955  frame->side_data, frame->nb_side_data,
956  type
957  );
958 }
959 
960 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
961 {
962  int planes;
963 
964  if (dst->width < src->width ||
965  dst->height < src->height)
966  return AVERROR(EINVAL);
967 
968  if (src->hw_frames_ctx || dst->hw_frames_ctx)
969  return av_hwframe_transfer_data(dst, src, 0);
970 
971  planes = av_pix_fmt_count_planes(dst->format);
972  for (int i = 0; i < planes; i++)
973  if (!dst->data[i] || !src->data[i])
974  return AVERROR(EINVAL);
975 
976  av_image_copy2(dst->data, dst->linesize,
977  src->data, src->linesize,
978  dst->format, src->width, src->height);
979 
980  return 0;
981 }
982 
983 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
984 {
985  int planar = av_sample_fmt_is_planar(dst->format);
986  int channels = dst->ch_layout.nb_channels;
987  int planes = planar ? channels : 1;
988 
989  if (dst->nb_samples != src->nb_samples ||
990  av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
991  return AVERROR(EINVAL);
992 
993  for (int i = 0; i < planes; i++)
994  if (!dst->extended_data[i] || !src->extended_data[i])
995  return AVERROR(EINVAL);
996 
997  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
998  dst->nb_samples, channels, dst->format);
999 
1000  return 0;
1001 }
1002 
1003 int av_frame_copy(AVFrame *dst, const AVFrame *src)
1004 {
1005  if (dst->format != src->format || dst->format < 0)
1006  return AVERROR(EINVAL);
1007 
1008  if (dst->width > 0 && dst->height > 0)
1009  return frame_copy_video(dst, src);
1010  else if (dst->nb_samples > 0 &&
1011  (av_channel_layout_check(&dst->ch_layout)))
1012  return frame_copy_audio(dst, src);
1013 
1014  return AVERROR(EINVAL);
1015 }
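/* Illustrative sketch (not part of the original source): copying frame data
 * between two frames allocated with matching format and dimensions (or
 * matching audio parameters). av_frame_copy() copies only the data planes;
 * properties such as pts, color information and side data are copied
 * separately with av_frame_copy_props().
 *
 *     int ret = av_frame_copy(dst, src);
 *     if (ret >= 0)
 *         ret = av_frame_copy_props(dst, src);
 */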
1016 
1017 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
1018 {
1019  remove_side_data(&frame->side_data, &frame->nb_side_data, type);
1020 }
1021 
1022 const AVSideDataDescriptor *av_frame_side_data_desc(enum AVFrameSideDataType type)
1023 {
1024  unsigned t = type;
1025  if (t < FF_ARRAY_ELEMS(sd_props) && sd_props[t].name)
1026  return &sd_props[t];
1027  return NULL;
1028 }
1029 
1030 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
1031 {
1032  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
1033  return desc ? desc->name : NULL;
1034 }
1035 
1036 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
1037  const AVPixFmtDescriptor *desc)
1038 {
1039  for (int i = 0; frame->data[i]; i++) {
1040  const AVComponentDescriptor *comp = NULL;
1041  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
1042  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
1043 
1044  if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
1045  offsets[i] = 0;
1046  break;
1047  }
1048 
1049  /* find any component descriptor for this plane */
1050  for (int j = 0; j < desc->nb_components; j++) {
1051  if (desc->comp[j].plane == i) {
1052  comp = &desc->comp[j];
1053  break;
1054  }
1055  }
1056  if (!comp)
1057  return AVERROR_BUG;
1058 
1059  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
1060  (frame->crop_left >> shift_x) * comp->step;
1061  }
1062 
1063  return 0;
1064 }
1065 
1066 int av_frame_apply_cropping(AVFrame *frame, int flags)
1067 {
1068  const AVPixFmtDescriptor *desc;
1069  size_t offsets[4];
1070 
1071  if (!(frame->width > 0 && frame->height > 0))
1072  return AVERROR(EINVAL);
1073 
1074  if (frame->crop_left >= INT_MAX - frame->crop_right ||
1075  frame->crop_top >= INT_MAX - frame->crop_bottom ||
1076  (frame->crop_left + frame->crop_right) >= frame->width ||
1077  (frame->crop_top + frame->crop_bottom) >= frame->height)
1078  return AVERROR(ERANGE);
1079 
1080  desc = av_pix_fmt_desc_get(frame->format);
1081  if (!desc)
1082  return AVERROR_BUG;
1083 
1084  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
1085  * formats cannot be easily handled here either (and corresponding decoders
1086  * should not export any cropping anyway), so do the same for those as well.
1087  * */
1088  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
1089  frame->width -= frame->crop_right;
1090  frame->height -= frame->crop_bottom;
1091  frame->crop_right = 0;
1092  frame->crop_bottom = 0;
1093  return 0;
1094  }
1095 
1096  /* calculate the offsets for each plane */
1097  calc_cropping_offsets(offsets, frame, desc);
1098 
1099  /* adjust the offsets to avoid breaking alignment */
1100  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
1101  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
1102  int min_log2_align = INT_MAX;
1103 
1104  for (int i = 0; frame->data[i]; i++) {
1105  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
1106  min_log2_align = FFMIN(log2_align, min_log2_align);
1107  }
1108 
1109  /* we assume, and it should always be true, that the data alignment is
1110  * related to the cropping alignment by a constant power-of-2 factor */
1111  if (log2_crop_align < min_log2_align)
1112  return AVERROR_BUG;
1113 
1114  if (min_log2_align < 5 && log2_crop_align != INT_MAX) {
1115  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
1116  calc_cropping_offsets(offsets, frame, desc);
1117  }
1118  }
1119 
1120  for (int i = 0; frame->data[i]; i++)
1121  frame->data[i] += offsets[i];
1122 
1123  frame->width -= (frame->crop_left + frame->crop_right);
1124  frame->height -= (frame->crop_top + frame->crop_bottom);
1125  frame->crop_left = 0;
1126  frame->crop_right = 0;
1127  frame->crop_top = 0;
1128  frame->crop_bottom = 0;
1129 
1130  return 0;
1131 }
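/* Illustrative sketch (not part of the original source): applying decoder
 * cropping. Decoders export crop_left/crop_right/crop_top/crop_bottom on the
 * frame; this call adjusts the data pointers and width/height in place.
 * Passing AV_FRAME_CROP_UNALIGNED crops exactly, at the cost of potentially
 * unaligned data pointers.
 *
 *     int ret = av_frame_apply_cropping(frame, 0);
 *     if (ret < 0)
 *         return ret;
 */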