FFmpeg
magicyuv.c
Go to the documentation of this file.
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #define CACHED_BITSTREAM_READER !ARCH_X86_32
26 
27 #include "libavutil/mem.h"
28 #include "libavutil/pixdesc.h"
29 
30 #include "avcodec.h"
31 #include "bytestream.h"
32 #include "codec_internal.h"
33 #include "decode.h"
34 #include "get_bits.h"
35 #include "lossless_videodsp.h"
36 #include "thread.h"
37 
38 #define VLC_BITS 12
39 
/* Location of one compressed slice inside the packet payload. */
typedef struct Slice {
    uint32_t start; // byte offset of the slice from the start of the packet
    uint32_t size;  // slice size in bytes
} Slice;
44 
45 typedef enum Prediction {
46  LEFT = 1,
49 } Prediction;
50 
/* One symbol of a Huffman table: code length plus the symbol value. */
typedef struct HuffEntry {
    uint8_t  len; // code length in bits (1..32 accepted by build_huffman())
    uint16_t sym; // decoded symbol value
} HuffEntry;
55 
56 typedef struct MagicYUVContext {
58  int max;
59  int bps;
61  int nb_slices;
62  int planes; // number of encoded planes in bitstream
63  int decorrelate; // postprocessing work
64  int color_matrix; // video color matrix
65  int flags;
66  int interlaced; // video is interlaced
67  const uint8_t *buf; // pointer to AVPacket->data
68  int hshift[4];
69  int vshift[4];
70  Slice *slices[4]; // slice bitstream positions for each plane
71  unsigned int slices_size[4]; // slice sizes for each plane
72  VLC vlc[4]; // VLC for each plane
73  VLC_MULTI multi[4]; // Buffer for joint VLC data
74  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
75  int j, int threadnr);
77  HuffEntry he[1 << 14];
78  uint8_t len[1 << 14];
80 
81 static int huff_build(AVCodecContext *avctx,
82  const uint8_t len[], uint16_t codes_pos[33],
83  VLC *vlc, VLC_MULTI *multi, int nb_elems, void *logctx)
84 {
85  MagicYUVContext *s = avctx->priv_data;
86  HuffEntry *he = s->he;
87 
88  for (int i = 31; i > 0; i--)
89  codes_pos[i] += codes_pos[i + 1];
90 
91  for (unsigned i = nb_elems; i-- > 0;)
92  he[--codes_pos[len[i]]] = (HuffEntry){ len[i], i };
93 
94  ff_vlc_free(vlc);
95  ff_vlc_free_multi(multi);
96  return ff_vlc_init_multi_from_lengths(vlc, multi, FFMIN(he[0].len, VLC_BITS), nb_elems, nb_elems,
97  &he[0].len, sizeof(he[0]),
98  &he[0].sym, sizeof(he[0]), sizeof(he[0].sym),
99  0, 0, logctx);
100 }
101 
/**
 * Median prediction for 16-bit samples: each output is the median of
 * (left, above, left+above-aboveleft) plus the coded residual, masked
 * to the sample range.
 *
 * @param dst       output row (may alias diff, as in the MEDIAN case)
 * @param src1      row above the current one
 * @param diff      residuals for the current row
 * @param w         number of samples
 * @param left      in/out: running left-neighbour value
 * @param left_top  in/out: running above-left value
 * @param max       sample mask (e.g. 0x3FF for 10-bit)
 */
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
                                   const uint16_t *diff, intptr_t w,
                                   int *left, int *left_top, int max)
{
    uint16_t cur        = *left;
    uint16_t above_left = *left_top;

    for (intptr_t pos = 0; pos < w; pos++) {
        const uint16_t above = src1[pos];

        cur = (mid_pred(cur, above, cur + above - above_left) + diff[pos]) & max;
        above_left = above;
        dst[pos] = cur;
    }

    *left     = cur;
    *left_top = above_left;
}
122 
/* Decode one row of `width` VLC-coded samples into dst (element size b
 * bytes), then advance dst by `stride` elements.  With the cached
 * bitstream reader, multi-symbol decoding handles all but the last `c`
 * samples of the row; the tail (or the whole row on x86-32, where
 * CACHED_BITSTREAM_READER is 0) is decoded one symbol at a time.
 * Relies on gb, multi, vlc, vlc_bits, ret, x, width and stride being in
 * the caller's scope, and returns AVERROR_INVALIDDATA from the CALLER on
 * a broken multi-symbol read. */
#define READ_PLANE(dst, plane, b, c) \
{ \
    x = 0; \
    for (; CACHED_BITSTREAM_READER && x < width-c && get_bits_left(&gb) > 0;) {\
        ret = get_vlc_multi(&gb, (uint8_t *)dst + x * b, multi, \
                            vlc, vlc_bits, 3, b); \
        if (ret <= 0) \
            return AVERROR_INVALIDDATA; \
        x += ret; \
    } \
    for (; x < width && get_bits_left(&gb) > 0; x++) \
        dst[x] = get_vlc2(&gb, vlc, vlc_bits, 3); \
    dst += stride; \
}
137 
138 static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
139  int j, int threadnr)
140 {
141  const MagicYUVContext *s = avctx->priv_data;
142  int interlaced = s->interlaced;
143  const int bps = s->bps;
144  const int max = s->max - 1;
145  AVFrame *p = s->p;
146  int i, k, x;
147  GetBitContext gb;
148  uint16_t *dst;
149 
150  for (i = 0; i < s->planes; i++) {
151  int left, lefttop, top;
152  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
153  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
154  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
155  ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
156  ptrdiff_t stride = p->linesize[i] / 2;
157  const VLC_MULTI_ELEM *const multi = s->multi[i].table;
158  const VLCElem *const vlc = s->vlc[i].table;
159  const int vlc_bits = s->vlc[i].bits;
160  int flags, pred;
161  int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
162  s->slices[i][j].size);
163 
164  if (ret < 0)
165  return ret;
166 
167  flags = get_bits(&gb, 8);
168  pred = get_bits(&gb, 8);
169 
170  dst = (uint16_t *)p->data[i] + j * sheight * stride;
171  if (flags & 1) {
172  if (get_bits_left(&gb) < bps * width * height)
173  return AVERROR_INVALIDDATA;
174  for (k = 0; k < height; k++) {
175  for (x = 0; x < width; x++)
176  dst[x] = get_bits(&gb, bps);
177 
178  dst += stride;
179  }
180  } else {
181  for (k = 0; k < height; k++)
182  READ_PLANE(dst, i, 2, 3)
183  }
184 
185  switch (pred) {
186  case LEFT:
187  dst = (uint16_t *)p->data[i] + j * sheight * stride;
188  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
189  dst += stride;
190  if (interlaced) {
191  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
192  dst += stride;
193  }
194  for (k = 1 + interlaced; k < height; k++) {
195  s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
196  dst += stride;
197  }
198  break;
199  case GRADIENT:
200  dst = (uint16_t *)p->data[i] + j * sheight * stride;
201  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
202  dst += stride;
203  if (interlaced) {
204  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
205  dst += stride;
206  }
207  for (k = 1 + interlaced; k < height; k++) {
208  top = dst[-fake_stride];
209  left = top + dst[0];
210  dst[0] = left & max;
211  for (x = 1; x < width; x++) {
212  top = dst[x - fake_stride];
213  lefttop = dst[x - (fake_stride + 1)];
214  left += top - lefttop + dst[x];
215  dst[x] = left & max;
216  }
217  dst += stride;
218  }
219  break;
220  case MEDIAN:
221  dst = (uint16_t *)p->data[i] + j * sheight * stride;
222  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
223  dst += stride;
224  if (interlaced) {
225  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
226  dst += stride;
227  }
228  lefttop = left = dst[0];
229  for (k = 1 + interlaced; k < height; k++) {
230  magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
231  lefttop = left = dst[0];
232  dst += stride;
233  }
234  break;
235  default:
236  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
237  }
238  }
239 
240  if (s->decorrelate) {
241  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
242  int width = avctx->coded_width;
243  uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
244  uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
245  uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;
246 
247  for (i = 0; i < height; i++) {
248  for (k = 0; k < width; k++) {
249  b[k] = (b[k] + g[k]) & max;
250  r[k] = (r[k] + g[k]) & max;
251  }
252  b += p->linesize[0] / 2;
253  g += p->linesize[1] / 2;
254  r += p->linesize[2] / 2;
255  }
256  }
257 
258  return 0;
259 }
260 
/**
 * Decode slice j of an 8-bit frame (runs as an execute2() job).
 *
 * For each coded plane: read the 2-byte slice header (flags, predictor),
 * then either copy raw bytes (flags & 1) or VLC-decode the plane, undo
 * LEFT/GRADIENT/MEDIAN prediction, and finally undo the green-based RGB
 * decorrelation if active.
 *
 * @return 0 on success, negative AVERROR on invalid slice data
 */
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    const MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        /* the last slice of the frame may be shorter than slice_height */
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        /* interlaced content predicts from two rows up (same field) */
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        const uint8_t *slice = s->buf + s->slices[i][j].start;
        const VLC_MULTI_ELEM *const multi = s->multi[i].table;
        const VLCElem *const vlc = s->vlc[i].table;
        const int vlc_bits = s->vlc[i].bits;
        int flags, pred;

        flags = bytestream_get_byte(&slice);
        pred = bytestream_get_byte(&slice);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            /* raw mode: one byte per sample, no entropy coding */
            if (s->slices[i][j].size - 2 < width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                bytestream_get_buffer(&slice, dst, width);
                dst += stride;
            }
        } else {
            int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);

            if (ret < 0)
                return ret;

            for (k = 0; k < height; k++)
                READ_PLANE(dst, i, 1, 7)
        }

        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                /* second field's first row also has no row above it */
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                /* seed each row with the last sample of the row above */
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                /* left + top - topleft gradient predictor */
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                /* hand the aligned remainder of the row to the dsp */
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            /* guard: dst[0] would read past the slice if it is this short */
            if (1 + interlaced < height)
                lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        /* Undo green-based decorrelation: planes 0 and 2 were stored as
         * differences against plane 1.
         * NOTE(review): the r/g/b names predate the data[0]/data[1] swap
         * done later in frame decoding — verify against magy_decode_frame. */
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }

    return 0;
}
383 
/**
 * Parse the run-length-coded Huffman code-length table from the packet
 * header and build the VLC tables for every coded plane.
 *
 * Each table byte holds a code length in its low 7 bits; if the high bit
 * is set, the next byte extends the run length.  Once `max` lengths have
 * been collected, one plane's table is built via huff_build() and parsing
 * continues with the next plane.
 *
 * @param table      pointer to the raw length table in the packet
 * @param table_size number of bytes available at table
 * @param max        number of symbols per plane (s->max == 1 << bps)
 * @return 0 on success, AVERROR_INVALIDDATA on malformed tables
 */
static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
                         int table_size, int max)
{
    MagicYUVContext *s = avctx->priv_data;
    GetByteContext gb;
    uint8_t *len = s->len;
    uint16_t length_count[33] = { 0 };  // histogram of code lengths, index = length
    int i = 0, j = 0, k;                // i: plane index, j: symbol cursor

    bytestream2_init(&gb, table, table_size);

    while (bytestream2_get_bytes_left(&gb) > 0) {
        int b = bytestream2_peek_byteu(&gb) & 0x80;  // run-extension flag
        int x = bytestream2_get_byteu(&gb) & ~0x80;  // code length (must be 1..32)
        int l = 1;                                   // run length

        if (b) {
            if (bytestream2_get_bytes_left(&gb) <= 0)
                break;
            l += bytestream2_get_byteu(&gb);
        }
        k = j + l;
        if (k > max || x == 0 || x > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
            return AVERROR_INVALIDDATA;
        }

        length_count[x] += l;
        for (; j < k; j++)
            len[j] = x;

        /* a full plane's worth of lengths collected: build its tables */
        if (j == max) {
            j = 0;
            if (huff_build(avctx, len, length_count, &s->vlc[i], &s->multi[i], max, avctx)) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
                return AVERROR_INVALIDDATA;
            }
            i++;
            if (i == s->planes) {
                break;
            }
            memset(length_count, 0, sizeof(length_count));
        }
    }

    if (i != s->planes) {
        av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
436 
438  int *got_frame, AVPacket *avpkt)
439 {
440  MagicYUVContext *s = avctx->priv_data;
441  GetByteContext gb;
442  uint32_t first_offset, offset, next_offset, header_size, slice_width;
443  int width, height, format, version, table_size;
444  int ret, i, j;
445 
446  if (avpkt->size < 36)
447  return AVERROR_INVALIDDATA;
448 
449  bytestream2_init(&gb, avpkt->data, avpkt->size);
450  if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
451  return AVERROR_INVALIDDATA;
452 
453  header_size = bytestream2_get_le32u(&gb);
454  if (header_size < 32 || header_size >= avpkt->size) {
455  av_log(avctx, AV_LOG_ERROR,
456  "header or packet too small %"PRIu32"\n", header_size);
457  return AVERROR_INVALIDDATA;
458  }
459 
460  version = bytestream2_get_byteu(&gb);
461  if (version != 7) {
462  avpriv_request_sample(avctx, "Version %d", version);
463  return AVERROR_PATCHWELCOME;
464  }
465 
466  format = bytestream2_get_byteu(&gb);
467  switch (format) {
468  case 0x65:
469  avctx->pix_fmt = AV_PIX_FMT_GBRP;
470  break;
471  case 0x66:
472  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
473  break;
474  case 0x67:
475  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
476  break;
477  case 0x68:
478  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
479  break;
480  case 0x69:
481  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
482  break;
483  case 0x6a:
484  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
485  break;
486  case 0x6b:
487  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
488  break;
489  case 0x6c:
490  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
491  break;
492  case 0x76:
493  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
494  break;
495  case 0x6d:
496  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
497  break;
498  case 0x6e:
499  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
500  break;
501  case 0x6f:
502  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
503  break;
504  case 0x70:
505  avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
506  break;
507  case 0x71:
508  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
509  break;
510  case 0x72:
511  avctx->pix_fmt = AV_PIX_FMT_GBRAP14;
512  break;
513  case 0x73:
514  avctx->pix_fmt = AV_PIX_FMT_GRAY10;
515  break;
516  case 0x7b:
517  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
518  break;
519  default:
520  avpriv_request_sample(avctx, "Format 0x%X", format);
521  return AVERROR_PATCHWELCOME;
522  }
524  av_assert1(desc);
525  int is_rgb = s->decorrelate = !!(desc->flags & AV_PIX_FMT_FLAG_RGB);
526  s->hshift[1] = s->hshift[2] = desc->log2_chroma_w;
527  s->vshift[1] = s->vshift[2] = desc->log2_chroma_h;
528  s->bps = desc->comp[0].depth;
529  s->max = 1 << s->bps;
530  s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
531  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
532 
533  bytestream2_skipu(&gb, 1);
534  s->color_matrix = bytestream2_get_byteu(&gb);
535  s->flags = bytestream2_get_byteu(&gb);
536  s->interlaced = !!(s->flags & 2);
537  bytestream2_skipu(&gb, 3);
538 
539  width = bytestream2_get_le32u(&gb);
540  height = bytestream2_get_le32u(&gb);
541  ret = ff_set_dimensions(avctx, width, height);
542  if (ret < 0)
543  return ret;
544 
545  slice_width = bytestream2_get_le32u(&gb);
546  if (slice_width != avctx->coded_width) {
547  avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
548  return AVERROR_PATCHWELCOME;
549  }
550  s->slice_height = bytestream2_get_le32u(&gb);
551  if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
552  av_log(avctx, AV_LOG_ERROR,
553  "invalid slice height: %d\n", s->slice_height);
554  return AVERROR_INVALIDDATA;
555  }
556 
557  bytestream2_skipu(&gb, 4);
558 
559  s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
560  if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
561  av_log(avctx, AV_LOG_ERROR,
562  "invalid number of slices: %d\n", s->nb_slices);
563  return AVERROR_INVALIDDATA;
564  }
565 
566  if (s->interlaced) {
567  if ((s->slice_height >> s->vshift[1]) < 2) {
568  av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
569  return AVERROR_INVALIDDATA;
570  }
571  if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
572  av_log(avctx, AV_LOG_ERROR, "impossible height\n");
573  return AVERROR_INVALIDDATA;
574  }
575  }
576 
577  if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
578  return AVERROR_INVALIDDATA;
579  for (i = 0; i < s->planes; i++) {
580  av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
581  if (!s->slices[i])
582  return AVERROR(ENOMEM);
583 
584  offset = bytestream2_get_le32u(&gb);
585  if (offset >= avpkt->size - header_size)
586  return AVERROR_INVALIDDATA;
587 
588  if (i == 0)
589  first_offset = offset;
590 
591  for (j = 0; j < s->nb_slices - 1; j++) {
592  s->slices[i][j].start = offset + header_size;
593 
594  next_offset = bytestream2_get_le32u(&gb);
595  if (next_offset <= offset || next_offset >= avpkt->size - header_size)
596  return AVERROR_INVALIDDATA;
597 
598  s->slices[i][j].size = next_offset - offset;
599  if (s->slices[i][j].size < 2)
600  return AVERROR_INVALIDDATA;
601  offset = next_offset;
602  }
603 
604  s->slices[i][j].start = offset + header_size;
605  s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
606 
607  if (s->slices[i][j].size < 2)
608  return AVERROR_INVALIDDATA;
609  }
610 
611  if (bytestream2_get_byteu(&gb) != s->planes)
612  return AVERROR_INVALIDDATA;
613 
614  bytestream2_skipu(&gb, s->nb_slices * s->planes);
615 
616  table_size = header_size + first_offset - bytestream2_tell(&gb);
617  if (table_size < 2)
618  return AVERROR_INVALIDDATA;
619 
620  ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
621  table_size, s->max);
622  if (ret < 0)
623  return ret;
624 
625  if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
626  return ret;
627 
628  s->buf = avpkt->data;
629  s->p = p;
630  avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);
631 
632  if (is_rgb) {
633  FFSWAP(uint8_t*, p->data[0], p->data[1]);
634  FFSWAP(int, p->linesize[0], p->linesize[1]);
635  } else {
636  switch (s->color_matrix) {
637  case 1:
638  p->colorspace = AVCOL_SPC_BT470BG;
639  break;
640  case 2:
641  p->colorspace = AVCOL_SPC_BT709;
642  break;
643  }
644  p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
645  }
646 
647  *got_frame = 1;
648 
649  return avpkt->size;
650 }
651 
653 {
654  MagicYUVContext *s = avctx->priv_data;
655  ff_llviddsp_init(&s->llviddsp);
656  return 0;
657 }
658 
660 {
661  MagicYUVContext * const s = avctx->priv_data;
662  int i;
663 
664  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
665  av_freep(&s->slices[i]);
666  s->slices_size[i] = 0;
667  ff_vlc_free(&s->vlc[i]);
668  ff_vlc_free_multi(&s->multi[i]);
669  }
670 
671  return 0;
672 }
673 
675  .p.name = "magicyuv",
676  CODEC_LONG_NAME("MagicYUV video"),
677  .p.type = AVMEDIA_TYPE_VIDEO,
678  .p.id = AV_CODEC_ID_MAGICYUV,
679  .priv_data_size = sizeof(MagicYUVContext),
683  .p.capabilities = AV_CODEC_CAP_DR1 |
686 };
flags
const SwsFlags flags[]
Definition: swscale.c:61
ff_magicyuv_decoder
const FFCodec ff_magicyuv_decoder
Definition: magicyuv.c:674
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
Prediction
Definition: aptx.h:70
GetByteContext
Definition: bytestream.h:33
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
HuffEntry::len
uint8_t len
Definition: exr.c:97
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
MEDIAN
@ MEDIAN
Definition: magicyuv.c:48
MagicYUVContext::nb_slices
int nb_slices
Definition: magicyuv.c:61
src1
const pixel * src1
Definition: h264pred_template.c:420
MagicYUVContext::hshift
int hshift[4]
Definition: magicyuv.c:68
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
VLC_MULTI_ELEM
Definition: vlc.h:56
MagicYUVContext::color_matrix
int color_matrix
Definition: magicyuv.c:64
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
MagicYUVContext::llviddsp
LLVidDSPContext llviddsp
Definition: magicyuv.c:76
VLC_BITS
#define VLC_BITS
Definition: magicyuv.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:588
b
#define b
Definition: input.c:42
table
static const uint16_t table[]
Definition: prosumer.c:203
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
FFCodec
Definition: codec_internal.h:127
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
build_huffman
static int build_huffman(AVCodecContext *avctx, const uint8_t *table, int table_size, int max)
Definition: magicyuv.c:384
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
Slice::size
uint32_t size
Definition: magicyuv.c:42
magy_decode_slice10
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:138
thread.h
MagicYUVContext
Definition: magicyuv.c:56
Slice::start
uint32_t start
Definition: magicyuv.c:41
MagicYUVContext::max
int max
Definition: magicyuv.c:58
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:560
MagicYUVContext::bps
int bps
Definition: magicyuv.c:59
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:558
ff_llviddsp_init
av_cold void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:114
GetBitContext
Definition: get_bits.h:109
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
LLVidDSPContext
Definition: lossless_videodsp.h:28
huff_build
static int huff_build(AVCodecContext *avctx, const uint8_t len[], uint16_t codes_pos[33], VLC *vlc, VLC_MULTI *multi, int nb_elems, void *logctx)
Definition: magicyuv.c:81
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
MagicYUVContext::slices_size
unsigned int slices_size[4]
Definition: magicyuv.c:71
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:562
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP14
#define AV_PIX_FMT_GBRAP14
Definition: pixfmt.h:564
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:563
VLC_MULTI
Definition: vlc.h:65
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1043
MagicYUVContext::multi
VLC_MULTI multi[4]
Definition: magicyuv.c:73
ff_vlc_free_multi
void ff_vlc_free_multi(VLC_MULTI *vlc)
Definition: vlc.c:575
HuffEntry::sym
uint16_t sym
Definition: exr.c:98
GRADIENT
@ GRADIENT
Definition: magicyuv.c:47
decode.h
get_bits.h
Slice
Definition: magicyuv.c:40
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
MagicYUVContext::decorrelate
int decorrelate
Definition: magicyuv.c:63
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:519
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
MagicYUVContext::vshift
int vshift[4]
Definition: magicyuv.c:69
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
magy_decode_init
static av_cold int magy_decode_init(AVCodecContext *avctx)
Definition: magicyuv.c:652
MagicYUVContext::len
uint8_t len[1<< 14]
Definition: magicyuv.c:78
ff_vlc_init_multi_from_lengths
int ff_vlc_init_multi_from_lengths(VLC *vlc, VLC_MULTI *multi, int nb_bits, int nb_elems, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc_multi()
Definition: vlc.c:517
Prediction
Prediction
Definition: magicyuv.c:45
MagicYUVContext::magy_decode_slice
int(* magy_decode_slice)(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:74
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
MagicYUVContext::slices
Slice * slices[4]
Definition: magicyuv.c:70
AV_CODEC_ID_MAGICYUV
@ AV_CODEC_ID_MAGICYUV
Definition: codec_id.h:274
MagicYUVContext::he
HuffEntry he[1<< 14]
Definition: magicyuv.c:77
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:589
height
#define height
Definition: dsp.h:89
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
bps
unsigned bps
Definition: movenc.c:2033
VLCElem
Definition: vlc.h:32
MagicYUVContext::p
AVFrame * p
Definition: magicyuv.c:57
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
MagicYUVContext::slice_height
int slice_height
Definition: magicyuv.c:60
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MagicYUVContext::flags
int flags
Definition: magicyuv.c:65
version
version
Definition: libkvazaar.c:313
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
MagicYUVContext::vlc
VLC vlc[4]
Definition: magicyuv.c:72
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
len
int len
Definition: vorbis_enc_data.h:426
MagicYUVContext::interlaced
int interlaced
Definition: magicyuv.c:66
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
magy_decode_end
static av_cold int magy_decode_end(AVCodecContext *avctx)
Definition: magicyuv.c:659
avcodec.h
bytestream_get_buffer
static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
Definition: bytestream.h:363
mid_pred
#define mid_pred
Definition: mathops.h:115
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
magy_decode_frame
static int magy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: magicyuv.c:437
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AVCodecContext
main external API structure.
Definition: avcodec.h:439
magicyuv_median_pred16
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1, const uint16_t *diff, intptr_t w, int *left, int *left_top, int max)
Definition: magicyuv.c:102
VLC
Definition: vlc.h:50
READ_PLANE
#define READ_PLANE(dst, plane, b, c)
Definition: magicyuv.c:123
HuffEntry
Definition: exr.c:96
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
desc
const char * desc
Definition: libsvtav1.c:82
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
lossless_videodsp.h
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
MagicYUVContext::planes
int planes
Definition: magicyuv.c:62
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
stride
#define stride
Definition: h264pred_template.c:536
MagicYUVContext::buf
const uint8_t * buf
Definition: magicyuv.c:67
magy_decode_slice
static int magy_decode_slice(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:261
width
#define width
Definition: dsp.h:89
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
AVCodecContext::execute2
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:1622
LEFT
@ LEFT
Definition: magicyuv.c:46