ffv1dec.c
1 /*
2  * FFV1 decoder
3  *
4  * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * FF Video Codec 1 (a lossless codec) decoder
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/crc.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/pixdesc.h"
33 #include "avcodec.h"
34 #include "codec_internal.h"
35 #include "get_bits.h"
36 #include "rangecoder.h"
37 #include "golomb.h"
38 #include "mathops.h"
39 #include "ffv1.h"
40 #include "progressframe.h"
41 #include "libavutil/refstruct.h"
42 #include "thread.h"
43 
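/* Decode one symbol from the range coder: context 0 codes "value is
 * zero"; otherwise a unary exponent e is read with contexts 1..10,
 * e mantissa bits with contexts 22..31, and, for signed symbols, a
 * sign bit with contexts 11..21. */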
44 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
45  int is_signed)
46 {
47  if (get_rac(c, state + 0))
48  return 0;
49  else {
50  int e;
51  unsigned a;
52  e = 0;
53  while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10
54  e++;
55  if (e > 31)
56  return AVERROR_INVALIDDATA;
57  }
58 
59  a = 1;
60  for (int i = e - 1; i >= 0; i--)
61  a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31
62 
63  e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
64  return (a ^ e) - e;
65  }
66 }
67 
68 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
69 {
70  return get_symbol_inline(c, state, is_signed);
71 }
72 
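/* Decode one residual in Golomb-Rice mode: derive the Rice parameter k
 * by comparing the running count against error_sum, read a signed
 * Golomb-Rice code, flip it when 2*drift + count is negative, add the
 * bias and fold the result into the range allowed by 'bits'. The
 * adaptive state is then updated via update_vlc_state(). */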
73 static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
74  int bits)
75 {
76  int k, i, v, ret;
77 
78  i = state->count;
79  k = 0;
80  while (i < state->error_sum) { // FIXME: optimize
81  k++;
82  i += i;
83  }
84 
85  v = get_sr_golomb(gb, k, 12, bits);
86  ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
87  v, state->bias, state->error_sum, state->drift, state->count, k);
88 
89  v ^= ((2 * state->drift + state->count) >> 31);
90 
91  ret = fold(v + state->bias, bits);
92 
93  update_vlc_state(state, v);
94 
95  return ret;
96 }
97 
98 static int is_input_end(RangeCoder *c, GetBitContext *gb, int ac)
99 {
100  if (ac != AC_GOLOMB_RICE) {
101  if (c->overread > MAX_OVERREAD)
102  return AVERROR_INVALIDDATA;
103  } else {
104  if (get_bits_left(gb) < 1)
105  return AVERROR_INVALIDDATA;
106  }
107  return 0;
108 }
109 
110 #define TYPE int16_t
111 #define RENAME(name) name
112 #include "ffv1dec_template.c"
113 #undef TYPE
114 #undef RENAME
115 
116 #define TYPE int32_t
117 #define RENAME(name) name ## 32
118 #include "ffv1dec_template.c"
119 
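/* Decode one plane (or one interleaved component when pixel_stride > 1)
 * of a slice. Two sample line buffers are kept so the predictor can see
 * the previous line; decoded samples are stored as 8-bit or 16-bit
 * pixels depending on bits_per_raw_sample and packed_at_lsb. */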
120 static int decode_plane(FFV1Context *f, FFV1SliceContext *sc,
121  GetBitContext *gb,
122  uint8_t *src, int w, int h, int stride, int plane_index,
123  int pixel_stride, int ac)
124 {
125  int x, y;
126  int16_t *sample[2];
127  sample[0] = sc->sample_buffer + 3;
128  sample[1] = sc->sample_buffer + w + 6 + 3;
129 
130  sc->run_index = 0;
131 
132  memset(sc->sample_buffer, 0, 2 * (w + 6) * sizeof(*sc->sample_buffer));
133 
134  for (y = 0; y < h; y++) {
135  int16_t *temp = sample[0]; // FIXME: try a normal buffer
136 
137  sample[0] = sample[1];
138  sample[1] = temp;
139 
140  sample[1][-1] = sample[0][0];
141  sample[0][w] = sample[0][w - 1];
142 
143  if (f->avctx->bits_per_raw_sample <= 8) {
144  int ret = decode_line(f, sc, gb, w, sample, plane_index, 8, ac);
145  if (ret < 0)
146  return ret;
147  for (x = 0; x < w; x++)
148  src[x*pixel_stride + stride * y] = sample[1][x];
149  } else {
150  int ret = decode_line(f, sc, gb, w, sample, plane_index, f->avctx->bits_per_raw_sample, ac);
151  if (ret < 0)
152  return ret;
153  if (f->packed_at_lsb) {
154  for (x = 0; x < w; x++) {
155  ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
156  }
157  } else {
158  for (x = 0; x < w; x++) {
159  ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x] << (16 - f->avctx->bits_per_raw_sample) | ((uint16_t **)sample)[1][x] >> (2 * f->avctx->bits_per_raw_sample - 16);
160  }
161  }
162  }
163  }
164  return 0;
165 }
166 
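/* Parse the per-slice header used by version 3 and later: slice
 * position and size in the slice grid, per-plane quant table index,
 * picture structure, sample aspect ratio and, for version > 3, the
 * context-reset flag, slice coding mode and RCT coefficients. */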
167 static int decode_slice_header(const FFV1Context *f, FFV1SliceContext *sc,
168  AVFrame *frame)
169 {
170  RangeCoder *c = &sc->c;
171  uint8_t state[CONTEXT_SIZE];
172  unsigned ps, context_count;
173  int sx, sy, sw, sh;
174 
175  memset(state, 128, sizeof(state));
176  sx = get_symbol(c, state, 0);
177  sy = get_symbol(c, state, 0);
178  sw = get_symbol(c, state, 0) + 1U;
179  sh = get_symbol(c, state, 0) + 1U;
180 
181  av_assert0(f->version > 2);
182 
183 
184  if (sx < 0 || sy < 0 || sw <= 0 || sh <= 0)
185  return AVERROR_INVALIDDATA;
186  if (sx > f->num_h_slices - sw || sy > f->num_v_slices - sh)
187  return AVERROR_INVALIDDATA;
188 
189  sc->slice_x = ff_slice_coord(f, f->width , sx , f->num_h_slices, f->chroma_h_shift);
190  sc->slice_y = ff_slice_coord(f, f->height, sy , f->num_v_slices, f->chroma_v_shift);
191  sc->slice_width = ff_slice_coord(f, f->width , sx + sw, f->num_h_slices, f->chroma_h_shift) - sc->slice_x;
192  sc->slice_height = ff_slice_coord(f, f->height, sy + sh, f->num_v_slices, f->chroma_v_shift) - sc->slice_y;
193 
194  av_assert0((unsigned)sc->slice_width <= f->width &&
195  (unsigned)sc->slice_height <= f->height);
196  av_assert0 ( (unsigned)sc->slice_x + (uint64_t)sc->slice_width <= f->width
197  && (unsigned)sc->slice_y + (uint64_t)sc->slice_height <= f->height);
198 
199  if (f->ac == AC_GOLOMB_RICE && sc->slice_width >= (1<<23))
200  return AVERROR_INVALIDDATA;
201 
202  for (unsigned i = 0; i < f->plane_count; i++) {
203  PlaneContext * const p = &sc->plane[i];
204  int idx = get_symbol(c, state, 0);
205  if (idx >= (unsigned)f->quant_table_count) {
206  av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
207  return -1;
208  }
209  p->quant_table_index = idx;
210  context_count = f->context_count[idx];
211 
212  if (p->context_count < context_count) {
213  av_freep(&p->state);
214  av_freep(&p->vlc_state);
215  }
216  p->context_count = context_count;
217  }
218 
219  ps = get_symbol(c, state, 0);
220  if (ps == 1) {
221  frame->flags |= AV_FRAME_FLAG_INTERLACED;
222  frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
223  } else if (ps == 2) {
224  frame->flags |= AV_FRAME_FLAG_INTERLACED;
225  frame->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST;
226  } else if (ps == 3) {
227  frame->flags &= ~AV_FRAME_FLAG_INTERLACED;
228  }
229  frame->sample_aspect_ratio.num = get_symbol(c, state, 0);
230  frame->sample_aspect_ratio.den = get_symbol(c, state, 0);
231 
232  if (av_image_check_sar(f->width, f->height,
233  frame->sample_aspect_ratio) < 0) {
234  av_log(f->avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
235  frame->sample_aspect_ratio.num,
236  frame->sample_aspect_ratio.den);
237  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
238  }
239 
240  if (f->version > 3) {
241  sc->slice_reset_contexts = get_rac(c, state);
242  sc->slice_coding_mode = get_symbol(c, state, 0);
243  if (sc->slice_coding_mode != 1 && f->colorspace == 1) {
244  sc->slice_rct_by_coef = get_symbol(c, state, 0);
245  sc->slice_rct_ry_coef = get_symbol(c, state, 0);
246  if ((uint64_t)sc->slice_rct_by_coef + (uint64_t)sc->slice_rct_ry_coef > 4) {
247  av_log(f->avctx, AV_LOG_ERROR, "slice_rct_y_coef out of range\n");
248  return AVERROR_INVALIDDATA;
249  }
250  }
251  }
252 
253  return 0;
254 }
255 
256 static void slice_set_damaged(FFV1Context *f, FFV1SliceContext *sc)
257 {
258  sc->slice_damaged = 1;
259 
260  // only set this for frame threading, as for slice threading its value is
261  // not used and setting it would be a race
262  if (f->avctx->active_thread_type & FF_THREAD_FRAME)
263  f->frame_damaged = 1;
264 }
265 
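/* Worker for avctx->execute(): decode a single slice. Waits on the
 * previous frame's progress when frame threading is used, parses the
 * slice header (version > 2), resets or reuses the context state,
 * positions a bit reader for the Golomb-Rice path, and decodes the
 * planes according to the colorspace (planar YCbCr, interleaved
 * gray/alpha, or RGB via the template functions). */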
266 static int decode_slice(AVCodecContext *c, void *arg)
267 {
268  FFV1Context *f = c->priv_data;
269  FFV1SliceContext *sc = arg;
270  int width, height, x, y, ret;
271  const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
272  AVFrame * const p = f->picture.f;
273  const int si = sc - f->slices;
274  GetBitContext gb;
275  int ac = f->ac || sc->slice_coding_mode == 1;
276 
277  if (!(p->flags & AV_FRAME_FLAG_KEY) && f->last_picture.f)
278  ff_progress_frame_await(&f->last_picture, si);
279 
280  if (f->slice_damaged[si])
281  slice_set_damaged(f, sc);
282 
283  sc->slice_rct_by_coef = 1;
284  sc->slice_rct_ry_coef = 1;
285 
286  if (f->version > 2) {
287  if (ff_ffv1_init_slice_state(f, sc) < 0)
288  return AVERROR(ENOMEM);
289  if (decode_slice_header(f, sc, p) < 0) {
290  sc->slice_x = sc->slice_y = sc->slice_height = sc->slice_width = 0;
291  slice_set_damaged(f, sc);
292  return AVERROR_INVALIDDATA;
293  }
294  }
295  if ((ret = ff_ffv1_init_slice_state(f, sc)) < 0)
296  return ret;
297  if ((p->flags & AV_FRAME_FLAG_KEY) || sc->slice_reset_contexts) {
298  ff_ffv1_clear_slice_state(f, sc);
299  } else if (sc->slice_damaged) {
300  return AVERROR_INVALIDDATA;
301  }
302 
303  width = sc->slice_width;
304  height = sc->slice_height;
305  x = sc->slice_x;
306  y = sc->slice_y;
307 
308  if (ac == AC_GOLOMB_RICE) {
309  if (f->version == 3 && f->micro_version > 1 || f->version > 3)
310  get_rac(&sc->c, (uint8_t[]) { 129 });
311  sc->ac_byte_count = f->version > 2 || (!x && !y) ? sc->c.bytestream - sc->c.bytestream_start - 1 : 0;
312  init_get_bits(&gb,
313  sc->c.bytestream_start + sc->ac_byte_count,
314  (sc->c.bytestream_end - sc->c.bytestream_start - sc->ac_byte_count) * 8);
315  }
316 
317  av_assert1(width && height);
318  if (f->colorspace == 0 && (f->chroma_planes || !f->transparency)) {
319  const int chroma_width = AV_CEIL_RSHIFT(width, f->chroma_h_shift);
320  const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
321  const int cx = x >> f->chroma_h_shift;
322  const int cy = y >> f->chroma_v_shift;
323  decode_plane(f, sc, &gb, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1, ac);
324 
325  if (f->chroma_planes) {
326  decode_plane(f, sc, &gb, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1, ac);
327  decode_plane(f, sc, &gb, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1, ac);
328  }
329  if (f->transparency)
330  decode_plane(f, sc, &gb, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 1 : 2, 1, ac);
331  } else if (f->colorspace == 0) {
332  decode_plane(f, sc, &gb, p->data[0] + ps*x + y*p->linesize[0] , width, height, p->linesize[0], 0, 2, ac);
333  decode_plane(f, sc, &gb, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2, ac);
334  } else if (f->use32bit) {
335  uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
336  p->data[1] + ps * x + y * p->linesize[1],
337  p->data[2] + ps * x + y * p->linesize[2],
338  p->data[3] + ps * x + y * p->linesize[3] };
339  decode_rgb_frame32(f, sc, &gb, planes, width, height, p->linesize);
340  } else {
341  uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
342  p->data[1] + ps * x + y * p->linesize[1],
343  p->data[2] + ps * x + y * p->linesize[2],
344  p->data[3] + ps * x + y * p->linesize[3] };
345  decode_rgb_frame(f, sc, &gb, planes, width, height, p->linesize);
346  }
347  if (ac != AC_GOLOMB_RICE && f->version > 2) {
348  int v;
349  get_rac(&sc->c, (uint8_t[]) { 129 });
350  v = sc->c.bytestream_end - sc->c.bytestream - 2 - 5*!!f->ec;
351  if (v) {
352  av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
353  slice_set_damaged(f, sc);
354  }
355  }
356 
357  if (sc->slice_damaged && (f->avctx->err_recognition & AV_EF_EXPLODE))
358  return AVERROR_INVALIDDATA;
359 
360  if ((c->active_thread_type & FF_THREAD_FRAME) && !f->frame_damaged)
361  ff_progress_frame_report(&f->picture, si);
362 
363  return 0;
364 }
365 
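/* Read one run-length coded quantization table: each run assigns the
 * next quantized value scaled by 'scale', and the upper half is filled
 * with the mirrored negative values. Returns the number of distinct
 * levels, 2 * v - 1. */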
366 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
367 {
368  int v;
369  int i = 0;
370  uint8_t state[CONTEXT_SIZE];
371 
372  memset(state, 128, sizeof(state));
373 
374  for (v = 0; i < 128; v++) {
375  unsigned len = get_symbol(c, state, 0) + 1U;
376 
377  if (len > 128 - i || !len)
378  return AVERROR_INVALIDDATA;
379 
380  while (len--) {
381  quant_table[i] = scale * v;
382  i++;
383  }
384  }
385 
386  for (i = 1; i < 128; i++)
387  quant_table[256 - i] = -quant_table[i];
388  quant_table[128] = -quant_table[127];
389 
390  return 2 * v - 1;
391 }
392 
393 static int read_quant_tables(RangeCoder *c,
394  int16_t quant_table[MAX_CONTEXT_INPUTS][256])
395 {
396  int i;
397  int context_count = 1;
398 
399  for (i = 0; i < 5; i++) {
400  int ret = read_quant_table(c, quant_table[i], context_count);
401  if (ret < 0)
402  return ret;
403  context_count *= ret;
404  if (context_count > 32768U) {
405  return AVERROR_INVALIDDATA;
406  }
407  }
408  return (context_count + 1) / 2;
409 }
410 
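/* Parse the global header stored in extradata (version >= 2): version
 * and micro_version, coder type with optional custom state-transition
 * table, colorspace, bit depth, chroma subsampling, transparency,
 * slice grid, quantization tables, optional initial context states,
 * error-check mode and intra flag. For version > 2 the extradata is
 * protected by a trailing CRC-32 which is verified here. */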
411 static int read_extra_header(FFV1Context *f)
412 {
413  RangeCoder c;
414  uint8_t state[CONTEXT_SIZE];
415  int ret;
416  uint8_t state2[32][CONTEXT_SIZE];
417  unsigned crc = 0;
418 
419  memset(state2, 128, sizeof(state2));
420  memset(state, 128, sizeof(state));
421 
422  ff_init_range_decoder(&c, f->avctx->extradata, f->avctx->extradata_size);
423  ff_build_rac_states(&c, 0.05 * (1LL << 32), 256 - 8);
424 
425  f->version = get_symbol(&c, state, 0);
426  if (f->version < 2) {
427  av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
428  return AVERROR_INVALIDDATA;
429  }
430  if (f->version > 4) {
431  av_log(f->avctx, AV_LOG_ERROR, "unsupported version %d\n",
432  f->version);
433  return AVERROR_PATCHWELCOME;
434  }
435  if (f->version > 2) {
436  c.bytestream_end -= 4;
437  f->micro_version = get_symbol(&c, state, 0);
438  if (f->micro_version < 0)
439  return AVERROR_INVALIDDATA;
440  }
441  f->ac = get_symbol(&c, state, 0);
442 
443  if (f->ac == AC_RANGE_CUSTOM_TAB) {
444  for (int i = 1; i < 256; i++)
445  f->state_transition[i] = get_symbol(&c, state, 1) + c.one_state[i];
446  }
447 
448  f->colorspace = get_symbol(&c, state, 0); //YUV cs type
449  f->avctx->bits_per_raw_sample = get_symbol(&c, state, 0);
450  f->chroma_planes = get_rac(&c, state);
451  f->chroma_h_shift = get_symbol(&c, state, 0);
452  f->chroma_v_shift = get_symbol(&c, state, 0);
453  f->transparency = get_rac(&c, state);
454  f->plane_count = 1 + (f->chroma_planes || f->version<4) + f->transparency;
455  f->num_h_slices = 1 + get_symbol(&c, state, 0);
456  f->num_v_slices = 1 + get_symbol(&c, state, 0);
457 
458  if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
459  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
460  f->chroma_h_shift, f->chroma_v_shift);
461  return AVERROR_INVALIDDATA;
462  }
463 
464  if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
465  f->num_v_slices > (unsigned)f->height || !f->num_v_slices
466  ) {
467  av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n");
468  return AVERROR_INVALIDDATA;
469  }
470 
471  if (f->num_h_slices > MAX_SLICES / f->num_v_slices) {
472  av_log(f->avctx, AV_LOG_ERROR, "slice count unsupported\n");
473  return AVERROR_PATCHWELCOME;
474  }
475 
476  f->quant_table_count = get_symbol(&c, state, 0);
477  if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
478  av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count);
479  f->quant_table_count = 0;
480  return AVERROR_INVALIDDATA;
481  }
482 
483  for (int i = 0; i < f->quant_table_count; i++) {
484  f->context_count[i] = read_quant_tables(&c, f->quant_tables[i]);
485  if (f->context_count[i] < 0) {
486  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
487  return AVERROR_INVALIDDATA;
488  }
489  }
490  if ((ret = ff_ffv1_allocate_initial_states(f)) < 0)
491  return ret;
492 
493  for (int i = 0; i < f->quant_table_count; i++)
494  if (get_rac(&c, state)) {
495  for (int j = 0; j < f->context_count[i]; j++)
496  for (int k = 0; k < CONTEXT_SIZE; k++) {
497  int pred = j ? f->initial_states[i][j - 1][k] : 128;
498  f->initial_states[i][j][k] =
499  (pred + get_symbol(&c, state2[k], 1)) & 0xFF;
500  }
501  }
502 
503  if (f->version > 2) {
504  f->ec = get_symbol(&c, state, 0);
505  if (f->ec >= 2)
506  f->crcref = 0x7a8c4079;
507  if (f->micro_version > 2)
508  f->intra = get_symbol(&c, state, 0);
509  }
510 
511  if (f->version > 2) {
512  unsigned v;
513  v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), f->crcref,
514  f->avctx->extradata, f->avctx->extradata_size);
515  if (v != f->crcref || f->avctx->extradata_size < 4) {
516  av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
517  return AVERROR_INVALIDDATA;
518  }
519  crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4);
520  }
521 
522  if (f->avctx->debug & FF_DEBUG_PICT_INFO)
523  av_log(f->avctx, AV_LOG_DEBUG,
524  "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
525  f->version, f->micro_version,
526  f->ac,
527  f->colorspace,
528  f->avctx->bits_per_raw_sample,
529  f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift,
530  f->transparency,
531  f->num_h_slices, f->num_v_slices,
532  f->quant_table_count,
533  f->ec,
534  f->intra,
535  crc
536  );
537  return 0;
538 }
539 
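/* Parse the keyframe header. For version < 2 this carries the full
 * parameter set (coder type, colorspace, bit depth, subsampling,
 * transparency, quant tables); later versions take those from the
 * extradata. The parameter combination is mapped to an AVPixelFormat,
 * and the slice count is read explicitly (version 2) or derived by
 * walking the slice size trailers backwards (version >= 3). */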
540 static int read_header(FFV1Context *f)
541 {
542  uint8_t state[CONTEXT_SIZE];
543  int context_count = -1; //-1 to avoid warning
544  RangeCoder *const c = &f->slices[0].c;
545 
546  memset(state, 128, sizeof(state));
547 
548  if (f->version < 2) {
549  int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
550  unsigned v= get_symbol(c, state, 0);
551  if (v >= 2) {
552  av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
553  return AVERROR_INVALIDDATA;
554  }
555  f->version = v;
556  f->ac = get_symbol(c, state, 0);
557 
558  if (f->ac == AC_RANGE_CUSTOM_TAB) {
559  for (int i = 1; i < 256; i++) {
560  int st = get_symbol(c, state, 1) + c->one_state[i];
561  if (st < 1 || st > 255) {
562  av_log(f->avctx, AV_LOG_ERROR, "invalid state transition %d\n", st);
563  return AVERROR_INVALIDDATA;
564  }
565  f->state_transition[i] = st;
566  }
567  }
568 
569  colorspace = get_symbol(c, state, 0); //YUV cs type
570  bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
571  chroma_planes = get_rac(c, state);
572  chroma_h_shift = get_symbol(c, state, 0);
573  chroma_v_shift = get_symbol(c, state, 0);
574  transparency = get_rac(c, state);
575  if (colorspace == 0 && f->avctx->skip_alpha)
576  transparency = 0;
577 
578  if (f->plane_count) {
579  if (colorspace != f->colorspace ||
580  bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
581  chroma_planes != f->chroma_planes ||
582  chroma_h_shift != f->chroma_h_shift ||
583  chroma_v_shift != f->chroma_v_shift ||
584  transparency != f->transparency) {
585  av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
586  return AVERROR_INVALIDDATA;
587  }
588  }
589 
590  if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
591  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
592  chroma_h_shift, chroma_v_shift);
593  return AVERROR_INVALIDDATA;
594  }
595 
596  f->colorspace = colorspace;
597  f->avctx->bits_per_raw_sample = bits_per_raw_sample;
598  f->chroma_planes = chroma_planes;
599  f->chroma_h_shift = chroma_h_shift;
600  f->chroma_v_shift = chroma_v_shift;
601  f->transparency = transparency;
602 
603  f->plane_count = 2 + f->transparency;
604  }
605 
606  if (f->colorspace == 0) {
607  if (!f->transparency && !f->chroma_planes) {
608  if (f->avctx->bits_per_raw_sample <= 8)
609  f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
610  else if (f->avctx->bits_per_raw_sample == 9) {
611  f->packed_at_lsb = 1;
612  f->avctx->pix_fmt = AV_PIX_FMT_GRAY9;
613  } else if (f->avctx->bits_per_raw_sample == 10) {
614  f->packed_at_lsb = 1;
615  f->avctx->pix_fmt = AV_PIX_FMT_GRAY10;
616  } else if (f->avctx->bits_per_raw_sample == 12) {
617  f->packed_at_lsb = 1;
618  f->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
619  } else if (f->avctx->bits_per_raw_sample == 14) {
620  f->packed_at_lsb = 1;
621  f->avctx->pix_fmt = AV_PIX_FMT_GRAY14;
622  } else if (f->avctx->bits_per_raw_sample == 16) {
623  f->packed_at_lsb = 1;
624  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
625  } else if (f->avctx->bits_per_raw_sample < 16) {
626  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
627  } else
628  return AVERROR(ENOSYS);
629  } else if (f->transparency && !f->chroma_planes) {
630  if (f->avctx->bits_per_raw_sample <= 8)
631  f->avctx->pix_fmt = AV_PIX_FMT_YA8;
632  else
633  return AVERROR(ENOSYS);
634  } else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
635  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
636  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
637  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
638  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
639  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
640  case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
641  case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
642  }
643  } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
644  switch(16*f->chroma_h_shift + f->chroma_v_shift) {
645  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
646  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
647  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
648  }
649  } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
650  f->packed_at_lsb = 1;
651  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
652  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
653  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
654  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
655  }
656  } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
657  f->packed_at_lsb = 1;
658  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
659  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
660  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
661  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
662  }
663  } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
664  f->packed_at_lsb = 1;
665  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
666  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
667  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P10; break;
668  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
669  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
670  }
671  } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
672  f->packed_at_lsb = 1;
673  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
674  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
675  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
676  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
677  }
678  } else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) {
679  f->packed_at_lsb = 1;
680  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
681  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P12; break;
682  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P12; break;
683  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P12; break;
684  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P12; break;
685  }
686  } else if (f->avctx->bits_per_raw_sample == 12 && f->transparency) {
687  f->packed_at_lsb = 1;
688  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
689  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P12; break;
690  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P12; break;
691  }
692  } else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) {
693  f->packed_at_lsb = 1;
694  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
695  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P14; break;
696  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P14; break;
697  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P14; break;
698  }
699  } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
700  f->packed_at_lsb = 1;
701  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
702  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
703  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
704  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
705  }
706  } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){
707  f->packed_at_lsb = 1;
708  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
709  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
710  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
711  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
712  }
713  }
714  } else if (f->colorspace == 1) {
715  if (f->chroma_h_shift || f->chroma_v_shift) {
716  av_log(f->avctx, AV_LOG_ERROR,
717  "chroma subsampling not supported in this colorspace\n");
718  return AVERROR(ENOSYS);
719  }
720  if ( f->avctx->bits_per_raw_sample <= 8 && !f->transparency)
721  f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
722  else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency)
723  f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
724  else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency)
725  f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
726  else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency)
727  f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
728  else if (f->avctx->bits_per_raw_sample == 10 && f->transparency)
729  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
730  else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency)
731  f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
732  else if (f->avctx->bits_per_raw_sample == 12 && f->transparency)
733  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
734  else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency)
735  f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
736  else if (f->avctx->bits_per_raw_sample == 14 && f->transparency)
737  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP14;
738  else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
739  f->avctx->pix_fmt = AV_PIX_FMT_GBRP16;
740  f->use32bit = 1;
741  }
742  else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
743  f->avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
744  f->use32bit = 1;
745  }
746  } else {
747  av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
748  return AVERROR(ENOSYS);
749  }
750  if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
751  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
752  return AVERROR(ENOSYS);
753  }
754 
755  ff_dlog(f->avctx, "%d %d %d\n",
756  f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
757  if (f->version < 2) {
758  context_count = read_quant_tables(c, f->quant_tables[0]);
759  if (context_count < 0) {
760  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
761  return AVERROR_INVALIDDATA;
762  }
763  f->slice_count = f->max_slice_count;
764  } else if (f->version < 3) {
765  f->slice_count = get_symbol(c, state, 0);
766  } else {
767  const uint8_t *p = c->bytestream_end;
768  for (f->slice_count = 0;
769  f->slice_count < MAX_SLICES && 3 + 5*!!f->ec < p - c->bytestream_start;
770  f->slice_count++) {
771  int trailer = 3 + 5*!!f->ec;
772  int size = AV_RB24(p-trailer);
773  if (size + trailer > p - c->bytestream_start)
774  break;
775  p -= size + trailer;
776  }
777  }
778  if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) {
779  av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count);
780  return AVERROR_INVALIDDATA;
781  }
782 
783  av_refstruct_unref(&f->slice_damaged);
784  f->slice_damaged = av_refstruct_allocz(f->slice_count * sizeof(*f->slice_damaged));
785  if (!f->slice_damaged)
786  return AVERROR(ENOMEM);
787 
788  for (int j = 0; j < f->slice_count; j++) {
789  FFV1SliceContext *sc = &f->slices[j];
790 
791  if (f->version == 2) {
792  int sx = get_symbol(c, state, 0);
793  int sy = get_symbol(c, state, 0);
794  int sw = get_symbol(c, state, 0) + 1U;
795  int sh = get_symbol(c, state, 0) + 1U;
796 
797  if (sx < 0 || sy < 0 || sw <= 0 || sh <= 0)
798  return AVERROR_INVALIDDATA;
799  if (sx > f->num_h_slices - sw || sy > f->num_v_slices - sh)
800  return AVERROR_INVALIDDATA;
801 
802  sc->slice_x = sx * (int64_t)f->width / f->num_h_slices;
803  sc->slice_y = sy * (int64_t)f->height / f->num_v_slices;
804  sc->slice_width = (sx + sw) * (int64_t)f->width / f->num_h_slices - sc->slice_x;
805  sc->slice_height = (sy + sh) * (int64_t)f->height / f->num_v_slices - sc->slice_y;
806 
807  av_assert0((unsigned)sc->slice_width <= f->width &&
808  (unsigned)sc->slice_height <= f->height);
809  av_assert0 ( (unsigned)sc->slice_x + (uint64_t)sc->slice_width <= f->width
810  && (unsigned)sc->slice_y + (uint64_t)sc->slice_height <= f->height);
811  }
812 
813  av_refstruct_unref(&sc->plane);
814  sc->plane = ff_ffv1_planes_alloc();
815  if (!sc->plane)
816  return AVERROR(ENOMEM);
817 
818  for (int i = 0; i < f->plane_count; i++) {
819  PlaneContext *const p = &sc->plane[i];
820 
821  if (f->version == 2) {
822  int idx = get_symbol(c, state, 0);
823  if (idx >= (unsigned)f->quant_table_count) {
824  av_log(f->avctx, AV_LOG_ERROR,
825  "quant_table_index out of range\n");
826  return AVERROR_INVALIDDATA;
827  }
828  p->quant_table_index = idx;
829  context_count = f->context_count[idx];
830  }
831 
832  if (f->version <= 2) {
833  av_assert0(context_count >= 0);
834  p->context_count = context_count;
835  }
836  }
837  }
838  return 0;
839 }
840 
841 static av_cold int decode_init(AVCodecContext *avctx)
842 {
843  FFV1Context *f = avctx->priv_data;
844  int ret;
845 
846  if ((ret = ff_ffv1_common_init(avctx)) < 0)
847  return ret;
848 
849  if (avctx->extradata_size > 0 && (ret = read_extra_header(f)) < 0)
850  return ret;
851 
852  if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
853  return ret;
854 
855  return 0;
856 }
857 
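/* Top-level decode callback: read the keyframe flag, parse the frame
 * header on keyframes, sanity-check the packet size, allocate the
 * output frame, split the packet into slices by walking the 3-byte
 * size (plus optional 5-byte error-check) trailers from the end,
 * verify per-slice CRCs when error checking is enabled, decode all
 * slices in parallel via avctx->execute(), and conceal damaged slices
 * by copying the co-located area from the previous picture. */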
858 static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
859  int *got_frame, AVPacket *avpkt)
860 {
861  uint8_t *buf = avpkt->data;
862  int buf_size = avpkt->size;
863  FFV1Context *f = avctx->priv_data;
864  RangeCoder *const c = &f->slices[0].c;
865  int ret, key_frame;
866  uint8_t keystate = 128;
867  uint8_t *buf_p;
868  AVFrame *p;
869 
870  ff_progress_frame_unref(&f->last_picture);
871  FFSWAP(ProgressFrame, f->picture, f->last_picture);
872 
873 
874  f->avctx = avctx;
875  f->frame_damaged = 0;
876  ff_init_range_decoder(c, buf, buf_size);
877  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
878 
879  if (get_rac(c, &keystate)) {
880  key_frame = AV_FRAME_FLAG_KEY;
881  f->key_frame_ok = 0;
882  if ((ret = read_header(f)) < 0)
883  return ret;
884  f->key_frame_ok = 1;
885  } else {
886  if (!f->key_frame_ok) {
887  av_log(avctx, AV_LOG_ERROR,
888  "Cannot decode non-keyframe without valid keyframe\n");
889  return AVERROR_INVALIDDATA;
890  }
891  key_frame = 0;
892  }
893 
894  if (f->ac != AC_GOLOMB_RICE) {
895  if (buf_size < avctx->width * avctx->height / (128*8))
896  return AVERROR_INVALIDDATA;
897  } else {
898  int w = avctx->width;
899  int s = 1 + w / (1<<23);
900  int i;
901 
902  w /= s;
903 
904  for (i = 0; w > (1<<ff_log2_run[i]); i++)
905  w -= ff_log2_run[i];
906  if (buf_size < (avctx->height + i + 6) / 8 * s)
907  return AVERROR_INVALIDDATA;
908  }
909 
910  ret = ff_progress_frame_get_buffer(avctx, &f->picture,
911  AV_GET_BUFFER_FLAG_REF);
912  if (ret < 0)
913  return ret;
914 
915  p = f->picture.f;
916 
917  p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
918  p->flags = (p->flags & ~AV_FRAME_FLAG_KEY) | key_frame;
919 
920  if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) {
921  /* we have interlaced material flagged in container */
922  p->flags |= AV_FRAME_FLAG_INTERLACED;
923  if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
924  p->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
925  }
926 
927  if (avctx->debug & FF_DEBUG_PICT_INFO)
928  av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
929  f->version, !!(p->flags & AV_FRAME_FLAG_KEY), f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
930 
931  ff_thread_finish_setup(avctx);
932 
933  buf_p = buf + buf_size;
934  for (int i = f->slice_count - 1; i >= 0; i--) {
935  FFV1SliceContext *sc = &f->slices[i];
936  int trailer = 3 + 5*!!f->ec;
937  int v;
938 
939  sc->slice_damaged = 0;
940 
941  if (i || f->version > 2) {
942  if (trailer > buf_p - buf) v = INT_MAX;
943  else v = AV_RB24(buf_p-trailer) + trailer;
944  } else v = buf_p - c->bytestream_start;
945  if (buf_p - c->bytestream_start < v) {
946  av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
947  ff_progress_frame_report(&f->picture, INT_MAX);
948  return AVERROR_INVALIDDATA;
949  }
950  buf_p -= v;
951 
952  if (f->ec) {
953  unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), f->crcref, buf_p, v);
954  if (crc != f->crcref) {
955  int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
956  av_log(f->avctx, AV_LOG_ERROR, "slice CRC mismatch %X!", crc);
957  if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
958  av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase));
959  } else if (ts != AV_NOPTS_VALUE) {
960  av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
961  } else {
962  av_log(f->avctx, AV_LOG_ERROR, "\n");
963  }
964  slice_set_damaged(f, sc);
965  }
966  if (avctx->debug & FF_DEBUG_PICT_INFO) {
967  av_log(avctx, AV_LOG_DEBUG, "slice %d, CRC: 0x%08"PRIX32"\n", i, AV_RB32(buf_p + v - 4));
968  }
969  }
970 
971  if (i) {
972  ff_init_range_decoder(&sc->c, buf_p, v);
973  ff_build_rac_states(&sc->c, 0.05 * (1LL << 32), 256 - 8);
974  } else
975  sc->c.bytestream_end = buf_p + v;
976 
977  }
978 
979  avctx->execute(avctx,
980  decode_slice,
981  f->slices,
982  NULL,
983  f->slice_count,
984  sizeof(*f->slices));
985 
986  for (int i = f->slice_count - 1; i >= 0; i--) {
987  FFV1SliceContext *sc = &f->slices[i];
988  if (sc->slice_damaged && f->last_picture.f) {
989  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
990  const uint8_t *src[4];
991  uint8_t *dst[4];
992  ff_progress_frame_await(&f->last_picture, INT_MAX);
993  for (int j = 0; j < desc->nb_components; j++) {
994  int pixshift = desc->comp[j].depth > 8;
995  int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
996  int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
997  dst[j] = p->data[j] + p->linesize[j] *
998  (sc->slice_y >> sv) + ((sc->slice_x >> sh) << pixshift);
999  src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] *
1000  (sc->slice_y >> sv) + ((sc->slice_x >> sh) << pixshift);
1001 
1002  }
1003 
1004  av_image_copy(dst, p->linesize, src,
1005  f->last_picture.f->linesize,
1006  avctx->pix_fmt,
1007  sc->slice_width,
1008  sc->slice_height);
1009 
1010  f->slice_damaged[i] = 1;
1011  }
1012  }
1013  ff_progress_frame_report(&f->picture, INT_MAX);
1014 
1015  ff_progress_frame_unref(&f->last_picture);
1016  if ((ret = av_frame_ref(rframe, f->picture.f)) < 0)
1017  return ret;
1018 
1019  *got_frame = 1;
1020 
1021  return buf_size;
1022 }
1023 
1024 #if HAVE_THREADS
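/* Frame-threading context update: copy the stream parameters and the
 * per-slice plane state and geometry from the source decoder context
 * so the next frame thread can continue decoding. */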
1025 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1026 {
1027  FFV1Context *fsrc = src->priv_data;
1028  FFV1Context *fdst = dst->priv_data;
1029 
1030  if (dst == src)
1031  return 0;
1032 
1033  fdst->version = fsrc->version;
1034  fdst->micro_version = fsrc->micro_version;
1035  fdst->chroma_planes = fsrc->chroma_planes;
1036  fdst->chroma_h_shift = fsrc->chroma_h_shift;
1037  fdst->chroma_v_shift = fsrc->chroma_v_shift;
1038  fdst->transparency = fsrc->transparency;
1039  fdst->plane_count = fsrc->plane_count;
1040  fdst->ac = fsrc->ac;
1041  fdst->colorspace = fsrc->colorspace;
1042 
1043  fdst->ec = fsrc->ec;
1044  fdst->intra = fsrc->intra;
1045  fdst->key_frame_ok = fsrc->key_frame_ok;
1046 
1047  fdst->packed_at_lsb = fsrc->packed_at_lsb;
1048  fdst->slice_count = fsrc->slice_count;
1049  fdst->use32bit = fsrc->use32bit;
1050  memcpy(fdst->state_transition, fsrc->state_transition,
1051  sizeof(fdst->state_transition));
1052 
1053  // in version 1 there is a single per-keyframe quant table, so
1054  // we need to propagate it between threads
1055  if (fsrc->version < 2)
1056  memcpy(fdst->quant_tables[0], fsrc->quant_tables[0], sizeof(fsrc->quant_tables[0]));
1057 
1058  for (int i = 0; i < fdst->num_h_slices * fdst->num_v_slices; i++) {
1059  FFV1SliceContext *sc = &fdst->slices[i];
1060  const FFV1SliceContext *sc0 = &fsrc->slices[i];
1061 
1062  av_refstruct_replace(&sc->plane, sc0->plane);
1063 
1064  if (fsrc->version < 3) {
1065  sc->slice_x = sc0->slice_x;
1066  sc->slice_y = sc0->slice_y;
1067  sc->slice_width = sc0->slice_width;
1068  sc->slice_height = sc0->slice_height;
1069  }
1070  }
1071 
1071 
1072  av_refstruct_replace(&fdst->slice_damaged, fsrc->slice_damaged);
1073 
1074  av_assert1(fdst->max_slice_count == fsrc->max_slice_count);
1075 
1076  ff_progress_frame_replace(&fdst->picture, &fsrc->picture);
1077 
1078  return 0;
1079 }
1080 #endif
1081 
1082 static av_cold int ffv1_decode_close(AVCodecContext *avctx)
1083 {
1084  FFV1Context *const s = avctx->priv_data;
1085 
1086  ff_progress_frame_unref(&s->picture);
1087  ff_progress_frame_unref(&s->last_picture);
1088 
1089  return ff_ffv1_close(avctx);
1090 }
1091 
1093  .p.name = "ffv1",
1094  CODEC_LONG_NAME("FFmpeg video codec #1"),
1095  .p.type = AVMEDIA_TYPE_VIDEO,
1096  .p.id = AV_CODEC_ID_FFV1,
1097  .priv_data_size = sizeof(FFV1Context),
1098  .init = decode_init,
1099  .close = ffv1_decode_close,
1100  FF_CODEC_DECODE_CB(decode_frame),
1101  UPDATE_THREAD_CONTEXT(update_thread_context),
1102  .p.capabilities = AV_CODEC_CAP_DR1 |
1103  AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
1104  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1105  FF_CODEC_CAP_USES_PROGRESSFRAMES,
1106 };
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:546
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1893
read_extra_header
static int read_extra_header(FFV1Context *f)
Definition: ffv1dec.c:411
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:525
FFV1Context::chroma_v_shift
int chroma_v_shift
Definition: ffv1.h:118
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
FFV1SliceContext::slice_height
int slice_height
Definition: ffv1.h:77
FFV1Context::key_frame_ok
int key_frame_ok
Definition: ffv1.h:139
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
update_vlc_state
static void update_vlc_state(VlcState *const state, const int v)
Definition: ffv1.h:196
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:266
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3170
MAX_OVERREAD
#define MAX_OVERREAD
Definition: lagarithrac.h:49
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:202
FFV1SliceContext::plane
PlaneContext * plane
Definition: ffv1.h:88
FFV1Context::ec
int ec
Definition: ffv1.h:137
int64_t
long long int64_t
Definition: coverity.c:34
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:538
get_sr_golomb
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
read signed golomb rice code (ffv1).
Definition: golomb.h:532
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:403
pixdesc.h
ff_ffv1_common_init
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
Definition: ffv1.c:36
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:545
w
uint8_t w
Definition: llviddspenc.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:539
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:540
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:722
decode_plane
static int decode_plane(FFV1Context *f, FFV1SliceContext *sc, GetBitContext *gb, uint8_t *src, int w, int h, int stride, int plane_index, int pixel_stride, int ac)
Definition: ffv1dec.c:120
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
This function sets up the ProgressFrame, i.e.
Definition: decode.c:1848
rangecoder.h
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:502
PlaneContext::state
uint8_t(* state)[CONTEXT_SIZE]
Definition: ffv1.h:66
FFCodec
Definition: codec_internal.h:127
FFV1Context::num_h_slices
int num_h_slices
Definition: ffv1.h:152
RangeCoder::bytestream_end
uint8_t * bytestream_end
Definition: rangecoder.h:44
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:675
read_quant_table
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
Definition: ffv1dec.c:366
decode_line
static av_always_inline int RENAME() decode_line(FFV1Context *f, FFV1SliceContext *sc, GetBitContext *gb, int w, TYPE *sample[2], int plane_index, int bits, int ac)
Definition: ffv1dec_template.c:26
AC_RANGE_CUSTOM_TAB
#define AC_RANGE_CUSTOM_TAB
Definition: ffv1.h:53
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:541
FFV1Context::quant_tables
int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][MAX_QUANT_TABLE_SIZE]
Definition: ffv1.h:129
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
FFV1Context::chroma_h_shift
int chroma_h_shift
Definition: ffv1.h:118
FFV1SliceContext::slice_x
int slice_x
Definition: ffv1.h:78
ff_ffv1_clear_slice_state
void ff_ffv1_clear_slice_state(const FFV1Context *f, FFV1SliceContext *sc)
Definition: ffv1.c:200
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1415
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:482
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:424
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:667
crc.h
golomb.h
exp golomb vlc stuff
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:537
AV_FIELD_TT
@ AV_FIELD_TT
Top coded_first, top displayed first.
Definition: defs.h:203
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:520
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:518
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:547
ffv1_decode_close
static av_cold int ffv1_decode_close(AVCodecContext *avctx)
Definition: ffv1dec.c:1082
GetBitContext
Definition: get_bits.h:108
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:500
planes
static const struct @469 planes[]
av_noinline
#define av_noinline
Definition: attributes.h:72
MAX_SLICES
#define MAX_SLICES
Definition: d3d12va_hevc.c:33
CONTEXT_SIZE
#define CONTEXT_SIZE
Definition: ffv1.h:44
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:486
get_symbol_inline
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:44
FFV1Context::chroma_planes
int chroma_planes
Definition: ffv1.h:117
PlaneContext::context_count
int context_count
Definition: ffv1.h:65
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
AV_FIELD_TB
@ AV_FIELD_TB
Top coded first, bottom displayed first.
Definition: defs.h:205
refstruct.h
decode_rgb_frame
static int RENAME() decode_rgb_frame(FFV1Context *f, FFV1SliceContext *sc, GetBitContext *gb, uint8_t *src[4], int w, int h, int stride[4])
Definition: ffv1dec_template.c:134
av_refstruct_allocz
static void * av_refstruct_allocz(size_t size)
Equivalent to av_refstruct_alloc_ext(size, 0, NULL, NULL)
Definition: refstruct.h:105
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:505
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:514
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:654
FFV1SliceContext::sample_buffer
int16_t * sample_buffer
Definition: ffv1.h:73
FFV1Context::use32bit
int use32bit
Definition: ffv1.h:135
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:522
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:538
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP14
#define AV_PIX_FMT_GBRAP14
Definition: pixfmt.h:524
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:523
FFV1Context::slice_count
int slice_count
Definition: ffv1.h:149
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:515
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
Definition: ffv1dec.c:858
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
FFV1Context::max_slice_count
int max_slice_count
Definition: ffv1.h:150
bits
uint8_t bits
Definition: vp3data.h:128
FFV1Context::intra
int intra
Definition: ffv1.h:138
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
is_input_end
static int is_input_end(RangeCoder *c, GetBitContext *gb, int ac)
Definition: ffv1dec.c:98
AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUVA444P12
Definition: pixfmt.h:544
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:499
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
read_quant_tables
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
Definition: ffv1dec.c:393
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:513
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1876
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
get_bits.h
AV_PIX_FMT_GRAY14
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:485
fold
static av_always_inline int fold(int diff, int bits)
Definition: ffv1.h:185
FFV1Context::ac
int ac
1=range coder <-> 0=golomb rice
Definition: ffv1.h:128
get_vlc_symbol
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
Definition: ffv1dec.c:73
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
FFV1Context::plane_count
int plane_count
Definition: ffv1.h:127
FFV1Context::slice_damaged
uint8_t * slice_damaged
Definition: ffv1.h:162
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:483
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
quant_table
static const int16_t quant_table[64]
Definition: intrax8.c:517
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:521
read_header
static int read_header(FFV1Context *f)
Definition: ffv1dec.c:540
get_symbol
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:68
NULL
#define NULL
Definition: coverity.c:32
PlaneContext::vlc_state
VlcState * vlc_state
Definition: ffv1.h:67
AC_GOLOMB_RICE
#define AC_GOLOMB_RICE
Definition: ffv1.h:51
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
FFV1Context::num_v_slices
int num_v_slices
Definition: ffv1.h:151
FFV1Context::colorspace
int colorspace
Definition: ffv1.h:133
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
ff_ffv1_decoder
const FFCodec ff_ffv1_decoder
Definition: ffv1dec.c:1092
FFV1Context::slices
FFV1SliceContext * slices
Definition: ffv1.h:154
FFV1Context::state_transition
uint8_t state_transition[256]
Definition: ffv1.h:131
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:504
mathops.h
PlaneContext
Definition: ffv1.h:63
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:305
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:503
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:517
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VlcState
Definition: ffv1.h:56
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
FFV1SliceContext::slice_width
int slice_width
Definition: ffv1.h:76
ff_init_range_decoder
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: rangecoder.c:53
AV_CODEC_ID_FFV1
@ AV_CODEC_ID_FFV1
Definition: codec_id.h:85
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:505
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:540
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:401
codec_internal.h
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:507
ff_ffv1_close
av_cold int ff_ffv1_close(AVCodecContext *avctx)
Definition: ffv1.c:225
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:565
sample
#define sample
Definition: flacdsp_template.c:44
size
int size
Definition: twinvq_data.h:10344
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
slice_set_damaged
static void slice_set_damaged(FFV1Context *f, FFV1SliceContext *sc)
Definition: ffv1dec.c:256
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:509
RangeCoder::bytestream
uint8_t * bytestream
Definition: rangecoder.h:43
FFV1Context::picture
ProgressFrame picture
Definition: ffv1.h:123
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:475
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
FFV1SliceContext::slice_rct_by_coef
int slice_rct_by_coef
Definition: ffv1.h:84
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:542
ff_ffv1_init_slice_state
av_cold int ff_ffv1_init_slice_state(const FFV1Context *f, FFV1SliceContext *sc)
Definition: ffv1.c:74
PlaneContext::quant_table_index
int quant_table_index
Definition: ffv1.h:64
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1612
FFV1SliceContext::c
RangeCoder c
Definition: ffv1.h:90
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
FFV1SliceContext::slice_rct_ry_coef
int slice_rct_ry_coef
Definition: ffv1.h:85
av_flatten
#define av_flatten
Definition: attributes.h:96
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:519
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ffv1.h
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
FFV1SliceContext
Definition: ffv1.h:72
len
int len
Definition: vorbis_enc_data.h:426
get_rac
static int get_rac(RangeCoder *c, uint8_t *const state)
Definition: rangecoder.h:118
AV_CRC_32_IEEE
@ AV_CRC_32_IEEE
Definition: crc.h:52
AVCodecContext::height
int height
Definition: avcodec.h:632
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:671
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:662
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:501
MAX_CONTEXT_INPUTS
#define MAX_CONTEXT_INPUTS
Definition: ffv1.h:49
FFV1Context::packed_at_lsb
int packed_at_lsb
Definition: ffv1.h:144
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
These buffered frames must be flushed immediately if a new input produces new frames; the filter must not call request_frame() to get more, it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame() method or the application. A filter with several inputs will most likely require some kind of queuing mechanism; it is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame(): for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame() on the corresponding output. For a filter, if there are queued frames already, one of them should be pushed; otherwise the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It should return, or at least make progress towards producing, a frame.
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:479
decode_slice_header
static int decode_slice_header(const FFV1Context *f, FFV1SliceContext *sc, AVFrame *frame)
Definition: ffv1dec.c:167
FFV1SliceContext::slice_y
int slice_y
Definition: ffv1.h:79
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:539
ff_thread_finish_setup
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before the decoding thread has reported progress on them. Move that code, as well as code calling up to it, to before the decode process starts, and call ff_thread_finish_setup() afterwards. If some code can't be moved ...
ff_slice_coord
int ff_slice_coord(const FFV1Context *f, int width, int sx, int num_h_slices, int chroma_shift)
This is intended for both width and height.
Definition: ffv1.c:129
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:506
U
#define U(x)
Definition: vpx_arith.h:37
ff_ffv1_planes_alloc
PlaneContext * ff_ffv1_planes_alloc(void)
Definition: ffv1.c:68
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1883
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:511
ff_ffv1_allocate_initial_states
int ff_ffv1_allocate_initial_states(FFV1Context *f)
Definition: ffv1.c:185
AVCodecContext
main external API structure.
Definition: avcodec.h:451
RangeCoder::bytestream_start
uint8_t * bytestream_start
Definition: rangecoder.h:42
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1631
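Slice-parallel decoding is typically dispatched through this hook. The sketch below shows the calling pattern only; the callback name and the slices/slice_count fields are illustrative assumptions, not the decoder's actual code.

    /* Illustrative worker: 'arg' points at one element of the array passed to execute(). */
    static int decode_one_slice(AVCodecContext *c, void *arg)
    {
        FFV1SliceContext *sc = arg;
        /* ... decode this slice ... */
        return 0;
    }

    /* execute() invokes the worker 'count' times with arg2 + i*size; with slice
     * threading enabled these calls may run on worker threads. */
    avctx->execute(avctx, decode_one_slice, f->slices, NULL,
                   f->slice_count, sizeof(*f->slices));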
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: ffv1dec.c:841
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
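A self-contained sketch of this CRC API together with av_crc_get_table() and AV_CRC_32_IEEE listed above; starting the CRC at zero is an assumption made for illustration.

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/crc.h"

    /* Compute the IEEE CRC-32 of a buffer. The table returned by
     * av_crc_get_table() is a shared static table and must not be freed. */
    static uint32_t buffer_crc32(const uint8_t *buf, size_t len)
    {
        const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE);
        return av_crc(table, 0, buf, len);  /* 0 = initial CRC, allows chaining calls */
    }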
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
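A hedged sketch of the refstruct ownership idiom, using ff_ffv1_planes_alloc() from above as the allocator; the sc->plane destination field is an assumption named only for illustration.

    PlaneContext *planes = ff_ffv1_planes_alloc();  /* refcounted allocation */
    if (!planes)
        return AVERROR(ENOMEM);

    av_refstruct_replace(&sc->plane, planes);  /* destination now shares ownership */
    av_refstruct_unref(&planes);               /* drop local reference; pointer becomes NULL */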
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_PIX_FMT_YUVA422P12
#define AV_PIX_FMT_YUVA422P12
Definition: pixfmt.h:543
update_thread_context
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before the decoding thread has reported progress on them. Move that code, as well as code calling up to it, to before the decode process starts; if some code can't be moved, have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point, but it should work. Use ff_thread_get_buffer() (or ff_progress_frame_get_buffer() in case you have inter-frame dependencies and use the ProgressFrame API) to allocate frame buffers. Call ff_progress_frame_report() after some part of the current picture has been decoded; a good place to put this is where draw_horiz_band() is called - add this if it isn't called anywhere.
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
temp
else temp
Definition: vf_mcdeint.c:263
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1414
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
MAX_QUANT_TABLES
#define MAX_QUANT_TABLES
Definition: ffv1.h:46
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FFV1Context
Definition: ffv1.h:109
FFV1Context::transparency
int transparency
Definition: ffv1.h:119
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
FFV1SliceContext::run_index
int run_index
Definition: ffv1.h:82
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
ffv1dec_template.c
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:632
ff_ffv1_init_slice_contexts
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
Definition: ffv1.c:142
ff_log2_run
const uint8_t ff_log2_run[41]
Definition: mathtables.c:116
imgutils.h
FFV1SliceContext::slice_reset_contexts
int slice_reset_contexts
Definition: ffv1.h:97
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:448
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
FFV1Context::micro_version
int micro_version
Definition: ffv1.h:115
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:508
h
h
Definition: vp9dsp_template.c:2070
RangeCoder
Definition: mss3.c:63
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:512
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
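A typical validation pattern for this helper (hedged sketch; the warning text is illustrative):

    /* Reset an implausible sample aspect ratio instead of propagating it. */
    if (av_image_check_sar(avctx->width, avctx->height,
                           avctx->sample_aspect_ratio) < 0) {
        av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR %d/%d\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
    }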
width
#define width
Definition: dsp.h:85
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:484
av_image_copy
void av_image_copy(uint8_t *const dst_data[4], const int dst_linesizes[4], const uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
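A minimal sketch, assuming dst and src are AVFrame pointers with identical width, height and pixel format:

    /* Copy every plane of src into dst; the two frames may have different linesizes. */
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t *const *)src->data, src->linesize,
                  dst->format, dst->width, dst->height);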
FFV1SliceContext::ac_byte_count
int ac_byte_count
number of bytes used for AC coding
Definition: ffv1.h:92
AV_RB24
#define AV_RB24(x)
Read an unsigned 24-bit value stored in big-endian byte order (template-generated bytestream macro).
Definition: bytestream.h:97
FFV1SliceContext::slice_damaged
int slice_damaged
Definition: ffv1.h:98
FFV1SliceContext::slice_coding_mode
int slice_coding_mode
Definition: ffv1.h:83
state
static struct @468 state
src
#define src
Definition: vp8dsp.c:248
FFV1Context::version
int version
Definition: ffv1.h:114
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:510