ffv1dec.c
1 /*
2  * FFV1 decoder
3  *
4  * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * FF Video Codec 1 (a lossless codec) decoder
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/crc.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/pixdesc.h"
33 #include "avcodec.h"
34 #include "codec_internal.h"
35 #include "get_bits.h"
36 #include "rangecoder.h"
37 #include "golomb.h"
38 #include "mathops.h"
39 #include "ffv1.h"
40 #include "progressframe.h"
41 #include "libavutil/refstruct.h"
42 #include "thread.h"
43 #include "decode.h"
44 
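/* Decode one range-coded symbol: state[0] codes "value is zero", states
 * 1..10 the unary exponent, 22..31 the mantissa bits and 11..21 the sign. */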
45 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
46  int is_signed)
47 {
48  if (get_rac(c, state + 0))
49  return 0;
50  else {
51  int e;
52  unsigned a;
53  e = 0;
54  while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10
55  e++;
56  if (e > 31)
57  return AVERROR_INVALIDDATA;
58  }
59 
60  a = 1;
61  for (int i = e - 1; i >= 0; i--)
62  a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31
63 
64  e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
65  return (a ^ e) - e;
66  }
67 }
68 
69 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
70 {
71  return get_symbol_inline(c, state, is_signed);
72 }
73 
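/* Decode one Golomb-Rice coded symbol; the Rice parameter k is adapted
 * from the count/error_sum statistics kept in the VlcState. */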
74 static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
75  int bits)
76 {
77  int k, i, v, ret;
78 
79  i = state->count;
80  k = 0;
81  while (i < state->error_sum) { // FIXME: optimize
82  k++;
83  i += i;
84  }
85 
86  v = get_sr_golomb(gb, k, 12, bits);
87  ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
88  v, state->bias, state->error_sum, state->drift, state->count, k);
89 
90  v ^= ((2 * state->drift + state->count) >> 31);
91 
92  ret = fold(v + state->bias, bits);
93 
94  update_vlc_state(state, v);
95 
96  return ret;
97 }
98 
99 static int is_input_end(RangeCoder *c, GetBitContext *gb, int ac)
100 {
101  if (ac != AC_GOLOMB_RICE) {
102  if (c->overread > MAX_OVERREAD)
103  return AVERROR_INVALIDDATA;
104  } else {
105  if (get_bits_left(gb) < 1)
106  return AVERROR_INVALIDDATA;
107  }
108  return 0;
109 }
110 
111 #define TYPE int16_t
112 #define RENAME(name) name
113 #include "ffv1dec_template.c"
114 #undef TYPE
115 #undef RENAME
116 
117 #define TYPE int32_t
118 #define RENAME(name) name ## 32
119 #include "ffv1dec_template.c"
120 
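/* Decode one plane (or one interleaved component) of a slice line by
 * line, keeping the previous and current row of samples as context. */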
121 static int decode_plane(FFV1Context *f, FFV1SliceContext *sc,
122  GetBitContext *gb,
123  uint8_t *src, int w, int h, int stride, int plane_index,
124  int pixel_stride, int ac)
125 {
126  int x, y;
127  int16_t *sample[2];
128  sample[0] = sc->sample_buffer + 3;
129  sample[1] = sc->sample_buffer + w + 6 + 3;
130 
131  sc->run_index = 0;
132 
133  memset(sc->sample_buffer, 0, 2 * (w + 6) * sizeof(*sc->sample_buffer));
134 
135  for (y = 0; y < h; y++) {
136  int16_t *temp = sample[0]; // FIXME: try a normal buffer
137 
138  sample[0] = sample[1];
139  sample[1] = temp;
140 
141  sample[1][-1] = sample[0][0];
142  sample[0][w] = sample[0][w - 1];
143 
144  if (f->avctx->bits_per_raw_sample <= 8) {
145  int ret = decode_line(f, sc, gb, w, sample, plane_index, 8, ac);
146  if (ret < 0)
147  return ret;
148  for (x = 0; x < w; x++)
149  src[x*pixel_stride + stride * y] = sample[1][x];
150  } else {
151  int ret = decode_line(f, sc, gb, w, sample, plane_index, f->avctx->bits_per_raw_sample, ac);
152  if (ret < 0)
153  return ret;
154  if (f->packed_at_lsb) {
155  for (x = 0; x < w; x++) {
156  ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
157  }
158  } else {
159  for (x = 0; x < w; x++) {
160  ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x] << (16 - f->avctx->bits_per_raw_sample) | ((uint16_t **)sample)[1][x] >> (2 * f->avctx->bits_per_raw_sample - 16);
161  }
162  }
163  }
164  }
165  return 0;
166 }
167 
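/* Parse a version >2 slice header: slice position and size in slice-grid
 * units, per-plane quant table index, picture structure, sample aspect
 * ratio and, for version >3, the slice coding mode and RCT coefficients. */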
168 static int decode_slice_header(FFV1Context *f,
169  FFV1SliceContext *sc, AVFrame *frame)
170 {
171  RangeCoder *c = &sc->c;
172  uint8_t state[CONTEXT_SIZE];
173  unsigned ps, context_count;
174  int sx, sy, sw, sh;
175 
176  memset(state, 128, sizeof(state));
177  sx = get_symbol(c, state, 0);
178  sy = get_symbol(c, state, 0);
179  sw = get_symbol(c, state, 0) + 1U;
180  sh = get_symbol(c, state, 0) + 1U;
181 
182  av_assert0(f->version > 2);
183 
184 
185  if (sx < 0 || sy < 0 || sw <= 0 || sh <= 0)
186  return AVERROR_INVALIDDATA;
187  if (sx > f->num_h_slices - sw || sy > f->num_v_slices - sh)
188  return AVERROR_INVALIDDATA;
189 
190  sc->slice_x = ff_slice_coord(f, f->width , sx , f->num_h_slices, f->chroma_h_shift);
191  sc->slice_y = ff_slice_coord(f, f->height, sy , f->num_v_slices, f->chroma_v_shift);
192  sc->slice_width = ff_slice_coord(f, f->width , sx + sw, f->num_h_slices, f->chroma_h_shift) - sc->slice_x;
193  sc->slice_height = ff_slice_coord(f, f->height, sy + sh, f->num_v_slices, f->chroma_v_shift) - sc->slice_y;
194 
195  av_assert0((unsigned)sc->slice_width <= f->width &&
196  (unsigned)sc->slice_height <= f->height);
197  av_assert0 ( (unsigned)sc->slice_x + (uint64_t)sc->slice_width <= f->width
198  && (unsigned)sc->slice_y + (uint64_t)sc->slice_height <= f->height);
199 
200  if (f->ac == AC_GOLOMB_RICE && sc->slice_width >= (1<<23))
201  return AVERROR_INVALIDDATA;
202 
203  for (unsigned i = 0; i < f->plane_count; i++) {
204  PlaneContext * const p = &sc->plane[i];
205  int idx = get_symbol(c, state, 0);
206  if (idx >= (unsigned)f->quant_table_count) {
207  av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
208  return -1;
209  }
210  p->quant_table_index = idx;
211  context_count = f->context_count[idx];
212 
213  if (p->context_count < context_count) {
214  av_freep(&p->state);
215  av_freep(&p->vlc_state);
216  }
217  p->context_count = context_count;
218  }
219 
220  ps = get_symbol(c, state, 0);
221  if (ps == 1) {
222  frame->flags |= AV_FRAME_FLAG_INTERLACED;
223  frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
224  } else if (ps == 2) {
225  frame->flags |= AV_FRAME_FLAG_INTERLACED;
226  frame->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST;
227  } else if (ps == 3) {
228  frame->flags &= ~AV_FRAME_FLAG_INTERLACED;
229  }
230  frame->sample_aspect_ratio.num = get_symbol(c, state, 0);
231  frame->sample_aspect_ratio.den = get_symbol(c, state, 0);
232 
233  if (av_image_check_sar(f->width, f->height,
234  frame->sample_aspect_ratio) < 0) {
235  av_log(f->avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
236  frame->sample_aspect_ratio.num,
237  frame->sample_aspect_ratio.den);
238  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
239  }
240 
241  if (f->version > 3) {
242  sc->slice_reset_contexts = get_rac(c, state);
243  sc->slice_coding_mode = get_symbol(c, state, 0);
244  if (sc->slice_coding_mode != 1 && f->colorspace == 1) {
245  sc->slice_rct_by_coef = get_symbol(c, state, 0);
246  sc->slice_rct_ry_coef = get_symbol(c, state, 0);
247  if ((uint64_t)sc->slice_rct_by_coef + (uint64_t)sc->slice_rct_ry_coef > 4) {
248  av_log(f->avctx, AV_LOG_ERROR, "slice_rct_y_coef out of range\n");
249  return AVERROR_INVALIDDATA;
250  }
251  }
252  }
253 
254  return 0;
255 }
256 
257 static void slice_set_damaged(FFV1Context *f, FFV1SliceContext *sc)
258 {
259  sc->slice_damaged = 1;
260 
261  // only set this for frame threading, as for slice threading its value is
262  // not used and setting it would be a race
263  if (f->avctx->active_thread_type & FF_THREAD_FRAME)
264  f->frame_damaged = 1;
265 }
266 
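/* Slice worker run through avctx->execute(): reads the slice header
 * (version >2), (re)initializes the contexts and decodes the slice
 * payload; damaged slices are flagged rather than aborted on, unless
 * AV_EF_EXPLODE is set. */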
267 static int decode_slice(AVCodecContext *c, void *arg)
268 {
269  FFV1Context *f = c->priv_data;
270  FFV1SliceContext *sc = arg;
271  int width, height, x, y, ret;
272  const int ps = av_pix_fmt_desc_get(f->pix_fmt)->comp[0].step;
273  AVFrame * const p = f->picture.f;
274  const int si = sc - f->slices;
275  GetBitContext gb;
276  int ac = f->ac || sc->slice_coding_mode == 1;
277 
278  if (!(p->flags & AV_FRAME_FLAG_KEY) && f->last_picture.f)
279  ff_progress_frame_await(&f->last_picture, si);
280 
281  if (f->slice_damaged[si])
282  slice_set_damaged(f, sc);
283 
284  sc->slice_rct_by_coef = 1;
285  sc->slice_rct_ry_coef = 1;
286 
287  if (f->version > 2) {
288  if (ff_ffv1_init_slice_state(f, sc) < 0)
289  return AVERROR(ENOMEM);
290  if (decode_slice_header(f, sc, p) < 0) {
291  sc->slice_x = sc->slice_y = sc->slice_height = sc->slice_width = 0;
292  slice_set_damaged(f, sc);
293  return AVERROR_INVALIDDATA;
294  }
295  }
296  if ((ret = ff_ffv1_init_slice_state(f, sc)) < 0)
297  return ret;
298  if ((p->flags & AV_FRAME_FLAG_KEY) || sc->slice_reset_contexts) {
299  ff_ffv1_clear_slice_state(f, sc);
300  } else if (sc->slice_damaged) {
301  return AVERROR_INVALIDDATA;
302  }
303 
304  width = sc->slice_width;
305  height = sc->slice_height;
306  x = sc->slice_x;
307  y = sc->slice_y;
308 
309  if (ac == AC_GOLOMB_RICE) {
310  if (f->combined_version >= 0x30002)
311  get_rac(&sc->c, (uint8_t[]) { 129 });
312  sc->ac_byte_count = f->version > 2 || (!x && !y) ? sc->c.bytestream - sc->c.bytestream_start - 1 : 0;
313  init_get_bits(&gb,
314  sc->c.bytestream_start + sc->ac_byte_count,
315  (sc->c.bytestream_end - sc->c.bytestream_start - sc->ac_byte_count) * 8);
316  }
317 
318  av_assert1(width && height);
319  if (f->colorspace == 0 && (f->chroma_planes || !f->transparency)) {
320  const int chroma_width = AV_CEIL_RSHIFT(width, f->chroma_h_shift);
321  const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
322  const int cx = x >> f->chroma_h_shift;
323  const int cy = y >> f->chroma_v_shift;
324  decode_plane(f, sc, &gb, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1, ac);
325 
326  if (f->chroma_planes) {
327  decode_plane(f, sc, &gb, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1, ac);
328  decode_plane(f, sc, &gb, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1, ac);
329  }
330  if (f->transparency)
331  decode_plane(f, sc, &gb, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 1 : 2, 1, ac);
332  } else if (f->colorspace == 0) {
333  decode_plane(f, sc, &gb, p->data[0] + ps*x + y*p->linesize[0] , width, height, p->linesize[0], 0, 2, ac);
334  decode_plane(f, sc, &gb, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2, ac);
335  } else if (f->use32bit) {
336  uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
337  p->data[1] + ps * x + y * p->linesize[1],
338  p->data[2] + ps * x + y * p->linesize[2],
339  p->data[3] + ps * x + y * p->linesize[3] };
340  decode_rgb_frame32(f, sc, &gb, planes, width, height, p->linesize);
341  } else {
342  uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
343  p->data[1] + ps * x + y * p->linesize[1],
344  p->data[2] + ps * x + y * p->linesize[2],
345  p->data[3] + ps * x + y * p->linesize[3] };
346  decode_rgb_frame(f, sc, &gb, planes, width, height, p->linesize);
347  }
348  if (ac != AC_GOLOMB_RICE && f->version > 2) {
349  int v;
350  get_rac(&sc->c, (uint8_t[]) { 129 });
351  v = sc->c.bytestream_end - sc->c.bytestream - 2 - 5*!!f->ec;
352  if (v) {
353  av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
354  slice_set_damaged(f, sc);
355  }
356  }
357 
358  if (sc->slice_damaged && (f->avctx->err_recognition & AV_EF_EXPLODE))
359  return AVERROR_INVALIDDATA;
360 
361  if ((c->active_thread_type & FF_THREAD_FRAME) && !f->frame_damaged)
362  ff_progress_frame_report(&f->picture, si);
363 
364  return 0;
365 }
366 
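/* Read one run-length coded quantization table (128 entries plus the
 * mirrored negative half); the return value is the number of distinct
 * quantized values it maps to. */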
367 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
368 {
369  int v;
370  int i = 0;
371  uint8_t state[CONTEXT_SIZE];
372 
373  memset(state, 128, sizeof(state));
374 
375  for (v = 0; i < 128; v++) {
376  unsigned len = get_symbol(c, state, 0) + 1U;
377 
378  if (len > 128 - i || !len)
379  return AVERROR_INVALIDDATA;
380 
381  while (len--) {
382  quant_table[i] = scale * v;
383  i++;
384  }
385  }
386 
387  for (i = 1; i < 128; i++)
388  quant_table[256 - i] = -quant_table[i];
389  quant_table[128] = -quant_table[127];
390 
391  return 2 * v - 1;
392 }
393 
394 static int read_quant_tables(RangeCoder *c,
395  int16_t quant_table[MAX_CONTEXT_INPUTS][256])
396 {
397  int i;
398  int context_count = 1;
399 
400  for (i = 0; i < 5; i++) {
401  int ret = read_quant_table(c, quant_table[i], context_count);
402  if (ret < 0)
403  return ret;
404  context_count *= ret;
405  if (context_count > 32768U) {
406  return AVERROR_INVALIDDATA;
407  }
408  }
409  return (context_count + 1) / 2;
410 }
411 
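/* Parse the global header stored in extradata (version >= 2): version,
 * coder type, colorspace and bit depth, slice grid, quantization tables,
 * optional initial context states, error-detection mode and CRC. */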
412 static int read_extra_header(FFV1Context *f)
413 {
414  RangeCoder c;
415  uint8_t state[CONTEXT_SIZE];
416  int ret;
417  uint8_t state2[32][CONTEXT_SIZE];
418  unsigned crc = 0;
419 
420  memset(state2, 128, sizeof(state2));
421  memset(state, 128, sizeof(state));
422 
423  ff_init_range_decoder(&c, f->avctx->extradata, f->avctx->extradata_size);
424  ff_build_rac_states(&c, 0.05 * (1LL << 32), 256 - 8);
425 
426  f->version = get_symbol(&c, state, 0);
427  if (f->version < 2) {
428  av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
429  return AVERROR_INVALIDDATA;
430  }
431  if (f->version > 4) {
432  av_log(f->avctx, AV_LOG_ERROR, "unsupported version %d\n",
433  f->version);
434  return AVERROR_PATCHWELCOME;
435  }
436  f->combined_version = f->version << 16;
437  if (f->version > 2) {
438  c.bytestream_end -= 4;
439  f->micro_version = get_symbol(&c, state, 0);
440  if (f->micro_version < 0 || f->micro_version > 65535)
441  return AVERROR_INVALIDDATA;
442  f->combined_version += f->micro_version;
443  }
444  f->ac = get_symbol(&c, state, 0);
445 
446  if (f->ac == AC_RANGE_CUSTOM_TAB) {
447  for (int i = 1; i < 256; i++)
448  f->state_transition[i] = get_symbol(&c, state, 1) + c.one_state[i];
449  }
450 
451  f->colorspace = get_symbol(&c, state, 0); //YUV cs type
452  f->avctx->bits_per_raw_sample = get_symbol(&c, state, 0);
453  f->chroma_planes = get_rac(&c, state);
454  f->chroma_h_shift = get_symbol(&c, state, 0);
455  f->chroma_v_shift = get_symbol(&c, state, 0);
456  f->transparency = get_rac(&c, state);
457  f->plane_count = 1 + (f->chroma_planes || f->version<4) + f->transparency;
458  f->num_h_slices = 1 + get_symbol(&c, state, 0);
459  f->num_v_slices = 1 + get_symbol(&c, state, 0);
460 
461  if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
462  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
463  f->chroma_h_shift, f->chroma_v_shift);
464  return AVERROR_INVALIDDATA;
465  }
466 
467  if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
468  f->num_v_slices > (unsigned)f->height || !f->num_v_slices
469  ) {
470  av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n");
471  return AVERROR_INVALIDDATA;
472  }
473 
474  if (f->num_h_slices > MAX_SLICES / f->num_v_slices) {
475  av_log(f->avctx, AV_LOG_ERROR, "slice count unsupported\n");
476  return AVERROR_PATCHWELCOME;
477  }
478 
479  f->quant_table_count = get_symbol(&c, state, 0);
480  if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
481  av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count);
482  f->quant_table_count = 0;
483  return AVERROR_INVALIDDATA;
484  }
485 
486  for (int i = 0; i < f->quant_table_count; i++) {
487  f->context_count[i] = read_quant_tables(&c, f->quant_tables[i]);
488  if (f->context_count[i] < 0) {
489  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
490  return AVERROR_INVALIDDATA;
491  }
492  }
493  if ((ret = ff_ffv1_allocate_initial_states(f)) < 0)
494  return ret;
495 
496  for (int i = 0; i < f->quant_table_count; i++)
497  if (get_rac(&c, state)) {
498  for (int j = 0; j < f->context_count[i]; j++)
499  for (int k = 0; k < CONTEXT_SIZE; k++) {
500  int pred = j ? f->initial_states[i][j - 1][k] : 128;
501  f->initial_states[i][j][k] =
502  (pred + get_symbol(&c, state2[k], 1)) & 0xFF;
503  }
504  }
505 
506  if (f->version > 2) {
507  f->ec = get_symbol(&c, state, 0);
508  if (f->ec >= 2)
509  f->crcref = 0x7a8c4079;
510  if (f->combined_version >= 0x30003)
511  f->intra = get_symbol(&c, state, 0);
512  }
513 
514  if (f->version > 2) {
515  unsigned v;
516  v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), f->crcref,
517  f->avctx->extradata, f->avctx->extradata_size);
518  if (v != f->crcref || f->avctx->extradata_size < 4) {
519  av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
520  return AVERROR_INVALIDDATA;
521  }
522  crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4);
523  }
524 
525  if (f->avctx->debug & FF_DEBUG_PICT_INFO)
526  av_log(f->avctx, AV_LOG_DEBUG,
527  "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
528  f->version, f->micro_version,
529  f->ac,
530  f->colorspace,
531  f->avctx->bits_per_raw_sample,
532  f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift,
533  f->transparency,
534  f->num_h_slices, f->num_v_slices,
535  f->quant_table_count,
536  f->ec,
537  f->intra,
538  crc
539  );
540  return 0;
541 }
542 
543 static enum AVPixelFormat get_pixel_format(FFV1Context *f)
544 {
545  enum AVPixelFormat pix_fmts[] = {
546  f->pix_fmt,
547  AV_PIX_FMT_NONE,
548  };
549 
550  return ff_get_format(f->avctx, pix_fmts);
551 }
552 
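/* Parse the picture header: for version < 2 the full parameter set is
 * coded per keyframe; in all versions this derives the pixel format,
 * the slice count and the per-slice plane contexts. */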
553 static int read_header(FFV1Context *f, RangeCoder *c)
554 {
555  uint8_t state[CONTEXT_SIZE];
556  int context_count = -1; //-1 to avoid warning
557 
558  memset(state, 128, sizeof(state));
559 
560  if (f->version < 2) {
561  int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
562  unsigned v= get_symbol(c, state, 0);
563  if (v >= 2) {
564  av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
565  return AVERROR_INVALIDDATA;
566  }
567  f->version = v;
568  f->ac = get_symbol(c, state, 0);
569 
570  if (f->ac == AC_RANGE_CUSTOM_TAB) {
571  for (int i = 1; i < 256; i++) {
572  int st = get_symbol(c, state, 1) + c->one_state[i];
573  if (st < 1 || st > 255) {
574  av_log(f->avctx, AV_LOG_ERROR, "invalid state transition %d\n", st);
575  return AVERROR_INVALIDDATA;
576  }
577  f->state_transition[i] = st;
578  }
579  }
580 
581  colorspace = get_symbol(c, state, 0); //YUV cs type
582  bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
583  chroma_planes = get_rac(c, state);
584  chroma_h_shift = get_symbol(c, state, 0);
585  chroma_v_shift = get_symbol(c, state, 0);
586  transparency = get_rac(c, state);
587  if (colorspace == 0 && f->avctx->skip_alpha)
588  transparency = 0;
589 
590  if (f->plane_count) {
591  if (colorspace != f->colorspace ||
592  bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
593  chroma_planes != f->chroma_planes ||
594  chroma_h_shift != f->chroma_h_shift ||
595  chroma_v_shift != f->chroma_v_shift ||
596  transparency != f->transparency) {
597  av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
598  return AVERROR_INVALIDDATA;
599  }
600  }
601 
602  if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
603  av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
604  chroma_h_shift, chroma_v_shift);
605  return AVERROR_INVALIDDATA;
606  }
607 
608  f->colorspace = colorspace;
609  f->avctx->bits_per_raw_sample = bits_per_raw_sample;
610  f->chroma_planes = chroma_planes;
611  f->chroma_h_shift = chroma_h_shift;
612  f->chroma_v_shift = chroma_v_shift;
613  f->transparency = transparency;
614 
615  f->plane_count = 2 + f->transparency;
616  }
617 
618  if (f->colorspace == 0) {
619  if (!f->transparency && !f->chroma_planes) {
620  if (f->avctx->bits_per_raw_sample <= 8)
621  f->pix_fmt = AV_PIX_FMT_GRAY8;
622  else if (f->avctx->bits_per_raw_sample == 9) {
623  f->packed_at_lsb = 1;
624  f->pix_fmt = AV_PIX_FMT_GRAY9;
625  } else if (f->avctx->bits_per_raw_sample == 10) {
626  f->packed_at_lsb = 1;
627  f->pix_fmt = AV_PIX_FMT_GRAY10;
628  } else if (f->avctx->bits_per_raw_sample == 12) {
629  f->packed_at_lsb = 1;
630  f->pix_fmt = AV_PIX_FMT_GRAY12;
631  } else if (f->avctx->bits_per_raw_sample == 14) {
632  f->packed_at_lsb = 1;
633  f->pix_fmt = AV_PIX_FMT_GRAY14;
634  } else if (f->avctx->bits_per_raw_sample == 16) {
635  f->packed_at_lsb = 1;
636  f->pix_fmt = AV_PIX_FMT_GRAY16;
637  } else if (f->avctx->bits_per_raw_sample < 16) {
638  f->pix_fmt = AV_PIX_FMT_GRAY16;
639  } else
640  return AVERROR(ENOSYS);
641  } else if (f->transparency && !f->chroma_planes) {
642  if (f->avctx->bits_per_raw_sample <= 8)
643  f->pix_fmt = AV_PIX_FMT_YA8;
644  else
645  return AVERROR(ENOSYS);
646  } else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
647  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
648  case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P; break;
649  case 0x01: f->pix_fmt = AV_PIX_FMT_YUV440P; break;
650  case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P; break;
651  case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P; break;
652  case 0x20: f->pix_fmt = AV_PIX_FMT_YUV411P; break;
653  case 0x22: f->pix_fmt = AV_PIX_FMT_YUV410P; break;
654  }
655  } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
656  switch(16*f->chroma_h_shift + f->chroma_v_shift) {
657  case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P; break;
658  case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P; break;
659  case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P; break;
660  }
661  } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
662  f->packed_at_lsb = 1;
663  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
664  case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P9; break;
665  case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P9; break;
666  case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P9; break;
667  }
668  } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
669  f->packed_at_lsb = 1;
670  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
671  case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
672  case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
673  case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
674  }
675  } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
676  f->packed_at_lsb = 1;
677  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
678  case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P10; break;
679  case 0x01: f->pix_fmt = AV_PIX_FMT_YUV440P10; break;
680  case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P10; break;
681  case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P10; break;
682  }
683  } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
684  f->packed_at_lsb = 1;
685  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
686  case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
687  case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
688  case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
689  }
690  } else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) {
691  f->packed_at_lsb = 1;
692  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
693  case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P12; break;
694  case 0x01: f->pix_fmt = AV_PIX_FMT_YUV440P12; break;
695  case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P12; break;
696  case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P12; break;
697  }
698  } else if (f->avctx->bits_per_raw_sample == 12 && f->transparency) {
699  f->packed_at_lsb = 1;
700  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
701  case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P12; break;
702  case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P12; break;
703  }
704  } else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) {
705  f->packed_at_lsb = 1;
706  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
707  case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P14; break;
708  case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P14; break;
709  case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P14; break;
710  }
711  } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
712  f->packed_at_lsb = 1;
713  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
714  case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P16; break;
715  case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P16; break;
716  case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P16; break;
717  }
718  } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){
719  f->packed_at_lsb = 1;
720  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
721  case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
722  case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
723  case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
724  }
725  }
726  } else if (f->colorspace == 1) {
727  if (f->chroma_h_shift || f->chroma_v_shift) {
728  av_log(f->avctx, AV_LOG_ERROR,
729  "chroma subsampling not supported in this colorspace\n");
730  return AVERROR(ENOSYS);
731  }
732  if ( f->avctx->bits_per_raw_sample <= 8 && !f->transparency)
733  f->pix_fmt = AV_PIX_FMT_0RGB32;
734  else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency)
735  f->pix_fmt = AV_PIX_FMT_RGB32;
736  else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency)
737  f->pix_fmt = AV_PIX_FMT_GBRP9;
738  else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency)
739  f->pix_fmt = AV_PIX_FMT_GBRP10;
740  else if (f->avctx->bits_per_raw_sample == 10 && f->transparency)
741  f->pix_fmt = AV_PIX_FMT_GBRAP10;
742  else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency)
743  f->pix_fmt = AV_PIX_FMT_GBRP12;
744  else if (f->avctx->bits_per_raw_sample == 12 && f->transparency)
745  f->pix_fmt = AV_PIX_FMT_GBRAP12;
746  else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency)
747  f->pix_fmt = AV_PIX_FMT_GBRP14;
748  else if (f->avctx->bits_per_raw_sample == 14 && f->transparency)
749  f->pix_fmt = AV_PIX_FMT_GBRAP14;
750  else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
751  f->pix_fmt = AV_PIX_FMT_GBRP16;
752  f->use32bit = 1;
753  } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
754  f->pix_fmt = AV_PIX_FMT_GBRAP16;
755  f->use32bit = 1;
756  }
757  } else {
758  av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
759  return AVERROR(ENOSYS);
760  }
761  if (f->pix_fmt == AV_PIX_FMT_NONE) {
762  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
763  return AVERROR(ENOSYS);
764  }
765 
766  f->avctx->pix_fmt = get_pixel_format(f);
767  if (f->avctx->pix_fmt < 0)
768  return AVERROR(EINVAL);
769 
770  ff_dlog(f->avctx, "%d %d %d\n",
771  f->chroma_h_shift, f->chroma_v_shift, f->pix_fmt);
772  if (f->version < 2) {
773  context_count = read_quant_tables(c, f->quant_tables[0]);
774  if (context_count < 0) {
775  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
776  return AVERROR_INVALIDDATA;
777  }
778  f->slice_count = f->max_slice_count;
779  } else if (f->version < 3) {
780  f->slice_count = get_symbol(c, state, 0);
781  } else {
782  const uint8_t *p = c->bytestream_end;
783  for (f->slice_count = 0;
784  f->slice_count < MAX_SLICES && 3 + 5*!!f->ec < p - c->bytestream_start;
785  f->slice_count++) {
786  int trailer = 3 + 5*!!f->ec;
787  int size = AV_RB24(p-trailer);
788  if (size + trailer > p - c->bytestream_start)
789  break;
790  p -= size + trailer;
791  }
792  }
793  if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) {
794  av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count);
795  return AVERROR_INVALIDDATA;
796  }
797 
798  av_refstruct_unref(&f->slice_damaged);
799  f->slice_damaged = av_refstruct_allocz(f->slice_count * sizeof(*f->slice_damaged));
800  if (!f->slice_damaged)
801  return AVERROR(ENOMEM);
802 
803  for (int j = 0; j < f->slice_count; j++) {
804  FFV1SliceContext *sc = &f->slices[j];
805 
806  if (f->version == 2) {
807  int sx = get_symbol(c, state, 0);
808  int sy = get_symbol(c, state, 0);
809  int sw = get_symbol(c, state, 0) + 1U;
810  int sh = get_symbol(c, state, 0) + 1U;
811 
812  if (sx < 0 || sy < 0 || sw <= 0 || sh <= 0)
813  return AVERROR_INVALIDDATA;
814  if (sx > f->num_h_slices - sw || sy > f->num_v_slices - sh)
815  return AVERROR_INVALIDDATA;
816 
817  sc->slice_x = sx * (int64_t)f->width / f->num_h_slices;
818  sc->slice_y = sy * (int64_t)f->height / f->num_v_slices;
819  sc->slice_width = (sx + sw) * (int64_t)f->width / f->num_h_slices - sc->slice_x;
820  sc->slice_height = (sy + sh) * (int64_t)f->height / f->num_v_slices - sc->slice_y;
821 
822  av_assert0((unsigned)sc->slice_width <= f->width &&
823  (unsigned)sc->slice_height <= f->height);
824  av_assert0 ( (unsigned)sc->slice_x + (uint64_t)sc->slice_width <= f->width
825  && (unsigned)sc->slice_y + (uint64_t)sc->slice_height <= f->height);
826  }
827 
828  av_refstruct_unref(&sc->plane);
829  sc->plane = ff_ffv1_planes_alloc();
830  if (!sc->plane)
831  return AVERROR(ENOMEM);
832 
833  for (int i = 0; i < f->plane_count; i++) {
834  PlaneContext *const p = &sc->plane[i];
835 
836  if (f->version == 2) {
837  int idx = get_symbol(c, state, 0);
838  if (idx >= (unsigned)f->quant_table_count) {
839  av_log(f->avctx, AV_LOG_ERROR,
840  "quant_table_index out of range\n");
841  return AVERROR_INVALIDDATA;
842  }
843  p->quant_table_index = idx;
844  context_count = f->context_count[idx];
845  }
846 
847  if (f->version <= 2) {
848  av_assert0(context_count >= 0);
849  p->context_count = context_count;
850  }
851  }
852  }
853  return 0;
854 }
855 
856 static av_cold int decode_init(AVCodecContext *avctx)
857 {
858  FFV1Context *f = avctx->priv_data;
859  int ret;
860 
861  if ((ret = ff_ffv1_common_init(avctx)) < 0)
862  return ret;
863 
864  if (avctx->extradata_size > 0 && (ret = read_extra_header(f)) < 0)
865  return ret;
866 
867  if ((ret = ff_ffv1_init_slice_contexts(f)) < 0)
868  return ret;
869 
870  return 0;
871 }
872 
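/* Locate slice number idx from the end of the packet: each slice (except
 * slice 0 in version <= 2) ends with a trailer holding its 3-byte length
 * plus 5 more bytes of flush/CRC data when error detection is enabled. */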
873 static int find_next_slice(AVCodecContext *avctx,
874  uint8_t *buf, uint8_t *buf_end, int idx,
875  uint8_t **pos, uint32_t *len)
876 {
877  FFV1Context *f = avctx->priv_data;
878 
879  /* Length field */
880  uint32_t v = buf_end - buf;
881  if (idx || f->version > 2) {
882  /* Three bytes of length, plus flush bit + CRC */
883  uint32_t trailer = 3 + 5*!!f->ec;
884  if (trailer > buf_end - buf)
885  v = INT_MAX;
886  else
887  v = AV_RB24(buf_end - trailer) + trailer;
888  }
889 
890  if (buf_end - buf < v) {
891  av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
892  ff_progress_frame_report(&f->picture, INT_MAX);
893  return AVERROR_INVALIDDATA;
894  }
895 
896  *len = v;
897  if (idx)
898  *pos = buf_end - v;
899  else
900  *pos = buf;
901 
902  return 0;
903 }
904 
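/* Read the frame-level header: the keyframe flag, for keyframes the full
 * picture header via read_header(), and a minimal sanity check of the
 * packet size against the frame dimensions. */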
905 static int decode_header(AVCodecContext *avctx, RangeCoder *c,
906  uint8_t *buf, size_t buf_size)
907 {
908  int ret;
909  FFV1Context *f = avctx->priv_data;
910 
911  uint8_t keystate = 128;
912  ff_init_range_decoder(c, buf, buf_size);
913  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
914 
915  if (get_rac(c, &keystate)) {
916  f->key_frame = AV_FRAME_FLAG_KEY;
917  f->key_frame_ok = 0;
918  if ((ret = read_header(f, c)) < 0)
919  return ret;
920  f->key_frame_ok = 1;
921  } else {
922  if (!f->key_frame_ok) {
923  av_log(avctx, AV_LOG_ERROR,
924  "Cannot decode non-keyframe without valid keyframe\n");
925  return AVERROR_INVALIDDATA;
926  }
927  f->key_frame = 0;
928  }
929 
930  if (f->ac != AC_GOLOMB_RICE) {
931  if (buf_size < avctx->width * avctx->height / (128*8))
932  return AVERROR_INVALIDDATA;
933  } else {
934  int w = avctx->width;
935  int s = 1 + w / (1<<23);
936  int i;
937 
938  w /= s;
939 
940  for (i = 0; w > (1<<ff_log2_run[i]); i++)
941  w -= ff_log2_run[i];
942  if (buf_size < (avctx->height + i + 6) / 8 * s)
943  return AVERROR_INVALIDDATA;
944  }
945 
946  return 0;
947 }
948 
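/* Walk the packet from the end, check per-slice CRCs, set up one range
 * coder per slice, run the slice decoders via avctx->execute() and
 * conceal damaged slices by copying from the previous picture. */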
949 static int decode_slices(AVCodecContext *avctx, RangeCoder c,
950  AVPacket *avpkt)
951 {
952  FFV1Context *f = avctx->priv_data;
953  AVFrame *p = f->picture.f;
954 
955  uint8_t *buf = avpkt->data;
956  size_t buf_size = avpkt->size;
957  uint8_t *buf_end = buf + buf_size;
958 
959  for (int i = f->slice_count - 1; i >= 0; i--) {
960  FFV1SliceContext *sc = &f->slices[i];
961 
962  uint8_t *pos;
963  uint32_t len;
964  int err = find_next_slice(avctx, buf, buf_end, i,
965  &pos, &len);
966  if (err < 0)
967  return err;
968 
969  buf_end -= len;
970 
971  sc->slice_damaged = 0;
972 
973  if (f->ec) {
974  unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), f->crcref, pos, len);
975  if (crc != f->crcref) {
976  int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
977  av_log(f->avctx, AV_LOG_ERROR, "slice CRC mismatch %X!", crc);
978  if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
979  av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase));
980  } else if (ts != AV_NOPTS_VALUE) {
981  av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
982  } else {
983  av_log(f->avctx, AV_LOG_ERROR, "\n");
984  }
985  slice_set_damaged(f, sc);
986  }
987  if (avctx->debug & FF_DEBUG_PICT_INFO) {
988  av_log(avctx, AV_LOG_DEBUG, "slice %d, CRC: 0x%08"PRIX32"\n", i, AV_RB32(pos + len - 4));
989  }
990  }
991 
992  if (i) {
993  ff_init_range_decoder(&sc->c, pos, len);
994  ff_build_rac_states(&sc->c, 0.05 * (1LL << 32), 256 - 8);
995  } else {
996  sc->c = c;
997  sc->c.bytestream_end = pos + len;
998  }
999  }
1000 
1001  avctx->execute(avctx,
1002  decode_slice,
1003  f->slices,
1004  NULL,
1005  f->slice_count,
1006  sizeof(*f->slices));
1007 
1008  for (int i = f->slice_count - 1; i >= 0; i--) {
1009  FFV1SliceContext *sc = &f->slices[i];
1010  if (sc->slice_damaged && f->last_picture.f) {
1011  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(f->pix_fmt);
1012  const uint8_t *src[4];
1013  uint8_t *dst[4];
1014  ff_progress_frame_await(&f->last_picture, INT_MAX);
1015  for (int j = 0; j < desc->nb_components; j++) {
1016  int pixshift = desc->comp[j].depth > 8;
1017  int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
1018  int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
1019  dst[j] = p->data[j] + p->linesize[j] *
1020  (sc->slice_y >> sv) + ((sc->slice_x >> sh) << pixshift);
1021  src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] *
1022  (sc->slice_y >> sv) + ((sc->slice_x >> sh) << pixshift);
1023 
1024  }
1025 
1026  av_image_copy(dst, p->linesize, src,
1027  f->last_picture.f->linesize,
1028  f->pix_fmt,
1029  sc->slice_width,
1030  sc->slice_height);
1031 
1032  f->slice_damaged[i] = 1;
1033  }
1034  }
1035 
1036  return 0;
1037 }
1038 
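/* Main decode callback (reached through avcodec_send_packet() /
 * avcodec_receive_frame()): parses the header, allocates the output
 * frame, decodes all slices and returns the finished picture. */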
1039 static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
1040  int *got_frame, AVPacket *avpkt)
1041 {
1042  FFV1Context *f = avctx->priv_data;
1043  int ret;
1044  AVFrame *p;
1045 
1046  /* This is copied onto the first slice's range coder context */
1047  RangeCoder c;
1048 
1049  ff_progress_frame_unref(&f->last_picture);
1050  FFSWAP(ProgressFrame, f->picture, f->last_picture);
1051 
1052 
1053  f->avctx = avctx;
1054  f->frame_damaged = 0;
1055 
1056  ret = decode_header(avctx, &c, avpkt->data, avpkt->size);
1057  if (ret < 0)
1058  return ret;
1059 
1060  ret = ff_progress_frame_get_buffer(avctx, &f->picture,
1061  AV_GET_BUFFER_FLAG_REF);
1062  if (ret < 0)
1063  return ret;
1064 
1065  p = f->picture.f;
1066 
1067  p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
1068  p->flags = (p->flags & ~AV_FRAME_FLAG_KEY) | f->key_frame;
1069 
1070  if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) {
1071  /* we have interlaced material flagged in container */
1072  p->flags |= AV_FRAME_FLAG_INTERLACED;
1073  if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
1074  p->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
1075  }
1076 
1077  if (avctx->debug & FF_DEBUG_PICT_INFO)
1078  av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
1079  f->version, !!(p->flags & AV_FRAME_FLAG_KEY), f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
1080 
1081  ff_thread_finish_setup(avctx);
1082 
1083  ret = decode_slices(avctx, c, avpkt);
1084  if (ret < 0)
1085  return ret;
1086 
1087  ff_progress_frame_report(&f->picture, INT_MAX);
1088 
1089  ff_progress_frame_unref(&f->last_picture);
1090  if ((ret = av_frame_ref(rframe, f->picture.f)) < 0)
1091  return ret;
1092 
1093  *got_frame = 1;
1094 
1095  return avpkt->size;
1096 }
1097 
1098 #if HAVE_THREADS
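/* For frame threading: copy the header-derived state and per-slice
 * geometry from the source decoding thread to the destination one. */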
1099 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1100 {
1101  FFV1Context *fsrc = src->priv_data;
1102  FFV1Context *fdst = dst->priv_data;
1103 
1104  if (dst == src)
1105  return 0;
1106 
1107  fdst->version = fsrc->version;
1108  fdst->micro_version = fsrc->micro_version;
1109  fdst->combined_version = fsrc->combined_version;
1110  fdst->chroma_planes = fsrc->chroma_planes;
1111  fdst->chroma_h_shift = fsrc->chroma_h_shift;
1112  fdst->chroma_v_shift = fsrc->chroma_v_shift;
1113  fdst->transparency = fsrc->transparency;
1114  fdst->plane_count = fsrc->plane_count;
1115  fdst->ac = fsrc->ac;
1116  fdst->colorspace = fsrc->colorspace;
1117 
1118  fdst->ec = fsrc->ec;
1119  fdst->intra = fsrc->intra;
1120  fdst->key_frame_ok = fsrc->key_frame_ok;
1121 
1122  fdst->packed_at_lsb = fsrc->packed_at_lsb;
1123  fdst->slice_count = fsrc->slice_count;
1124  fdst->use32bit = fsrc->use32bit;
1125  memcpy(fdst->state_transition, fsrc->state_transition,
1126  sizeof(fdst->state_transition));
1127 
1128  // in version 1 there is a single per-keyframe quant table, so
1129  // we need to propagate it between threads
1130  if (fsrc->version < 2)
1131  memcpy(fdst->quant_tables[0], fsrc->quant_tables[0], sizeof(fsrc->quant_tables[0]));
1132 
1133  for (int i = 0; i < fdst->num_h_slices * fdst->num_v_slices; i++) {
1134  FFV1SliceContext *sc = &fdst->slices[i];
1135  const FFV1SliceContext *sc0 = &fsrc->slices[i];
1136 
1137  av_refstruct_replace(&sc->plane, sc0->plane);
1138 
1139  if (fsrc->version < 3) {
1140  sc->slice_x = sc0->slice_x;
1141  sc->slice_y = sc0->slice_y;
1142  sc->slice_width = sc0->slice_width;
1143  sc->slice_height = sc0->slice_height;
1144  }
1145  }
1146 
1147  av_refstruct_replace(&fdst->slice_damaged, fsrc->slice_damaged);
1148 
1149  av_assert1(fdst->max_slice_count == fsrc->max_slice_count);
1150 
1151  ff_progress_frame_replace(&fdst->picture, &fsrc->picture);
1152 
1153  return 0;
1154 }
1155 #endif
1156 
1157 static av_cold int ffv1_decode_close(AVCodecContext *avctx)
1158 {
1159  FFV1Context *const s = avctx->priv_data;
1160 
1161  ff_progress_frame_unref(&s->picture);
1162  ff_progress_frame_unref(&s->last_picture);
1163 
1164  return ff_ffv1_close(avctx);
1165 }
1166 
1167 const FFCodec ff_ffv1_decoder = {
1168  .p.name = "ffv1",
1169  CODEC_LONG_NAME("FFmpeg video codec #1"),
1170  .p.type = AVMEDIA_TYPE_VIDEO,
1171  .p.id = AV_CODEC_ID_FFV1,
1172  .priv_data_size = sizeof(FFV1Context),
1173  .init = decode_init,
1174  .close = ffv1_decode_close,
1175  FF_CODEC_DECODE_CB(decode_frame),
1176  UPDATE_THREAD_CONTEXT(update_thread_context),
1177  .p.capabilities = AV_CODEC_CAP_DR1 |
1178  AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
1179  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1180  FF_CODEC_CAP_USES_PROGRESSFRAMES,
1181 };
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:557
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1893
read_extra_header
static int read_extra_header(FFV1Context *f)
Definition: ffv1dec.c:412
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:533
FFV1Context::chroma_v_shift
int chroma_v_shift
Definition: ffv1.h:119
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
FFV1SliceContext::slice_height
int slice_height
Definition: ffv1.h:77
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
FFV1Context::key_frame_ok
int key_frame_ok
Definition: ffv1.h:141
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
update_vlc_state
static void update_vlc_state(VlcState *const state, const int v)
Definition: ffv1.h:198
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:267
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1277
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3244
MAX_OVERREAD
#define MAX_OVERREAD
Definition: lagarithrac.h:49
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:202
FFV1SliceContext::plane
PlaneContext * plane
Definition: ffv1.h:88
FFV1Context::ec
int ec
Definition: ffv1.h:139
int64_t
long long int64_t
Definition: coverity.c:34
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:549
get_sr_golomb
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
read signed golomb rice code (ffv1).
Definition: golomb.h:532
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
pixdesc.h
ff_ffv1_common_init
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
Definition: ffv1.c:36
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:556
w
uint8_t w
Definition: llviddspenc.c:38
decode_header
static int decode_header(AVCodecContext *avctx, RangeCoder *c, uint8_t *buf, size_t buf_size)
Definition: ffv1dec.c:905
AVPacket::data
uint8_t * data
Definition: packet.h:539
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:551
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:722
decode_plane
static int decode_plane(FFV1Context *f, FFV1SliceContext *sc, GetBitContext *gb, uint8_t *src, int w, int h, int stride, int plane_index, int pixel_stride, int ac)
Definition: ffv1dec.c:121
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
This function sets up the ProgressFrame, i.e.
Definition: decode.c:1848
rangecoder.h
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:510
PlaneContext::state
uint8_t(* state)[CONTEXT_SIZE]
Definition: ffv1.h:66
FFCodec
Definition: codec_internal.h:127
FFV1Context::num_h_slices
int num_h_slices
Definition: ffv1.h:154
RangeCoder::bytestream_end
uint8_t * bytestream_end
Definition: rangecoder.h:44
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:690
read_quant_table
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
Definition: ffv1dec.c:367
decode_line
static av_always_inline int RENAME() decode_line(FFV1Context *f, FFV1SliceContext *sc, GetBitContext *gb, int w, TYPE *sample[2], int plane_index, int bits, int ac)
Definition: ffv1dec_template.c:26
AC_RANGE_CUSTOM_TAB
#define AC_RANGE_CUSTOM_TAB
Definition: ffv1.h:53
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:552
FFV1Context::quant_tables
int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][MAX_QUANT_TABLE_SIZE]
Definition: ffv1.h:131
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
FFV1Context::chroma_h_shift
int chroma_h_shift
Definition: ffv1.h:119
FFV1SliceContext::slice_x
int slice_x
Definition: ffv1.h:78
ff_ffv1_clear_slice_state
void ff_ffv1_clear_slice_state(const FFV1Context *f, FFV1SliceContext *sc)
Definition: ffv1.c:200
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1415
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:490
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:674
crc.h
golomb.h
exp golomb vlc stuff
state
static struct @472 state
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:548
AV_FIELD_TT
@ AV_FIELD_TT
Top coded_first, top displayed first.
Definition: defs.h:203
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:528
FFV1Context::combined_version
int combined_version
Definition: ffv1.h:116
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:526
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:558
ffv1_decode_close
static av_cold int ffv1_decode_close(AVCodecContext *avctx)
Definition: ffv1dec.c:1157
GetBitContext
Definition: get_bits.h:108
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:508
av_noinline
#define av_noinline
Definition: attributes.h:72
MAX_SLICES
#define MAX_SLICES
Definition: d3d12va_hevc.c:33
CONTEXT_SIZE
#define CONTEXT_SIZE
Definition: ffv1.h:44
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:494
get_symbol_inline
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:45
FFV1Context::chroma_planes
int chroma_planes
Definition: ffv1.h:118
PlaneContext::context_count
int context_count
Definition: ffv1.h:65
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
AV_FIELD_TB
@ AV_FIELD_TB
Top coded first, bottom displayed first.
Definition: defs.h:205
refstruct.h
decode_rgb_frame
static int RENAME() decode_rgb_frame(FFV1Context *f, FFV1SliceContext *sc, GetBitContext *gb, uint8_t *src[4], int w, int h, int stride[4])
Definition: ffv1dec_template.c:134
av_refstruct_allocz
static void * av_refstruct_allocz(size_t size)
Equivalent to av_refstruct_alloc_ext(size, 0, NULL, NULL)
Definition: refstruct.h:105
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:513
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:522
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:661
FFV1SliceContext::sample_buffer
int16_t * sample_buffer
Definition: ffv1.h:73
FFV1Context::use32bit
int use32bit
Definition: ffv1.h:137
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:530
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:538
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP14
#define AV_PIX_FMT_GBRAP14
Definition: pixfmt.h:532
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:531
FFV1Context::slice_count
int slice_count
Definition: ffv1.h:151
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:523
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
Definition: ffv1dec.c:1039
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
FFV1Context::max_slice_count
int max_slice_count
Definition: ffv1.h:152
bits
uint8_t bits
Definition: vp3data.h:128
FFV1Context::intra
int intra
Definition: ffv1.h:140
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
is_input_end
static int is_input_end(RangeCoder *c, GetBitContext *gb, int ac)
Definition: ffv1dec.c:99
AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUVA444P12
Definition: pixfmt.h:555
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:507
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
read_quant_tables
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
Definition: ffv1dec.c:394
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:521
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1876
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
AV_PIX_FMT_GRAY14
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:493
fold
static av_always_inline int fold(int diff, int bits)
Definition: ffv1.h:187
FFV1Context::ac
int ac
1=range coder <-> 0=golomb rice
Definition: ffv1.h:130
get_vlc_symbol
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
Definition: ffv1dec.c:74
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
FFV1Context::plane_count
int plane_count
Definition: ffv1.h:129
FFV1Context::slice_damaged
uint8_t * slice_damaged
Definition: ffv1.h:164
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:491
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
quant_table
static const int16_t quant_table[64]
Definition: intrax8.c:517
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:529
get_symbol
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
Definition: ffv1dec.c:69
NULL
#define NULL
Definition: coverity.c:32
PlaneContext::vlc_state
VlcState * vlc_state
Definition: ffv1.h:67
AC_GOLOMB_RICE
#define AC_GOLOMB_RICE
Definition: ffv1.h:51
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
FFV1Context::num_v_slices
int num_v_slices
Definition: ffv1.h:153
FFV1Context::colorspace
int colorspace
Definition: ffv1.h:135
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
ff_ffv1_decoder
const FFCodec ff_ffv1_decoder
Definition: ffv1dec.c:1167
FFV1Context::slices
FFV1SliceContext * slices
Definition: ffv1.h:156
FFV1Context::state_transition
uint8_t state_transition[256]
Definition: ffv1.h:133
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:512
mathops.h
PlaneContext
Definition: ffv1.h:63
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:305
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:511
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:525
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
decode_slices
static int decode_slices(AVCodecContext *avctx, RangeCoder c, AVPacket *avpkt)
Definition: ffv1dec.c:949
VlcState
Definition: ffv1.h:56
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
FFV1SliceContext::slice_width
int slice_width
Definition: ffv1.h:76
ff_init_range_decoder
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: rangecoder.c:53
AV_CODEC_ID_FFV1
@ AV_CODEC_ID_FFV1
Definition: codec_id.h:85
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:512
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
get_pixel_format
static enum AVPixelFormat get_pixel_format(FFV1Context *f)
Definition: ffv1dec.c:543
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:540
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:401
codec_internal.h
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:515
ff_ffv1_close
av_cold int ff_ffv1_close(AVCodecContext *avctx)
Definition: ffv1.c:225
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:565
sample
#define sample
Definition: flacdsp_template.c:44
size
int size
Definition: twinvq_data.h:10344
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
slice_set_damaged
static void slice_set_damaged(FFV1Context *f, FFV1SliceContext *sc)
Definition: ffv1dec.c:257
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:517
RangeCoder::bytestream
uint8_t * bytestream
Definition: rangecoder.h:43
FFV1Context::picture
ProgressFrame picture
Definition: ffv1.h:124
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:483
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
FFV1SliceContext::slice_rct_by_coef
int slice_rct_by_coef
Definition: ffv1.h:84
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:553
ff_ffv1_init_slice_state
av_cold int ff_ffv1_init_slice_state(const FFV1Context *f, FFV1SliceContext *sc)
Definition: ffv1.c:74
read_header
static int read_header(FFV1Context *f, RangeCoder *c)
Definition: ffv1dec.c:553
PlaneContext::quant_table_index
int quant_table_index
Definition: ffv1.h:64
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1612
FFV1SliceContext::c
RangeCoder c
Definition: ffv1.h:90
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are no more references to it.
Definition: refstruct.c:120
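A hedged sketch of the reference-counting pattern behind this call; av_refstruct_allocz() is assumed here to be the matching allocator (only the unref/replace functions appear in this index), and MyState is an invented type.
#include "libavutil/refstruct.h"

typedef struct MyState { int foo; } MyState;

static void refstruct_demo(void)
{
    /* av_refstruct_allocz() assumed: allocate a zeroed object with refcount 1. */
    MyState *a = av_refstruct_allocz(sizeof(*a));
    MyState *b = NULL;

    av_refstruct_replace(&b, a);  /* b now refers to the same object as a */
    av_refstruct_unref(&a);       /* a's reference dropped; object kept alive by b */
    av_refstruct_unref(&b);       /* last reference gone, object is freed */
}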
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: packet.h:532
FFV1SliceContext::slice_rct_ry_coef
int slice_rct_ry_coef
Definition: ffv1.h:85
av_flatten
#define av_flatten
Definition: attributes.h:96
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:527
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ffv1.h
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
FFV1SliceContext
Definition: ffv1.h:72
len
int len
Definition: vorbis_enc_data.h:426
get_rac
static int get_rac(RangeCoder *c, uint8_t *const state)
Definition: rangecoder.h:118
AV_CRC_32_IEEE
@ AV_CRC_32_IEEE
Definition: crc.h:52
AVCodecContext::height
int height
Definition: avcodec.h:632
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:669
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:509
MAX_CONTEXT_INPUTS
#define MAX_CONTEXT_INPUTS
Definition: ffv1.h:49
FFV1Context::packed_at_lsb
int packed_at_lsb
Definition: ffv1.h:146
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
find_next_slice
static int find_next_slice(AVCodecContext *avctx, uint8_t *buf, uint8_t *buf_end, int idx, uint8_t **pos, uint32_t *len)
Definition: ffv1dec.c:873
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
These buffered frames must be flushed immediately if a new input produces new frames. The filter must not call request_frame to get more: it must just process the frame or queue it; the task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any of them, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing, a frame.
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:487
decode_slice_header
static int decode_slice_header(const FFV1Context *f, FFV1SliceContext *sc, AVFrame *frame)
Definition: ffv1dec.c:168
FFV1SliceContext::slice_y
int slice_y
Definition: ffv1.h:79
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:550
pos
unsigned int pos
Definition: spdifenc.c:414
ff_thread_finish_setup
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel. The contents of reference buffers must not be read before progress has been reported on them. Move setup code, as well as code calling it, up to before the decode process starts, and call ff_thread_finish_setup() afterwards. If some code can't be moved,
ff_slice_coord
int ff_slice_coord(const FFV1Context *f, int width, int sx, int num_h_slices, int chroma_shift)
This is intended for both width and height.
Definition: ffv1.c:129
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:514
U
#define U(x)
Definition: vpx_arith.h:37
ff_ffv1_planes_alloc
PlaneContext * ff_ffv1_planes_alloc(void)
Definition: ffv1.c:68
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and, if src is not empty, set dst up as a new reference to the same frame as src.
Definition: decode.c:1883
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:519
planes
static const struct @473 planes[]
ff_ffv1_allocate_initial_states
int ff_ffv1_allocate_initial_states(FFV1Context *f)
Definition: ffv1.c:185
AVCodecContext
main external API structure.
Definition: avcodec.h:451
RangeCoder::bytestream_start
uint8_t * bytestream_start
Definition: rangecoder.h:42
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1631
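A hedged sketch of how a slice-threaded decoder typically uses this callback; decode_one_slice and run_slices are invented names for illustration, not functions of this file.
#include "avcodec.h"

/* Worker invoked once per job, possibly from several threads. */
static int decode_one_slice(AVCodecContext *avctx, void *arg)
{
    int slice_index = *(int *)arg;
    /* ... decode slice 'slice_index' here ... */
    (void)avctx;
    (void)slice_index;
    return 0;
}

static int run_slices(AVCodecContext *avctx, int *slice_indices, int count)
{
    /* execute() calls decode_one_slice() 'count' times, advancing through
     * slice_indices in steps of sizeof(*slice_indices). */
    return avctx->execute(avctx, decode_one_slice, slice_indices,
                          NULL, count, sizeof(*slice_indices));
}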
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: ffv1dec.c:856
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
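A minimal sketch combining this with av_crc_get_table() above; crc32_of is a made-up helper name, and AV_CRC_32_IEEE is the variant listed in this index.
#include "libavutil/crc.h"

/* CRC-32/IEEE of a buffer: crc = 0 starts a fresh checksum; feeding further
 * chunks with the previous return value continues it. */
static uint32_t crc32_of(const uint8_t *buf, size_t len)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE);
    return av_crc(table, 0, buf, len);
}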
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_PIX_FMT_YUVA422P12
#define AV_PIX_FMT_YUVA422P12
Definition: pixfmt.h:554
update_thread_context
Have update_thread_context() run any setup code that could not be moved before the decode process starts in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities; there will be very little speed gain at this point, but it should work. Use ff_thread_get_buffer() (or ff_progress_frame_get_buffer() in case you have inter-frame dependencies and use the ProgressFrame API) to allocate frame buffers. Call ff_progress_frame_report() after some part of the current picture has been decoded; a good place to put this is where draw_horiz_band() is called, and add it if it isn't called anywhere.
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
temp
else temp
Definition: vf_mcdeint.c:263
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1414
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
MAX_QUANT_TABLES
#define MAX_QUANT_TABLES
Definition: ffv1.h:46
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
FFV1Context
Definition: ffv1.h:109
FFV1Context::transparency
int transparency
Definition: ffv1.h:120
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
FFV1SliceContext::run_index
int run_index
Definition: ffv1.h:82
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
ffv1dec_template.c
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:632
ff_ffv1_init_slice_contexts
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
Definition: ffv1.c:142
ff_log2_run
const uint8_t ff_log2_run[41]
Definition: mathtables.c:116
imgutils.h
FFV1SliceContext::slice_reset_contexts
int slice_reset_contexts
Definition: ffv1.h:97
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each picture line.
Definition: frame.h:455
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
FFV1Context::micro_version
int micro_version
Definition: ffv1.h:115
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:516
h
h
Definition: vp9dsp_template.c:2070
RangeCoder
Definition: mss3.c:63
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:520
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
width
#define width
Definition: dsp.h:85
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:492
av_image_copy
void av_image_copy(uint8_t *const dst_data[4], const int dst_linesizes[4], const uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
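A sketch of a typical call, assuming both frames are already allocated with the same format and dimensions; copy_pixels is an invented helper name.
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"

/* Copy pixel data plane by plane; no allocation or reference counting. */
static void copy_pixels(AVFrame *dst, const AVFrame *src)
{
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t *const *)src->data, src->linesize,
                  (enum AVPixelFormat)src->format, src->width, src->height);
}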
FFV1SliceContext::ac_byte_count
int ac_byte_count
number of bytes used for AC coding
Definition: ffv1.h:92
AV_RB24
Read an unsigned 24-bit big-endian value (byte-read macro).
Definition: bytestream.h:97
FFV1SliceContext::slice_damaged
int slice_damaged
Definition: ffv1.h:98
FFV1SliceContext::slice_coding_mode
int slice_coding_mode
Definition: ffv1.h:83
src
#define src
Definition: vp8dsp.c:248
FFV1Context::version
int version
Definition: ffv1.h:114
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:518