FFmpeg
vc2enc.c
1 /*
2  * Copyright (C) 2016 Open Broadcast Systems Ltd.
3  * Author 2016 Rostislav Pehlivanov <atomnuker@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/mem.h"
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/version.h"
26 #include "codec_internal.h"
27 #include "dirac.h"
28 #include "encode.h"
29 #include "put_bits.h"
30 #include "version.h"
31 
32 #include "vc2enc_dwt.h"
33 #include "diractab.h"
34 
35 /* The limited size resolution of each slice forces us to do this */
36 #define SSIZE_ROUND(b) (FFALIGN((b), s->size_scaler) + 4 + s->prefix_bytes)
37 
38 /* Decides the cutoff point in # of slices to distribute the leftover bytes */
39 #define SLICE_REDIST_TOTAL 150
40 
41 typedef struct VC2BaseVideoFormat {
42  enum AVPixelFormat pix_fmt;
43  AVRational time_base;
44  int width, height;
45  uint8_t interlaced, level;
46  char name[13];
47 } VC2BaseVideoFormat;
48 
49 static const VC2BaseVideoFormat base_video_fmts[] = {
50  { 0 }, /* Custom format, here just to make indexing equal to base_vf */
51  { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 176, 120, 0, 1, "QSIF525" },
52  { AV_PIX_FMT_YUV420P, { 2, 25 }, 176, 144, 0, 1, "QCIF" },
53  { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 352, 240, 0, 1, "SIF525" },
54  { AV_PIX_FMT_YUV420P, { 2, 25 }, 352, 288, 0, 1, "CIF" },
55  { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 704, 480, 0, 1, "4SIF525" },
56  { AV_PIX_FMT_YUV420P, { 2, 25 }, 704, 576, 0, 1, "4CIF" },
57 
58  { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 480, 1, 2, "SD480I-60" },
59  { AV_PIX_FMT_YUV422P10, { 1, 25 }, 720, 576, 1, 2, "SD576I-50" },
60 
61  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1280, 720, 0, 3, "HD720P-60" },
62  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1280, 720, 0, 3, "HD720P-50" },
63  { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 1920, 1080, 1, 3, "HD1080I-60" },
64  { AV_PIX_FMT_YUV422P10, { 1, 25 }, 1920, 1080, 1, 3, "HD1080I-50" },
65  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1920, 1080, 0, 3, "HD1080P-60" },
66  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1920, 1080, 0, 3, "HD1080P-50" },
67 
68  { AV_PIX_FMT_YUV444P12, { 1, 24 }, 2048, 1080, 0, 4, "DC2K" },
69  { AV_PIX_FMT_YUV444P12, { 1, 24 }, 4096, 2160, 0, 5, "DC4K" },
70 
71  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 3840, 2160, 0, 6, "UHDTV 4K-60" },
72  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 3840, 2160, 0, 6, "UHDTV 4K-50" },
73 
74  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 7680, 4320, 0, 7, "UHDTV 8K-60" },
75  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 7680, 4320, 0, 7, "UHDTV 8K-50" },
76 
77  { AV_PIX_FMT_YUV422P10, { 1001, 24000 }, 1920, 1080, 0, 3, "HD1080P-24" },
78  { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 486, 1, 2, "SD Pro486" },
79 };
80 static const int base_video_fmts_len = FF_ARRAY_ELEMS(base_video_fmts);
81 
82 enum VC2_QM {
83  VC2_QM_DEF = 0,
84  VC2_QM_COL,
85  VC2_QM_FLAT,
86 
87  VC2_QM_NB
88 };
89 
90 typedef struct SubBand {
91  dwtcoef *buf;
92  ptrdiff_t stride;
93  int width;
94  int height;
95 } SubBand;
96 
97 typedef struct Plane {
98  SubBand band[MAX_DWT_LEVELS][4];
99  dwtcoef *coef_buf;
100  int width;
101  int height;
102  int dwt_width;
103  int dwt_height;
104  ptrdiff_t coef_stride;
105 } Plane;
106 
107 typedef struct SliceArgs {
108  const struct VC2EncContext *ctx;
109  union {
110  int cache[DIRAC_MAX_QUANT_INDEX];
111  uint8_t *buf;
112  };
113  int x;
114  int y;
115  int quant_idx;
116  int bits_ceil;
117  int bits_floor;
118  int bytes;
119 } SliceArgs;
120 
121 typedef struct TransformArgs {
122  const struct VC2EncContext *ctx;
123  Plane *plane;
124  const void *idata;
125  ptrdiff_t istride;
126  int field;
127  VC2TransformContext t;
128 } TransformArgs;
129 
130 typedef struct VC2EncContext {
131  AVClass *av_class;
132  PutBitContext pb;
133  Plane plane[3];
134  AVCodecContext *avctx;
135  DiracVersionInfo ver;
136 
137  SliceArgs *slice_args;
138  TransformArgs transform_args[3];
139 
140  /* For conversion from unsigned pixel values to signed */
141  int diff_offset;
142  int bpp;
143  int bpp_idx;
144 
145  /* Picture number */
146  uint32_t picture_number;
147 
148  /* Base video format */
149  int base_vf;
150  int level;
151  int profile;
152 
153  /* Quantization matrix */
154  uint8_t quant[MAX_DWT_LEVELS][4];
155  int custom_quant_matrix;
156 
157  /* Division LUT */
158  uint32_t qmagic_lut[116][2];
159 
160  int num_x; /* #slices horizontally */
161  int num_y; /* #slices vertically */
162  int prefix_bytes;
163  int size_scaler;
164  int chroma_x_shift;
165  int chroma_y_shift;
166 
167  /* Rate control stuff */
168  int frame_max_bytes;
169  int slice_max_bytes;
170  int slice_min_bytes;
171  int q_ceil;
172  int q_avg;
173 
174  /* Options */
175  double tolerance;
176  int wavelet_idx;
177  int wavelet_depth;
178  int strict_compliance;
179  int slice_height;
180  int slice_width;
181  int interlaced;
182  enum VC2_QM quant_matrix;
183 
184  /* Parse code state */
185  uint32_t next_parse_offset;
186  enum DiracParseCodes last_parse_code;
187 } VC2EncContext;
188 
189 static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val)
190 {
191  int i;
192  int pbits = 0, bits = 0, topbit = 1, maxval = 1;
193 
194  if (!val++) {
195  put_bits(pb, 1, 1);
196  return;
197  }
198 
199  while (val > maxval) {
200  topbit <<= 1;
201  maxval <<= 1;
202  maxval |= 1;
203  }
204 
205  bits = ff_log2(topbit);
206 
207  for (i = 0; i < bits; i++) {
208  topbit >>= 1;
209  pbits <<= 2;
210  if (val & topbit)
211  pbits |= 0x1;
212  }
213 
214  put_bits(pb, bits*2 + 1, (pbits << 1) | 1);
215 }
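/* Illustrative aside (not part of vc2enc.c): a minimal, standalone sketch of
 * the interleaved exp-Golomb code that put_vc2_ue_uint() above writes. Each
 * data bit of (val + 1) below its leading one is preceded by a '0' follow bit
 * and the code ends with a '1' terminator, so 0 -> "1", 1 -> "001",
 * 2 -> "011", 3 -> "00001", ... The helper vc2_ue_to_string() is hypothetical
 * and only meant for printing the bit pattern. */
#include <stdint.h>
#include <stdio.h>

static void vc2_ue_to_string(uint32_t val, char *out)
{
    uint32_t v = val + 1;
    int bits = 0;
    while ((v >> bits) > 1)                  /* data bits below the top '1' */
        bits++;
    for (int i = bits - 1; i >= 0; i--) {
        *out++ = '0';                        /* follow bit */
        *out++ = ((v >> i) & 1) ? '1' : '0'; /* data bit, MSB first */
    }
    *out++ = '1';                            /* terminator */
    *out = '\0';
}

int main(void)
{
    char buf[72];
    for (uint32_t v = 0; v < 8; v++) {
        vc2_ue_to_string(v, buf);
        printf("%u -> %s\n", v, buf);
    }
    return 0;
}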
216 
217 static av_always_inline int count_vc2_ue_uint(uint32_t val)
218 {
219  int topbit = 1, maxval = 1;
220 
221  if (!val++)
222  return 1;
223 
224  while (val > maxval) {
225  topbit <<= 1;
226  maxval <<= 1;
227  maxval |= 1;
228  }
229 
230  return ff_log2(topbit)*2 + 1;
231 }
232 
233 /* VC-2 10.4 - parse_info() */
234 static void encode_parse_info(VC2EncContext *s, enum DiracParseCodes pcode)
235 {
236  uint32_t cur_pos, dist;
237 
238  align_put_bits(&s->pb);
239 
240  cur_pos = put_bytes_count(&s->pb, 0);
241 
242  /* Magic string */
243  ff_put_string(&s->pb, "BBCD", 0);
244 
245  /* Parse code */
246  put_bits(&s->pb, 8, pcode);
247 
248  /* Next parse offset */
249  dist = cur_pos - s->next_parse_offset;
250  AV_WB32(s->pb.buf + s->next_parse_offset + 5, dist);
251  s->next_parse_offset = cur_pos;
252  put_bits32(&s->pb, pcode == DIRAC_PCODE_END_SEQ ? 13 : 0);
253 
254  /* Last parse offset */
255  put_bits32(&s->pb, s->last_parse_code == DIRAC_PCODE_END_SEQ ? 13 : dist);
256 
257  s->last_parse_code = pcode;
258 }
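/* Illustrative aside (not part of vc2enc.c): the 13-byte parse_info header
 * that encode_parse_info() emits, written with plain byte stores so the
 * layout is visible: 4-byte "BBCD" magic, 1-byte parse code, 4-byte "next
 * parse offset" and 4-byte "previous parse offset", both big-endian. As with
 * the AV_WB32() call above, the previous header's "next" field can only be
 * back-patched once the distance to the current header is known. This is a
 * simplified sketch (the end-of-sequence special casing is ignored);
 * write_u32_be() and write_parse_info() are hypothetical helpers. */
#include <stdint.h>
#include <string.h>

static void write_u32_be(uint8_t *p, uint32_t v)
{
    p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static int write_parse_info(uint8_t *buf, uint32_t pos, uint32_t prev_pos,
                            uint8_t parse_code)
{
    uint32_t dist = pos - prev_pos;
    memcpy(buf + pos, "BBCD", 4);            /* magic string */
    buf[pos + 4] = parse_code;
    write_u32_be(buf + prev_pos + 5, dist);  /* back-patch previous "next" */
    write_u32_be(buf + pos + 5, 0);          /* next offset, patched later */
    write_u32_be(buf + pos + 9, dist);       /* previous parse offset */
    return 13;                               /* parse_info is always 13 bytes */
}

int main(void)
{
    uint8_t buf[64] = {0};
    write_parse_info(buf, 0, 0, 0x00);       /* e.g. a sequence header unit */
    write_parse_info(buf, 13, 0, 0x10);      /* e.g. end of sequence, 13 bytes on */
    return 0;
}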
259 
260 /* VC-2 11.1 - parse_parameters()
261  * The level dictates what the decoder should expect in terms of resolution
262  * and allows it to quickly reject whatever it can't support. Remember,
263  * this codec kinda targets cheapo FPGAs without much memory. Unfortunately
264  * it also limits us greatly in our choice of formats, hence the flag to disable
265  * strict_compliance */
266 static void encode_parse_params(VC2EncContext *s)
267 {
268  put_vc2_ue_uint(&s->pb, s->ver.major); /* VC-2 demands this to be 2 */
269  put_vc2_ue_uint(&s->pb, s->ver.minor); /* ^^ and this to be 0 */
270  put_vc2_ue_uint(&s->pb, s->profile); /* 3 to signal HQ profile */
271  put_vc2_ue_uint(&s->pb, s->level); /* 3 - 1080/720, 6 - 4K */
272 }
273 
274 /* VC-2 11.3 - frame_size() */
275 static void encode_frame_size(VC2EncContext *s)
276 {
277  put_bits(&s->pb, 1, !s->strict_compliance);
278  if (!s->strict_compliance) {
279  AVCodecContext *avctx = s->avctx;
280  put_vc2_ue_uint(&s->pb, avctx->width);
281  put_vc2_ue_uint(&s->pb, avctx->height);
282  }
283 }
284 
285 /* VC-2 11.3.3 - color_diff_sampling_format() */
286 static void encode_color_diff_sampling_format(VC2EncContext *s)
287 {
288  put_bits(&s->pb, 1, !s->strict_compliance);
289  if (!s->strict_compliance) {
290  int idx;
291  if (s->chroma_x_shift == 1 && s->chroma_y_shift == 0)
292  idx = 1; /* 422 */
293  else if (s->chroma_x_shift == 1 && s->chroma_y_shift == 1)
294  idx = 2; /* 420 */
295  else
296  idx = 0; /* 444 */
297  put_vc2_ue_uint(&s->pb, idx);
298  }
299 }
300 
301 /* VC-2 11.3.4 - scan_format() */
302 static void encode_scan_format(VC2EncContext *s)
303 {
304  put_bits(&s->pb, 1, !s->strict_compliance);
305  if (!s->strict_compliance)
306  put_vc2_ue_uint(&s->pb, s->interlaced);
307 }
308 
309 /* VC-2 11.3.5 - frame_rate() */
310 static void encode_frame_rate(VC2EncContext *s)
311 {
312  put_bits(&s->pb, 1, !s->strict_compliance);
313  if (!s->strict_compliance) {
314  AVCodecContext *avctx = s->avctx;
315  put_vc2_ue_uint(&s->pb, 0);
316  put_vc2_ue_uint(&s->pb, avctx->time_base.den);
317  put_vc2_ue_uint(&s->pb, avctx->time_base.num);
318  }
319 }
320 
321 /* VC-2 11.3.6 - aspect_ratio() */
322 static void encode_aspect_ratio(VC2EncContext *s)
323 {
324  put_bits(&s->pb, 1, !s->strict_compliance);
325  if (!s->strict_compliance) {
326  AVCodecContext *avctx = s->avctx;
327  put_vc2_ue_uint(&s->pb, 0);
328  put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.num);
329  put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.den);
330  }
331 }
332 
333 /* VC-2 11.3.7 - clean_area() */
334 static void encode_clean_area(VC2EncContext *s)
335 {
336  put_bits(&s->pb, 1, 0);
337 }
338 
339 /* VC-2 11.3.8 - signal_range() */
340 static void encode_signal_range(VC2EncContext *s)
341 {
342  put_bits(&s->pb, 1, !s->strict_compliance);
343  if (!s->strict_compliance)
344  put_vc2_ue_uint(&s->pb, s->bpp_idx);
345 }
346 
347 /* VC-2 11.3.9 - color_spec() */
348 static void encode_color_spec(VC2EncContext *s)
349 {
350  AVCodecContext *avctx = s->avctx;
351  put_bits(&s->pb, 1, !s->strict_compliance);
352  if (!s->strict_compliance) {
353  int val;
354  put_vc2_ue_uint(&s->pb, 0);
355 
356  /* primaries */
357  put_bits(&s->pb, 1, 1);
358  if (avctx->color_primaries == AVCOL_PRI_BT470BG)
359  val = 2;
360  else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M)
361  val = 1;
362  else if (avctx->color_primaries == AVCOL_PRI_SMPTE240M)
363  val = 1;
364  else
365  val = 0;
366  put_vc2_ue_uint(&s->pb, val);
367 
368  /* color matrix */
369  put_bits(&s->pb, 1, 1);
370  if (avctx->colorspace == AVCOL_SPC_RGB)
371  val = 3;
372  else if (avctx->colorspace == AVCOL_SPC_YCOCG)
373  val = 2;
374  else if (avctx->colorspace == AVCOL_SPC_BT470BG)
375  val = 1;
376  else
377  val = 0;
378  put_vc2_ue_uint(&s->pb, val);
379 
380  /* transfer function */
381  put_bits(&s->pb, 1, 1);
382  if (avctx->color_trc == AVCOL_TRC_LINEAR)
383  val = 2;
384  else if (avctx->color_trc == AVCOL_TRC_BT1361_ECG)
385  val = 1;
386  else
387  val = 0;
388  put_vc2_ue_uint(&s->pb, val);
389  }
390 }
391 
392 /* VC-2 11.3 - source_parameters() */
393 static void encode_source_params(VC2EncContext *s)
394 {
395  encode_frame_size(s);
396  encode_color_diff_sampling_format(s);
397  encode_scan_format(s);
398  encode_frame_rate(s);
399  encode_aspect_ratio(s);
400  encode_clean_area(s);
401  encode_signal_range(s);
402  encode_color_spec(s);
403 }
404 
405 /* VC-2 11 - sequence_header() */
406 static void encode_seq_header(VC2EncContext *s)
407 {
408  align_put_bits(&s->pb);
409  encode_parse_params(s);
410  put_vc2_ue_uint(&s->pb, s->base_vf);
411  encode_source_params(s);
412  put_vc2_ue_uint(&s->pb, s->interlaced); /* Frames or fields coding */
413 }
414 
415 /* VC-2 12.1 - picture_header() */
416 static void encode_picture_header(VC2EncContext *s)
417 {
418  align_put_bits(&s->pb);
419  put_bits32(&s->pb, s->picture_number++);
420 }
421 
422 /* VC-2 12.3.4.1 - slice_parameters() */
423 static void encode_slice_params(VC2EncContext *s)
424 {
425  put_vc2_ue_uint(&s->pb, s->num_x);
426  put_vc2_ue_uint(&s->pb, s->num_y);
427  put_vc2_ue_uint(&s->pb, s->prefix_bytes);
428  put_vc2_ue_uint(&s->pb, s->size_scaler);
429 }
430 
431 /* 1st idx = LL, second - vertical, third - horizontal, fourth - total */
432 static const uint8_t vc2_qm_col_tab[][4] = {
433  {20, 9, 15, 4},
434  { 0, 6, 6, 4},
435  { 0, 3, 3, 5},
436  { 0, 3, 5, 1},
437  { 0, 11, 10, 11}
438 };
439 
440 static const uint8_t vc2_qm_flat_tab[][4] = {
441  { 0, 0, 0, 0},
442  { 0, 0, 0, 0},
443  { 0, 0, 0, 0},
444  { 0, 0, 0, 0},
445  { 0, 0, 0, 0}
446 };
447 
448 static void init_quant_matrix(VC2EncContext *s)
449 {
450  int level, orientation;
451 
452  if (s->wavelet_depth <= 4 && s->quant_matrix == VC2_QM_DEF) {
453  s->custom_quant_matrix = 0;
454  for (level = 0; level < s->wavelet_depth; level++) {
455  s->quant[level][0] = ff_dirac_default_qmat[s->wavelet_idx][level][0];
456  s->quant[level][1] = ff_dirac_default_qmat[s->wavelet_idx][level][1];
457  s->quant[level][2] = ff_dirac_default_qmat[s->wavelet_idx][level][2];
458  s->quant[level][3] = ff_dirac_default_qmat[s->wavelet_idx][level][3];
459  }
460  return;
461  }
462 
463  s->custom_quant_matrix = 1;
464 
465  if (s->quant_matrix == VC2_QM_DEF) {
466  for (level = 0; level < s->wavelet_depth; level++) {
467  for (orientation = 0; orientation < 4; orientation++) {
468  if (level <= 3)
469  s->quant[level][orientation] = ff_dirac_default_qmat[s->wavelet_idx][level][orientation];
470  else
471  s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
472  }
473  }
474  } else if (s->quant_matrix == VC2_QM_COL) {
475  for (level = 0; level < s->wavelet_depth; level++) {
476  for (orientation = 0; orientation < 4; orientation++) {
477  s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
478  }
479  }
480  } else {
481  for (level = 0; level < s->wavelet_depth; level++) {
482  for (orientation = 0; orientation < 4; orientation++) {
483  s->quant[level][orientation] = vc2_qm_flat_tab[level][orientation];
484  }
485  }
486  }
487 }
488 
489 /* VC-2 12.3.4.2 - quant_matrix() */
490 static void encode_quant_matrix(VC2EncContext *s)
491 {
492  int level;
493  put_bits(&s->pb, 1, s->custom_quant_matrix);
494  if (s->custom_quant_matrix) {
495  put_vc2_ue_uint(&s->pb, s->quant[0][0]);
496  for (level = 0; level < s->wavelet_depth; level++) {
497  put_vc2_ue_uint(&s->pb, s->quant[level][1]);
498  put_vc2_ue_uint(&s->pb, s->quant[level][2]);
499  put_vc2_ue_uint(&s->pb, s->quant[level][3]);
500  }
501  }
502 }
503 
504 /* VC-2 12.3 - transform_parameters() */
505 static void encode_transform_params(VC2EncContext *s)
506 {
507  put_vc2_ue_uint(&s->pb, s->wavelet_idx);
508  put_vc2_ue_uint(&s->pb, s->wavelet_depth);
509 
510  encode_slice_params(s);
511  encode_quant_matrix(s);
512 }
513 
514 /* VC-2 12.2 - wavelet_transform() */
515 static void encode_wavelet_transform(VC2EncContext *s)
516 {
517  encode_transform_params(s);
518  align_put_bits(&s->pb);
519 }
520 
521 /* VC-2 12 - picture_parse() */
522 static void encode_picture_start(VC2EncContext *s)
523 {
524  align_put_bits(&s->pb);
525  encode_picture_header(s);
526  align_put_bits(&s->pb);
527  encode_wavelet_transform(s);
528 }
529 
530 #define QUANT(c, mul, add, shift) (((mul) * (c) + (add)) >> (shift))
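/* Illustrative aside (not part of vc2enc.c): a standalone check that the
 * multiply-add-shift form used by QUANT() reproduces integer division by a
 * quantization factor, using the same LUT construction as the qmagic_lut[]
 * loop in vc2_encode_init() further down (the power-of-two case is handled
 * separately there). The factor 11 is an arbitrary non-power-of-two stand-in
 * for an ff_dirac_qscale_tab[] entry. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define QUANT(c, mul, add, shift) (((mul) * (c) + (add)) >> (shift))

static int ilog2(uint64_t v) { int n = 0; while (v >>= 1) n++; return n; }

int main(void)
{
    const uint64_t qf  = 11;                      /* stand-in quantization factor */
    const int      m   = ilog2(qf);
    const uint32_t t   = (uint32_t)((1ULL << (m + 32)) / qf);
    const uint32_t r   = (uint32_t)(t * qf + qf); /* wraps mod 2^32, as in the LUT loop */
    const int      q_s = m + 32;
    uint64_t q_m, q_a;

    if (r <= (1U << m)) { q_m = (uint64_t)(t + 1) << 2; q_a = 0; }
    else                { q_m = (uint64_t)t << 2;       q_a = t; }

    for (uint32_t c = 0; c < (1 << 20); c++)
        assert(QUANT(c, q_m, q_a, q_s) == 4 * (uint64_t)c / qf);

    printf("multiply-shift quantization matches 4*c/%u for all tested c\n",
           (unsigned)qf);
    return 0;
}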
531 
532 /* VC-2 13.5.5.2 - slice_band() */
533 static void encode_subband(const VC2EncContext *s, PutBitContext *pb,
534  int sx, int sy, const SubBand *b, int quant)
535 {
536  int x, y;
537 
538  const int left = b->width * (sx+0) / s->num_x;
539  const int right = b->width * (sx+1) / s->num_x;
540  const int top = b->height * (sy+0) / s->num_y;
541  const int bottom = b->height * (sy+1) / s->num_y;
542 
543  dwtcoef *coeff = b->buf + top * b->stride;
544  const uint64_t q_m = ((uint64_t)(s->qmagic_lut[quant][0])) << 2;
545  const uint64_t q_a = s->qmagic_lut[quant][1];
546  const int q_s = av_log2(ff_dirac_qscale_tab[quant]) + 32;
547 
548  for (y = top; y < bottom; y++) {
549  for (x = left; x < right; x++) {
550  uint32_t c_abs = QUANT(FFABS(coeff[x]), q_m, q_a, q_s);
551  put_vc2_ue_uint(pb, c_abs);
552  if (c_abs)
553  put_bits(pb, 1, coeff[x] < 0);
554  }
555  coeff += b->stride;
556  }
557 }
558 
559 static int count_hq_slice(SliceArgs *slice, int quant_idx)
560 {
561  int x, y;
562  uint8_t quants[MAX_DWT_LEVELS][4];
563  int bits = 0, p, level, orientation;
564  const VC2EncContext *s = slice->ctx;
565 
566  if (slice->cache[quant_idx])
567  return slice->cache[quant_idx];
568 
569  bits += 8*s->prefix_bytes;
570  bits += 8; /* quant_idx */
571 
572  for (level = 0; level < s->wavelet_depth; level++)
573  for (orientation = !!level; orientation < 4; orientation++)
574  quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
575 
576  for (p = 0; p < 3; p++) {
577  int bytes_start, bytes_len, pad_s, pad_c;
578  bytes_start = bits >> 3;
579  bits += 8;
580  for (level = 0; level < s->wavelet_depth; level++) {
581  for (orientation = !!level; orientation < 4; orientation++) {
582  const SubBand *b = &s->plane[p].band[level][orientation];
583 
584  const int q_idx = quants[level][orientation];
585  const uint64_t q_m = ((uint64_t)s->qmagic_lut[q_idx][0]) << 2;
586  const uint64_t q_a = s->qmagic_lut[q_idx][1];
587  const int q_s = av_log2(ff_dirac_qscale_tab[q_idx]) + 32;
588 
589  const int left = b->width * slice->x / s->num_x;
590  const int right = b->width *(slice->x+1) / s->num_x;
591  const int top = b->height * slice->y / s->num_y;
592  const int bottom = b->height *(slice->y+1) / s->num_y;
593 
594  dwtcoef *buf = b->buf + top * b->stride;
595 
596  for (y = top; y < bottom; y++) {
597  for (x = left; x < right; x++) {
598  uint32_t c_abs = QUANT(FFABS(buf[x]), q_m, q_a, q_s);
599  bits += count_vc2_ue_uint(c_abs);
600  bits += !!c_abs;
601  }
602  buf += b->stride;
603  }
604  }
605  }
606  bits += FFALIGN(bits, 8) - bits;
607  bytes_len = (bits >> 3) - bytes_start - 1;
608  pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
609  pad_c = (pad_s*s->size_scaler) - bytes_len;
610  bits += pad_c*8;
611  }
612 
613  slice->cache[quant_idx] = bits;
614 
615  return bits;
616 }
617 
618 /* Approaches the best possible quantizer asymptotically; it's kinda exhaustive,
619  * but we have a LUT to get the coefficient size in bits. Guaranteed to never
620  * overshoot, which is apparently very important when streaming */
621 static int rate_control(AVCodecContext *avctx, void *arg)
622 {
623  SliceArgs *slice_dat = arg;
624  const VC2EncContext *s = slice_dat->ctx;
625  const int top = slice_dat->bits_ceil;
626  const int bottom = slice_dat->bits_floor;
627  int quant_buf[2] = {-1, -1};
628  int quant = slice_dat->quant_idx, step = 1;
629  int bits_last, bits = count_hq_slice(slice_dat, quant);
630  while ((bits > top) || (bits < bottom)) {
631  const int signed_step = bits > top ? +step : -step;
632  quant = av_clip(quant + signed_step, 0, s->q_ceil-1);
633  bits = count_hq_slice(slice_dat, quant);
634  if (quant_buf[1] == quant) {
635  quant = FFMAX(quant_buf[0], quant);
636  bits = quant == quant_buf[0] ? bits_last : bits;
637  break;
638  }
639  step = av_clip(step/2, 1, (s->q_ceil-1)/2);
640  quant_buf[1] = quant_buf[0];
641  quant_buf[0] = quant;
642  bits_last = bits;
643  }
644  slice_dat->quant_idx = av_clip(quant, 0, s->q_ceil-1);
645  slice_dat->bytes = SSIZE_ROUND(bits >> 3);
646  return 0;
647 }
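/* Illustrative aside (not part of vc2enc.c): a toy version of the search in
 * rate_control() above, with a made-up monotonically decreasing toy_cost()
 * standing in for count_hq_slice(). It walks the quantizer index until the
 * estimated size lands inside [bottom, top], and falls back to the larger of
 * the last two indices tried if it starts to oscillate, which is what keeps
 * a slice from overshooting its byte budget. */
#include <stdio.h>

#define TOY_Q_CEIL 64

static int toy_cost(int q)          /* stand-in for count_hq_slice() */
{
    return 100000 / (q + 1);        /* larger quantizer -> fewer bits */
}

static int toy_clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    const int top = 12000, bottom = 9000;
    int quant_buf[2] = { -1, -1 };
    int quant = 0, step = 1;
    int bits_last = 0, bits = toy_cost(quant);

    while (bits > top || bits < bottom) {
        const int signed_step = bits > top ? +step : -step;
        quant = toy_clip(quant + signed_step, 0, TOY_Q_CEIL - 1);
        bits  = toy_cost(quant);
        if (quant_buf[1] == quant) {                 /* started to oscillate */
            quant = quant_buf[0] > quant ? quant_buf[0] : quant;
            bits  = quant == quant_buf[0] ? bits_last : bits;
            break;
        }
        step = toy_clip(step / 2, 1, (TOY_Q_CEIL - 1) / 2);
        quant_buf[1] = quant_buf[0];
        quant_buf[0] = quant;
        bits_last = bits;
    }
    printf("chose quantizer %d at %d bits\n", quant, bits);
    return 0;
}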
648 
649 static int64_t calc_slice_sizes(VC2EncContext *s)
650 {
651  int i, j, slice_x, slice_y, bytes_left = 0;
652  int bytes_top[SLICE_REDIST_TOTAL] = {0};
653  int64_t total_bytes_needed = 0;
654  int slice_redist_range = FFMIN(SLICE_REDIST_TOTAL, s->num_x*s->num_y);
655  SliceArgs *enc_args = s->slice_args;
656  SliceArgs *top_loc[SLICE_REDIST_TOTAL] = {NULL};
657 
658  init_quant_matrix(s);
659 
660  for (slice_y = 0; slice_y < s->num_y; slice_y++) {
661  for (slice_x = 0; slice_x < s->num_x; slice_x++) {
662  SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
663  args->ctx = s;
664  args->x = slice_x;
665  args->y = slice_y;
666  args->bits_ceil = s->slice_max_bytes << 3;
667  args->bits_floor = s->slice_min_bytes << 3;
668  memset(args->cache, 0, s->q_ceil*sizeof(*args->cache));
669  }
670  }
671 
672  /* First pass - determine baseline slice sizes w.r.t. max_slice_size */
673  s->avctx->execute(s->avctx, rate_control, enc_args, NULL, s->num_x*s->num_y,
674  sizeof(SliceArgs));
675 
676  for (i = 0; i < s->num_x*s->num_y; i++) {
677  SliceArgs *args = &enc_args[i];
678  bytes_left += args->bytes;
679  for (j = 0; j < slice_redist_range; j++) {
680  if (args->bytes > bytes_top[j]) {
681  bytes_top[j] = args->bytes;
682  top_loc[j] = args;
683  break;
684  }
685  }
686  }
687 
688  bytes_left = s->frame_max_bytes - bytes_left;
689 
690  /* Second pass - distribute leftover bytes */
691  while (bytes_left > 0) {
692  int distributed = 0;
693  for (i = 0; i < slice_redist_range; i++) {
694  SliceArgs *args;
695  int bits, bytes, diff, prev_bytes, new_idx;
696  if (bytes_left <= 0)
697  break;
698  if (!top_loc[i] || !top_loc[i]->quant_idx)
699  break;
700  args = top_loc[i];
701  prev_bytes = args->bytes;
702  new_idx = FFMAX(args->quant_idx - 1, 0);
703  bits = count_hq_slice(args, new_idx);
704  bytes = SSIZE_ROUND(bits >> 3);
705  diff = bytes - prev_bytes;
706  if ((bytes_left - diff) > 0) {
707  args->quant_idx = new_idx;
708  args->bytes = bytes;
709  bytes_left -= diff;
710  distributed++;
711  }
712  }
713  if (!distributed)
714  break;
715  }
716 
717  for (i = 0; i < s->num_x*s->num_y; i++) {
718  SliceArgs *args = &enc_args[i];
719  total_bytes_needed += args->bytes;
720  s->q_avg = (s->q_avg + args->quant_idx)/2;
721  }
722 
723  return total_bytes_needed;
724 }
725 
726 /* VC-2 13.5.3 - hq_slice */
727 static int encode_hq_slice(AVCodecContext *avctx, void *arg)
728 {
729  const SliceArgs *slice_dat = arg;
730  const VC2EncContext *s = slice_dat->ctx;
731  PutBitContext pb0, *const pb = &pb0;
732  const int slice_x = slice_dat->x;
733  const int slice_y = slice_dat->y;
734  const int quant_idx = slice_dat->quant_idx;
735  const int slice_bytes_max = slice_dat->bytes;
736  uint8_t quants[MAX_DWT_LEVELS][4];
737  int p, level, orientation;
738 
739  /* The reference decoder ignores it, and its typical length is 0 */
740  memset(slice_dat->buf, 0, s->prefix_bytes);
741 
742  init_put_bits(pb, slice_dat->buf + s->prefix_bytes, slice_dat->bytes - s->prefix_bytes);
743 
744  put_bits(pb, 8, quant_idx);
745 
746  /* Slice quantization (slice_quantizers() in the specs) */
747  for (level = 0; level < s->wavelet_depth; level++)
748  for (orientation = !!level; orientation < 4; orientation++)
749  quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
750 
751  /* Luma + 2 Chroma planes */
752  for (p = 0; p < 3; p++) {
753  int bytes_start, bytes_len, pad_s, pad_c;
754  bytes_start = put_bytes_count(pb, 0);
755  put_bits(pb, 8, 0);
756  for (level = 0; level < s->wavelet_depth; level++) {
757  for (orientation = !!level; orientation < 4; orientation++) {
758  encode_subband(s, pb, slice_x, slice_y,
759  &s->plane[p].band[level][orientation],
760  quants[level][orientation]);
761  }
762  }
763  flush_put_bits(pb);
764  bytes_len = put_bytes_output(pb) - bytes_start - 1;
765  if (p == 2) {
766  int len_diff = slice_bytes_max - put_bytes_output(pb);
767  pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler;
768  pad_c = (pad_s*s->size_scaler) - bytes_len;
769  } else {
770  pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
771  pad_c = (pad_s*s->size_scaler) - bytes_len;
772  }
773  pb->buf[bytes_start] = pad_s;
774  /* vc2-reference uses padding that decodes to '0' coeffs */
775  memset(put_bits_ptr(pb), 0xFF, pad_c);
776  skip_put_bytes(pb, pad_c);
777  }
778 
779  return 0;
780 }
781 
782 /* VC-2 13.5.1 - low_delay_transform_data() */
783 static int encode_slices(VC2EncContext *s)
784 {
785  uint8_t *buf;
786  int slice_x, slice_y, skip = 0;
787  SliceArgs *enc_args = s->slice_args;
788 
789  flush_put_bits(&s->pb);
790  buf = put_bits_ptr(&s->pb);
791 
792  for (slice_y = 0; slice_y < s->num_y; slice_y++) {
793  for (slice_x = 0; slice_x < s->num_x; slice_x++) {
794  SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
795  args->buf = buf + skip;
796  skip += args->bytes;
797  }
798  }
799 
800  s->avctx->execute(s->avctx, encode_hq_slice, enc_args, NULL, s->num_x*s->num_y,
801  sizeof(SliceArgs));
802 
803  skip_put_bytes(&s->pb, skip);
804 
805  return 0;
806 }
807 
808 /*
809  * Transform basics for a 3 level transform
810  * |---------------------------------------------------------------------|
811  * |  LL-0  | HL-0  |                 |                                   |
812  * |--------|-------|       HL-1      |                                   |
813  * |  LH-0  | HH-0  |                 |                                   |
814  * |----------------|-----------------|                HL-2              |
815  * |                |                 |                                   |
816  * |      LH-1      |       HH-1      |                                   |
817  * |                |                 |                                   |
818  * |----------------------------------|-----------------------------------|
819  * |                                  |                                   |
820  * |                                  |                                   |
821  * |                                  |                                   |
822  * |               LH-2               |               HH-2                |
823  * |                                  |                                   |
824  * |                                  |                                   |
825  * |                                  |                                   |
826  * |---------------------------------------------------------------------|
827  *
828  * DWT transforms are generally applied by splitting the image in two vertically
829  * and applying a low pass transform on the left part and a corresponding high
830  * pass transform on the right hand side. This is known as the horizontal filter
831  * stage.
832  * After that, the same operation is performed except the image is divided
833  * horizontally, with the high pass on the lower and the low pass on the higher
834  * side.
835  * Therefore, you're left with 4 subdivisions - known as low-low, low-high,
836  * high-low and high-high. They're referred to as orientations in the decoder
837  * and encoder.
838  *
839  * The LL (low-low) area contains the original image downsampled by the amount
840  * of levels. The rest of the areas can be thought as the details needed
841  * to restore the image perfectly to its original size.
842  */
843 static int dwt_plane(AVCodecContext *avctx, void *arg)
844 {
845  TransformArgs *transform_dat = arg;
846  const VC2EncContext *s = transform_dat->ctx;
847  const void *frame_data = transform_dat->idata;
848  const ptrdiff_t linesize = transform_dat->istride;
849  const int field = transform_dat->field;
850  const Plane *p = transform_dat->plane;
851  VC2TransformContext *t = &transform_dat->t;
852  dwtcoef *buf = p->coef_buf;
853  const int idx = s->wavelet_idx;
854  const int skip = 1 + s->interlaced;
855 
856  int x, y, level, offset;
857  ptrdiff_t pix_stride = linesize >> (s->bpp - 1);
858 
859  if (field == 1) {
860  offset = 0;
861  pix_stride <<= 1;
862  } else if (field == 2) {
863  offset = pix_stride;
864  pix_stride <<= 1;
865  } else {
866  offset = 0;
867  }
868 
869  if (s->bpp == 1) {
870  const uint8_t *pix = (const uint8_t *)frame_data + offset;
871  for (y = 0; y < p->height*skip; y+=skip) {
872  for (x = 0; x < p->width; x++) {
873  buf[x] = pix[x] - s->diff_offset;
874  }
875  memset(&buf[x], 0, (p->coef_stride - p->width)*sizeof(dwtcoef));
876  buf += p->coef_stride;
877  pix += pix_stride;
878  }
879  } else {
880  const uint16_t *pix = (const uint16_t *)frame_data + offset;
881  for (y = 0; y < p->height*skip; y+=skip) {
882  for (x = 0; x < p->width; x++) {
883  buf[x] = pix[x] - s->diff_offset;
884  }
885  memset(&buf[x], 0, (p->coef_stride - p->width)*sizeof(dwtcoef));
886  buf += p->coef_stride;
887  pix += pix_stride;
888  }
889  }
890 
891  memset(buf, 0, p->coef_stride * (p->dwt_height - p->height) * sizeof(dwtcoef));
892 
893  for (level = s->wavelet_depth-1; level >= 0; level--) {
894  const SubBand *b = &p->band[level][0];
895  t->vc2_subband_dwt[idx](t, p->coef_buf, p->coef_stride,
896  b->width, b->height);
897  }
898 
899  return 0;
900 }
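/* Illustrative aside (not part of vc2enc.c): the subband geometry described
 * in the comment above, computed the same way as the w >>= 1 / h >>= 1 loop
 * in vc2_encode_init() below. Only level 0 (the coarsest) keeps its LL band;
 * at every other level the LL area is what the next-coarser level decomposes
 * further, which is why the encoding loops start at orientation !!level.
 * The 1920x1080 plane and depth 3 are assumed example values (1080 is already
 * a multiple of 1 << depth, so no padding is needed). */
#include <stdio.h>

int main(void)
{
    const int depth = 3;
    int w = 1920, h = 1080;
    static const char *names[4] = { "LL", "HL", "LH", "HH" };

    for (int level = depth - 1; level >= 0; level--) {
        w >>= 1;
        h >>= 1;
        for (int o = !!level; o < 4; o++)
            printf("level %d %s: %dx%d\n", level, names[o], w, h);
    }
    return 0;
}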
901 
902 static int encode_frame(VC2EncContext *s, AVPacket *avpkt, const AVFrame *frame,
903  const char *aux_data, const int header_size, int field)
904 {
905  int i, ret;
906  int64_t max_frame_bytes;
907 
908  /* Threaded DWT transform */
909  for (i = 0; i < 3; i++) {
910  s->transform_args[i].ctx = s;
911  s->transform_args[i].field = field;
912  s->transform_args[i].plane = &s->plane[i];
913  s->transform_args[i].idata = frame->data[i];
914  s->transform_args[i].istride = frame->linesize[i];
915  }
916  s->avctx->execute(s->avctx, dwt_plane, s->transform_args, NULL, 3,
917  sizeof(TransformArgs));
918 
919  /* Calculate per-slice quantizers and sizes */
920  max_frame_bytes = header_size + calc_slice_sizes(s);
921 
922  if (field < 2) {
923  ret = ff_get_encode_buffer(s->avctx, avpkt,
924  max_frame_bytes << s->interlaced, 0);
925  if (ret < 0)
926  return ret;
927  init_put_bits(&s->pb, avpkt->data, avpkt->size);
928  }
929 
930  /* Sequence header */
931  encode_parse_info(s, DIRAC_PCODE_SEQ_HEADER);
932  encode_seq_header(s);
933 
934  /* Encoder version */
935  if (aux_data) {
936  encode_parse_info(s, DIRAC_PCODE_AUX);
937  ff_put_string(&s->pb, aux_data, 1);
938  }
939 
940  /* Picture header */
941  encode_parse_info(s, DIRAC_PCODE_PICTURE_HQ);
942  encode_picture_start(s);
943 
944  /* Encode slices */
945  encode_slices(s);
946 
947  /* End sequence */
948  encode_parse_info(s, DIRAC_PCODE_END_SEQ);
949 
950  return 0;
951 }
952 
953 static av_cold int vc2_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
954  const AVFrame *frame, int *got_packet)
955 {
956  int ret = 0;
957  int slice_ceil, sig_size = 256;
958  VC2EncContext *s = avctx->priv_data;
959  const int bitexact = avctx->flags & AV_CODEC_FLAG_BITEXACT;
960  const char *aux_data = bitexact ? "Lavc" : LIBAVCODEC_IDENT;
961  const int aux_data_size = bitexact ? sizeof("Lavc") : sizeof(LIBAVCODEC_IDENT);
962  const int header_size = 100 + aux_data_size;
963  int64_t r_bitrate = avctx->bit_rate >> (s->interlaced);
964 
965  s->avctx = avctx;
966  s->size_scaler = 2;
967  s->prefix_bytes = 0;
968  s->last_parse_code = 0;
969  s->next_parse_offset = 0;
970 
971  /* Rate control */
972  s->frame_max_bytes = (av_rescale(r_bitrate, s->avctx->time_base.num,
973  s->avctx->time_base.den) >> 3) - header_size;
974  s->slice_max_bytes = slice_ceil = av_rescale(s->frame_max_bytes, 1, s->num_x*s->num_y);
975 
976  /* Find an appropriate size scaler */
977  while (sig_size > 255) {
978  int r_size = SSIZE_ROUND(s->slice_max_bytes);
979  if (r_size > slice_ceil) {
980  s->slice_max_bytes -= r_size - slice_ceil;
981  r_size = SSIZE_ROUND(s->slice_max_bytes);
982  }
983  sig_size = r_size/s->size_scaler; /* Signalled slice size */
984  s->size_scaler <<= 1;
985  }
986 
987  s->slice_min_bytes = s->slice_max_bytes - s->slice_max_bytes*(s->tolerance/100.0f);
988  if (s->slice_min_bytes < 0)
989  return AVERROR(EINVAL);
990 
991  ret = encode_frame(s, avpkt, frame, aux_data, header_size, s->interlaced);
992  if (ret)
993  return ret;
994  if (s->interlaced) {
995  ret = encode_frame(s, avpkt, frame, aux_data, header_size, 2);
996  if (ret)
997  return ret;
998  }
999 
1000  flush_put_bits(&s->pb);
1001  av_shrink_packet(avpkt, put_bytes_output(&s->pb));
1002 
1003  *got_packet = 1;
1004 
1005  return 0;
1006 }
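/* Illustrative aside (not part of vc2enc.c): the per-slice budget arithmetic
 * done at the top of vc2_encode_frame(), worked through for assumed settings:
 * the encoder's 600 Mb/s default bitrate, 25 fps progressive 1920x1080, the
 * default 32x16 slice size and wavelet depth 4 (so the plane height is padded
 * to 1088), with the short bitexact "Lavc" aux string. The real code halves
 * the budget per field when coding interlaced and then searches for a
 * size_scaler that keeps the signalled slice size below 256. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int64_t bit_rate  = 600000000;          /* bits per second */
    const int     fps       = 25;
    const int     header_sz = 100 + (int)sizeof("Lavc");
    const int     num_x     = 1920 / 32;          /* 60 slices across */
    const int     num_y     = 1088 / 16;          /* 68 slices down   */

    const int64_t frame_max = bit_rate / fps / 8 - header_sz;
    const int64_t slice_max = frame_max / (num_x * num_y);

    printf("frame budget: %" PRId64 " bytes\n", frame_max);
    printf("slice budget: %" PRId64 " bytes across %d slices\n",
           slice_max, num_x * num_y);
    return 0;
}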
1007 
1008 static av_cold int vc2_encode_end(AVCodecContext *avctx)
1009 {
1010  int i;
1011  VC2EncContext *s = avctx->priv_data;
1012 
1013  av_log(avctx, AV_LOG_INFO, "Qavg: %i\n", s->q_avg);
1014 
1015  for (i = 0; i < 3; i++) {
1016  ff_vc2enc_free_transforms(&s->transform_args[i].t);
1017  av_freep(&s->plane[i].coef_buf);
1018  }
1019 
1020  av_freep(&s->slice_args);
1021 
1022  return 0;
1023 }
1024 
1025 static av_cold int vc2_encode_init(AVCodecContext *avctx)
1026 {
1027  Plane *p;
1028  SubBand *b;
1029  int i, level, o, shift;
1030  const AVPixFmtDescriptor *pixdesc;
1031  int depth;
1032  VC2EncContext *s = avctx->priv_data;
1033 
1034  s->picture_number = 0;
1035 
1036  /* Total allowed quantization range */
1037  s->q_ceil = DIRAC_MAX_QUANT_INDEX;
1038 
1039  s->ver.major = 2;
1040  s->ver.minor = 0;
1041  s->profile = 3;
1042  s->level = 3;
1043 
1044  s->base_vf = -1;
1045  s->strict_compliance = 1;
1046 
1047  s->q_avg = 0;
1048  s->slice_max_bytes = 0;
1049  s->slice_min_bytes = 0;
1050 
1051  /* Mark unknown as progressive */
1052  s->interlaced = !((avctx->field_order == AV_FIELD_UNKNOWN) ||
1053  (avctx->field_order == AV_FIELD_PROGRESSIVE));
1054 
1055  for (i = 0; i < base_video_fmts_len; i++) {
1056  const VC2BaseVideoFormat *fmt = &base_video_fmts[i];
1057  if (avctx->pix_fmt != fmt->pix_fmt)
1058  continue;
1059  if (avctx->time_base.num != fmt->time_base.num)
1060  continue;
1061  if (avctx->time_base.den != fmt->time_base.den)
1062  continue;
1063  if (avctx->width != fmt->width)
1064  continue;
1065  if (avctx->height != fmt->height)
1066  continue;
1067  if (s->interlaced != fmt->interlaced)
1068  continue;
1069  s->base_vf = i;
1070  s->level = base_video_fmts[i].level;
1071  break;
1072  }
1073 
1074  if (s->interlaced)
1075  av_log(avctx, AV_LOG_WARNING, "Interlacing enabled!\n");
1076 
1077  if ((s->slice_width & (s->slice_width - 1)) ||
1078  (s->slice_height & (s->slice_height - 1))) {
1079  av_log(avctx, AV_LOG_ERROR, "Slice size is not a power of two!\n");
1080  return AVERROR_UNKNOWN;
1081  }
1082 
1083  if ((s->slice_width > avctx->width) ||
1084  (s->slice_height > avctx->height)) {
1085  av_log(avctx, AV_LOG_ERROR, "Slice size is bigger than the image!\n");
1086  return AVERROR_UNKNOWN;
1087  }
1088 
1089  if (s->base_vf <= 0) {
1090  if (avctx->strict_std_compliance < FF_COMPLIANCE_STRICT) {
1091  s->strict_compliance = s->base_vf = 0;
1092  av_log(avctx, AV_LOG_WARNING, "Format does not strictly comply with VC2 specs\n");
1093  } else {
1094  av_log(avctx, AV_LOG_WARNING, "Given format does not strictly comply with "
1095  "the specifications, decrease strictness to use it.\n");
1096  return AVERROR_UNKNOWN;
1097  }
1098  } else {
1099  av_log(avctx, AV_LOG_INFO, "Selected base video format = %i (%s)\n",
1100  s->base_vf, base_video_fmts[s->base_vf].name);
1101  }
1102 
1103  pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
1104  /* Chroma subsampling */
1105  s->chroma_x_shift = pixdesc->log2_chroma_w;
1106  s->chroma_y_shift = pixdesc->log2_chroma_h;
1107 
1108  /* Bit depth and color range index */
1109  depth = pixdesc->comp[0].depth;
1110  if (depth == 8 && avctx->color_range == AVCOL_RANGE_JPEG) {
1111  s->bpp = 1;
1112  s->bpp_idx = 1;
1113  s->diff_offset = 128;
1114  } else if (depth == 8 && (avctx->color_range == AVCOL_RANGE_MPEG ||
1115  avctx->color_range == AVCOL_RANGE_UNSPECIFIED)) {
1116  s->bpp = 1;
1117  s->bpp_idx = 2;
1118  s->diff_offset = 128;
1119  } else if (depth == 10) {
1120  s->bpp = 2;
1121  s->bpp_idx = 3;
1122  s->diff_offset = 512;
1123  } else {
1124  s->bpp = 2;
1125  s->bpp_idx = 4;
1126  s->diff_offset = 2048;
1127  }
1128 
1129  /* Planes initialization */
1130  for (i = 0; i < 3; i++) {
1131  int w, h;
1132  p = &s->plane[i];
1133  p->width = avctx->width >> (i ? s->chroma_x_shift : 0);
1134  p->height = avctx->height >> (i ? s->chroma_y_shift : 0);
1135  if (s->interlaced)
1136  p->height >>= 1;
1137  p->dwt_width = w = FFALIGN(p->width, (1 << s->wavelet_depth));
1138  p->dwt_height = h = FFALIGN(p->height, (1 << s->wavelet_depth));
1139  p->coef_stride = FFALIGN(p->dwt_width, 32);
1140  p->coef_buf = av_mallocz(p->coef_stride*p->dwt_height*sizeof(dwtcoef));
1141  if (!p->coef_buf)
1142  return AVERROR(ENOMEM);
1143  for (level = s->wavelet_depth-1; level >= 0; level--) {
1144  w = w >> 1;
1145  h = h >> 1;
1146  for (o = 0; o < 4; o++) {
1147  b = &p->band[level][o];
1148  b->width = w;
1149  b->height = h;
1150  b->stride = p->coef_stride;
1151  shift = (o > 1)*b->height*b->stride + (o & 1)*b->width;
1152  b->buf = p->coef_buf + shift;
1153  }
1154  }
1155 
1156  /* DWT init */
1157  if (ff_vc2enc_init_transforms(&s->transform_args[i].t,
1158  s->plane[i].coef_stride,
1159  s->plane[i].dwt_height,
1160  s->slice_width, s->slice_height))
1161  return AVERROR(ENOMEM);
1162  }
1163 
1164  /* Slices */
1165  s->num_x = s->plane[0].dwt_width/s->slice_width;
1166  s->num_y = s->plane[0].dwt_height/s->slice_height;
1167 
1168  s->slice_args = av_calloc(s->num_x*s->num_y, sizeof(SliceArgs));
1169  if (!s->slice_args)
1170  return AVERROR(ENOMEM);
1171 
1172  for (i = 0; i < 116; i++) {
1173  const uint64_t qf = ff_dirac_qscale_tab[i];
1174  const uint32_t m = av_log2(qf);
1175  const uint32_t t = (1ULL << (m + 32)) / qf;
1176  const uint32_t r = (t*qf + qf) & UINT32_MAX;
1177  if (!(qf & (qf - 1))) {
1178  s->qmagic_lut[i][0] = 0xFFFFFFFF;
1179  s->qmagic_lut[i][1] = 0xFFFFFFFF;
1180  } else if (r <= 1 << m) {
1181  s->qmagic_lut[i][0] = t + 1;
1182  s->qmagic_lut[i][1] = 0;
1183  } else {
1184  s->qmagic_lut[i][0] = t;
1185  s->qmagic_lut[i][1] = t;
1186  }
1187  }
1188 
1189  return 0;
1190 }
1191 
1192 #define VC2ENC_FLAGS (AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
1193 static const AVOption vc2enc_options[] = {
1194  {"tolerance", "Max undershoot in percent", offsetof(VC2EncContext, tolerance), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0f}, 0.0f, 45.0f, VC2ENC_FLAGS, .unit = "tolerance"},
1195  {"slice_width", "Slice width", offsetof(VC2EncContext, slice_width), AV_OPT_TYPE_INT, {.i64 = 32}, 32, 1024, VC2ENC_FLAGS, .unit = "slice_width"},
1196  {"slice_height", "Slice height", offsetof(VC2EncContext, slice_height), AV_OPT_TYPE_INT, {.i64 = 16}, 8, 1024, VC2ENC_FLAGS, .unit = "slice_height"},
1197  {"wavelet_depth", "Transform depth", offsetof(VC2EncContext, wavelet_depth), AV_OPT_TYPE_INT, {.i64 = 4}, 1, 5, VC2ENC_FLAGS, .unit = "wavelet_depth"},
1198  {"wavelet_type", "Transform type", offsetof(VC2EncContext, wavelet_idx), AV_OPT_TYPE_INT, {.i64 = VC2_TRANSFORM_9_7}, 0, VC2_TRANSFORMS_NB, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1199  {"9_7", "Deslauriers-Dubuc (9,7)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_9_7}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1200  {"5_3", "LeGall (5,3)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_5_3}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1201  {"haar", "Haar (with shift)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_HAAR_S}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1202  {"haar_noshift", "Haar (without shift)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_HAAR}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1203  {"qm", "Custom quantization matrix", offsetof(VC2EncContext, quant_matrix), AV_OPT_TYPE_INT, {.i64 = VC2_QM_DEF}, 0, VC2_QM_NB, VC2ENC_FLAGS, .unit = "quant_matrix"},
1204  {"default", "Default from the specifications", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_DEF}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "quant_matrix"},
1205  {"color", "Prevents low bitrate discoloration", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_COL}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "quant_matrix"},
1206  {"flat", "Optimize for PSNR", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_FLAT}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "quant_matrix"},
1207  {NULL}
1208 };
1209 
1210 static const AVClass vc2enc_class = {
1211  .class_name = "SMPTE VC-2 encoder",
1212  .category = AV_CLASS_CATEGORY_ENCODER,
1213  .option = vc2enc_options,
1214  .item_name = av_default_item_name,
1215  .version = LIBAVUTIL_VERSION_INT
1216 };
1217 
1219  { "b", "600000000" },
1220  { NULL },
1221 };
1222 
1223 static const enum AVPixelFormat allowed_pix_fmts[] = {
1224  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
1225  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
1226  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
1227  AV_PIX_FMT_NONE
1228 };
1229 
1230 const FFCodec ff_vc2_encoder = {
1231  .p.name = "vc2",
1232  CODEC_LONG_NAME("SMPTE VC-2"),
1233  .p.type = AVMEDIA_TYPE_VIDEO,
1234  .p.id = AV_CODEC_ID_DIRAC,
1235  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS |
1236  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
1237  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1238  .priv_data_size = sizeof(VC2EncContext),
1239  .init = vc2_encode_init,
1240  .close = vc2_encode_end,
1241  FF_CODEC_ENCODE_CB(vc2_encode_frame),
1242  .p.priv_class = &vc2enc_class,
1243  .defaults = vc2enc_defaults,
1244  .p.pix_fmts = allowed_pix_fmts
1245 };
VC2EncContext
Definition: vc2enc.c:130
VC2EncContext::num_x
int num_x
Definition: vc2enc.c:160
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1379
VC2EncContext::q_ceil
int q_ceil
Definition: vc2enc.c:171
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:482
VC2EncContext::profile
int profile
Definition: vc2enc.c:151
left
Definition: snow.txt:386
DIRAC_PCODE_END_SEQ
@ DIRAC_PCODE_END_SEQ
Definition: dirac.h:63
AVCodecContext
main external API structure.
Definition: avcodec.h:445
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
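A hedged sketch of a common pattern (not taken verbatim from this encoder): after flushing to a byte boundary, put_bits_ptr() marks the current output position so the byte length of a section can be measured afterwards; pb is an assumed, already initialized PutBitContext:

    flush_put_bits(&pb);                    /* write out cached bits, byte-align */
    uint8_t *section_start = put_bits_ptr(&pb);
    /* ... write the section with put_bits() ... */
    flush_put_bits(&pb);
    ptrdiff_t section_bytes = put_bits_ptr(&pb) - section_start;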
SliceArgs::cache
int cache[DIRAC_MAX_QUANT_INDEX]
Definition: vc2enc.c:110
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:106
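A hedged sketch of how this is typically called from an encode callback: request a worst-case sized packet up front and bail out on failure; max_frame_size is an assumed, precomputed bound:

    /* inside an encode callback: avctx and avpkt are the callback arguments */
    int ret = ff_get_encode_buffer(avctx, avpkt, max_frame_size, 0);
    if (ret < 0)
        return ret;
    /* ... the bitstream is then written into avpkt->data ... */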
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
encode_seq_header
static void encode_seq_header(VC2EncContext *s)
Definition: vc2enc.c:406
skip_put_bytes
static void skip_put_bytes(PutBitContext *s, int n)
Skip the given number of bytes.
Definition: put_bits.h:386
encode_subband
static void encode_subband(const VC2EncContext *s, PutBitContext *pb, int sx, int sy, const SubBand *b, int quant)
Definition: vc2enc.c:533
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
VC2_TRANSFORM_HAAR
@ VC2_TRANSFORM_HAAR
Definition: vc2enc_dwt.h:34
VC2ENC_FLAGS
#define VC2ENC_FLAGS
Definition: vc2enc.c:1192
Plane
Definition: cfhd.h:117
VC2EncContext::strict_compliance
int strict_compliance
Definition: vc2enc.c:178
put_vc2_ue_uint
static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val)
Definition: vc2enc.c:189
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QUANT
#define QUANT(c, mul, add, shift)
Definition: vc2enc.c:530
Plane::dwt_height
int dwt_height
Definition: vc2enc.c:103
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
count_vc2_ue_uint
static av_always_inline int count_vc2_ue_uint(uint32_t val)
Definition: vc2enc.c:217
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:130
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:342
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
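A minimal, self-contained round trip (illustrative, not code from this file): bits accumulate in the writer's cache and flush_put_bits() emits the final partial byte, zero-padded:

    #include "libavcodec/put_bits.h"

    uint8_t buf[16];
    PutBitContext pb;
    init_put_bits(&pb, buf, sizeof(buf));
    put_bits(&pb, 3, 0x5);   /* writes bits 101 */
    put_bits(&pb, 8, 0xAB);  /* writes one full byte */
    flush_put_bits(&pb);     /* pads the last byte with zeros */
    /* put_bytes_output(&pb) now reports 2 bytes in buf */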
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
TransformArgs::field
int field
Definition: vc2enc.c:126
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
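FFALIGN(x, a) rounds x up to the next multiple of a; for the power-of-two alignments used in this encoder it is equivalent to (((x) + (a) - 1) & ~((a) - 1)), e.g.:

    int a = FFALIGN(13, 8);   /* 16 */
    int b = FFALIGN(16, 8);   /* 16: already aligned values are unchanged */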
calc_slice_sizes
static int calc_slice_sizes(VC2EncContext *s)
Definition: vc2enc.c:649
VC2EncContext::base_vf
int base_vf
Definition: vc2enc.c:149
VC2EncContext::slice_height
int slice_height
Definition: vc2enc.c:179
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:497
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
VC2EncContext::plane
Plane plane[3]
Definition: vc2enc.c:133
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
VC2_QM_DEF
@ VC2_QM_DEF
Definition: vc2enc.c:83
AV_CLASS_CATEGORY_ENCODER
@ AV_CLASS_CATEGORY_ENCODER
Definition: log.h:34
h
h
Definition: vp9dsp_template.c:2038
DIRAC_PCODE_PICTURE_HQ
@ DIRAC_PCODE_PICTURE_HQ
Definition: dirac.h:69
TransformArgs::istride
ptrdiff_t istride
Definition: vc2enc.c:125
put_bits.h
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
SLICE_REDIST_TOTAL
#define SLICE_REDIST_TOTAL
Definition: vc2enc.c:39
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
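av_log2(v) returns the index of the highest set bit of v, i.e. floor(log2(v)) for v > 0; a few illustrative values:

    av_log2(1);    /* 0 */
    av_log2(16);   /* 4 */
    av_log2(17);   /* 4 */
    av_log2(255);  /* 7 */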
VC2_TRANSFORM_HAAR_S
@ VC2_TRANSFORM_HAAR_S
Definition: vc2enc_dwt.h:35
encode_sample_fmt
static void encode_sample_fmt(VC2EncContext *s)
Definition: vc2enc.c:286
VC2EncContext::frame_max_bytes
int frame_max_bytes
Definition: vc2enc.c:168
VC2BaseVideoFormat::width
int width
Definition: vc2enc.c:44
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio (0 if unknown); that is, the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
VC2_TRANSFORM_5_3
@ VC2_TRANSFORM_5_3
Definition: vc2enc_dwt.h:32
encode_signal_range
static void encode_signal_range(VC2EncContext *s)
Definition: vc2enc.c:340
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
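As a small sketch using the public pixdesc API (not code from this file), the chroma plane height follows from this field via AV_CEIL_RSHIFT():

    #include "libavutil/common.h"
    #include "libavutil/pixdesc.h"

    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
    /* 4:2:0 has log2_chroma_h == 1, so 1080 luma lines map to 540 chroma lines */
    int chroma_h = AV_CEIL_RSHIFT(1080, desc->log2_chroma_h);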
TransformArgs::idata
const void * idata
Definition: vc2enc.c:124
SubBand::height
int height
Definition: cfhd.h:113
VC2BaseVideoFormat::level
uint8_t level
Definition: vc2enc.c:45
VC2EncContext::bpp_idx
int bpp_idx
Definition: vc2enc.c:143