FFmpeg
mv30.c
/*
 * MidiVid MV30 decoder
 *
 * Copyright (c) 2020 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <string.h>

#include "libavutil/mem.h"
#include "libavutil/thread.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "copy_block.h"
#include "decode.h"
#include "mathops.h"
#include "blockdsp.h"
#include "get_bits.h"
#include "aandcttab.h"

#define CBP_VLC_BITS 9

typedef struct MV30Context {
    GetBitContext gb;

    int intra_quant;
    int inter_quant;
    int is_inter;
    int mode_size;
    int nb_mvectors;

    int block[6][64];
    int16_t *mvectors;
    unsigned int mvectors_size;
    int16_t *coeffs;
    unsigned int coeffs_size;

    int16_t intraq_tab[2][64];
    int16_t interq_tab[2][64];

    BlockDSPContext bdsp;
    AVFrame *prev_frame;
} MV30Context;

static VLCElem cbp_tab[1 << CBP_VLC_BITS];

static const uint8_t luma_tab[] = {
    12, 12, 15, 19, 25, 34, 40, 48,
    12, 12, 18, 22, 27, 44, 47, 46,
    17, 18, 21, 26, 35, 46, 52, 47,
    18, 20, 24, 28, 40, 61, 59, 51,
    20, 24, 32, 43, 50, 72, 72, 63,
    25, 31, 42, 48, 58, 72, 81, 75,
    38, 46, 54, 61, 71, 84, 88, 85,
    50, 61, 65, 68, 79, 78, 86, 91,
};

static const uint8_t chroma_tab[] = {
    12, 16, 24, 47, 99, 99, 99, 99,
    16, 21, 26, 66, 99, 99, 99, 99,
    24, 26, 56, 99, 99, 99, 99, 99,
    47, 66, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
};

static const uint8_t zigzag[] = {
     0,  1,  8,  9, 16,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
};

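/*
 * Build a dequantization table from a quality value and one of the base
 * JPEG-style quantization matrices above. The quality-to-scale mapping is
 * the usual IJG formula (5000/q below 50, 200 - 2q from 50 upwards), and
 * the result is pre-multiplied by the AAN IDCT scale factors so the IDCT
 * below needs no per-coefficient scaling.
 */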
static void get_qtable(int16_t *table, int quant, const uint8_t *quant_tab)
{
    int factor = quant < 50 ? 5000 / FFMAX(quant, 1) : 200 - FFMIN(quant, 100) * 2;

    for (int i = 0; i < 64; i++) {
        table[i] = av_clip((quant_tab[i] * factor + 0x32) / 100, 1, 0x7fff);
        table[i] = ((int)ff_aanscales[i] * (int)table[i] + 0x800) >> 12;
    }
}

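/*
 * One 8-point IDCT pass over a row or column (selected via 'step'),
 * using AAN-style butterflies with 8.8 fixed-point rotation constants
 * (362/256 is roughly sqrt(2); 473, 669 and 277 are the remaining terms).
 * Intermediate arithmetic is done on unsigned values to keep any overflow
 * well defined.
 */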
static inline void idct_1d(unsigned *blk, int step)
{
    const unsigned t0 = blk[0 * step] + blk[4 * step];
    const unsigned t1 = blk[0 * step] - blk[4 * step];
    const unsigned t2 = blk[2 * step] + blk[6 * step];
    const unsigned t3 = ((int)((blk[2 * step] - blk[6 * step]) * 362U) >> 8) - t2;
    const unsigned t4 = t0 + t2;
    const unsigned t5 = t0 - t2;
    const unsigned t6 = t1 + t3;
    const unsigned t7 = t1 - t3;
    const unsigned t8 = blk[5 * step] + blk[3 * step];
    const unsigned t9 = blk[5 * step] - blk[3 * step];
    const unsigned tA = blk[1 * step] + blk[7 * step];
    const unsigned tB = blk[1 * step] - blk[7 * step];
    const unsigned tC = t8 + tA;
    const unsigned tD = (int)((tB + t9) * 473U) >> 8;
    const unsigned tE = (((int)(t9 * -669U) >> 8) - tC) + tD;
    const unsigned tF = ((int)((tA - t8) * 362U) >> 8) - tE;
    const unsigned t10 = (((int)(tB * 277U) >> 8) - tD) + tF;

    blk[0 * step] = t4 + tC;
    blk[1 * step] = t6 + tE;
    blk[2 * step] = t7 + tF;
    blk[3 * step] = t5 - t10;
    blk[4 * step] = t5 + t10;
    blk[5 * step] = t7 - tF;
    blk[6 * step] = t6 - tE;
    blk[7 * step] = t4 - tC;
}

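/*
 * Full 2-D IDCT of a 64-coefficient block written straight to the picture:
 * columns first (replicating the DC term down a column that has no AC
 * energy), then rows, with the result descaled by 5 bits and biased by +128.
 */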
static void idct_put(uint8_t *dst, int stride, int *block)
{
    for (int i = 0; i < 8; i++) {
        if ((block[0x08 + i] |
             block[0x10 + i] |
             block[0x18 + i] |
             block[0x20 + i] |
             block[0x28 + i] |
             block[0x30 + i] |
             block[0x38 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        idct_1d(block, 1);
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8((block[j] >> 5) + 128);
        block += 8;
        dst += stride;
    }
}

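/*
 * Same as idct_put(), but the transformed block is treated as a residual
 * and added to the prediction read from the previous frame ('src').
 */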
static void idct_add(uint8_t *dst, int stride,
                     const uint8_t *src, int in_linesize, int *block)
{
    for (int i = 0; i < 8; i++) {
        if ((block[0x08 + i] |
             block[0x10 + i] |
             block[0x18 + i] |
             block[0x20 + i] |
             block[0x28 + i] |
             block[0x30 + i] |
             block[0x38 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        idct_1d(block, 1);
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
        block += 8;
        dst += stride;
        src += in_linesize;
    }
}

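/*
 * Reduced 1-D IDCT pass for blocks that only carry the two lowest-frequency
 * coefficients in each direction; equivalent to idct_1d() with all other
 * inputs equal to zero.
 */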
static inline void idct2_1d(int *blk, int step)
{
    const unsigned int t0 = blk[0 * step];
    const unsigned int t1 = blk[1 * step];
    const unsigned int t2 = (int)(t1 * 473U) >> 8;
    const unsigned int t3 = t2 - t1;
    const unsigned int t4 = ((int)(t1 * 362U) >> 8) - t3;
    const unsigned int t5 = (((int)(t1 * 277U) >> 8) - t2) + t4;

    blk[0 * step] = t1 + t0;
    blk[1 * step] = t0 + t3;
    blk[2 * step] = t4 + t0;
    blk[3 * step] = t0 - t5;
    blk[4 * step] = t5 + t0;
    blk[5 * step] = t0 - t4;
    blk[6 * step] = t0 - t3;
    blk[7 * step] = t0 - t1;
}

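/*
 * 2-D variant of the reduced transform: only coefficients 0, 1, 8 and 9
 * may be set, so at most two columns and (per row) two values need the
 * 1-D pass. Output handling mirrors idct_put().
 */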
static void idct2_put(uint8_t *dst, int stride, int *block)
{
    for (int i = 0; i < 2; i++) {
        if ((block[0x08 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct2_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        if (block[1] == 0) {
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[0] >> 5) + 128);
        } else {
            idct2_1d(block, 1);
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[j] >> 5) + 128);
        }
        block += 8;
        dst += stride;
    }
}

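/* As idct2_put(), but adding the residual on top of the inter prediction. */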
static void idct2_add(uint8_t *dst, int stride,
                      const uint8_t *src, int in_linesize,
                      int *block)
{
    for (int i = 0; i < 2; i++) {
        if ((block[0x08 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct2_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        if (block[1] == 0) {
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[0] >> 5) + src[j]);
        } else {
            idct2_1d(block, 1);
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
        }
        block += 8;
        dst += stride;
        src += in_linesize;
    }
}

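/*
 * DC-only inter update: copy the 8x8 prediction from the previous frame
 * and add a single bias value to every sample.
 */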
static void update_inter_block(uint8_t *dst, int stride,
                               const uint8_t *src, int in_linesize,
                               int block)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8(block + src[j]);
        dst += stride;
        src += in_linesize;
    }
}

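/*
 * Decode one 8x8 intra block. 'mode' selects how much data is coded:
 *   0: flat mid-grey (128) block, no coefficients
 *   1: DC only, coded as a delta against the running predictor in pfill
 *   2: DC plus the three lowest AC coefficients (reduced IDCT)
 *   3: all 64 coefficients in zigzag order (full IDCT)
 * Coefficient words are read from the byte stream built by decode_coeffs().
 */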
static int decode_intra_block(AVCodecContext *avctx, int mode,
                              GetByteContext *gbyte, int16_t *qtab,
                              int *block, int *pfill,
                              uint8_t *dst, int linesize)
{
    MV30Context *s = avctx->priv_data;
    int fill;

    switch (mode) {
    case 0:
        s->bdsp.fill_block_tab[1](dst, 128, linesize, 8);
        break;
    case 1:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = ((int)((unsigned)pfill[0] * qtab[0]) >> 5) + 128;
        s->bdsp.fill_block_tab[1](dst, block[0], linesize, 8);
        break;
    case 2:
        memset(block, 0, sizeof(*block) * 64);
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
        block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
        block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
        idct2_put(dst, linesize, block);
        break;
    case 3:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        for (int i = 1; i < 64; i++)
            block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
        idct_put(dst, linesize, block);
        break;
    }

    return 0;
}

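/*
 * Decode one 8x8 inter block with the same four modes as the intra case,
 * except that the output is relative to the motion-compensated prediction:
 * mode 0 is a plain copy and the others add a DC bias or an IDCT residual.
 */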
static int decode_inter_block(AVCodecContext *avctx, int mode,
                              GetByteContext *gbyte, int16_t *qtab,
                              int *block, int *pfill,
                              uint8_t *dst, int linesize,
                              const uint8_t *src, int in_linesize)
{
    int fill;

    switch (mode) {
    case 0:
        copy_block8(dst, src, linesize, in_linesize, 8);
        break;
    case 1:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (int)((unsigned)pfill[0] * qtab[0]) >> 5;
        update_inter_block(dst, linesize, src, in_linesize, block[0]);
        break;
    case 2:
        memset(block, 0, sizeof(*block) * 64);
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
        block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
        block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
        idct2_add(dst, linesize, src, in_linesize, block);
        break;
    case 3:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        for (int i = 1; i < 64; i++)
            block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
        idct_add(dst, linesize, src, in_linesize, block);
        break;
    }

    return 0;
}

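/*
 * Unpack the VLC-coded coefficient stream into an array of int16_t.
 * A non-zero code gives the magnitude class of a signed value; a zero code
 * is an escape carrying a 3- or 6-bit run of coefficients left at zero.
 */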
static int decode_coeffs(GetBitContext *gb, int16_t *coeffs, int nb_codes)
{
    memset(coeffs, 0, nb_codes * sizeof(*coeffs));

    for (int i = 0; i < nb_codes;) {
        int value = get_vlc2(gb, cbp_tab, CBP_VLC_BITS, 1);

        if (value > 0) {
            int x = get_bits(gb, value);

            if (x < (1 << value) / 2) {
                x =  (1 << (value - 1)) + (x & ((1 << value) - 1 >> 1));
            } else {
                x = -(1 << (value - 1)) - (x & ((1 << value) - 1 >> 1));
            }
            coeffs[i++] = x;
        } else {
            int flag = get_bits1(gb);

            i += get_bits(gb, 3 + flag * 3) + 1 + flag * 8;
        }
    }

    return 0;
}

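/*
 * Decode a key frame: block coding modes are read LSB-first from a separate
 * area of mode_size bytes near the start of the packet, while each 16-pixel
 * row of macroblocks carries its own VLC-coded coefficient slice. Every
 * macroblock consists of four luma and two chroma 8x8 blocks.
 */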
static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext mgb;
    uint8_t *dst[6];
    int linesize[6];
    int ret;

    mgb = *gb;
    if (get_bits_left(gb) < s->mode_size * 8)
        return AVERROR_INVALIDDATA;

    skip_bits_long(gb, s->mode_size * 8);

    linesize[0] = frame->linesize[0];
    linesize[1] = frame->linesize[0];
    linesize[2] = frame->linesize[0];
    linesize[3] = frame->linesize[0];
    linesize[4] = frame->linesize[1];
    linesize[5] = frame->linesize[2];

    for (int y = 0; y < avctx->height; y += 16) {
        GetByteContext gbyte;
        int pfill[3][1] = { {0} };
        int nb_codes = get_bits(gb, 16);

        av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
        if (!s->coeffs)
            return AVERROR(ENOMEM);
        ret = decode_coeffs(gb, s->coeffs, nb_codes);
        if (ret < 0)
            return ret;

        bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));

        for (int x = 0; x < avctx->width; x += 16) {
            dst[0] = frame->data[0] + linesize[0] * y + x;
            dst[1] = frame->data[0] + linesize[0] * y + x + 8;
            dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
            dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
            dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
            dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);

            for (int b = 0; b < 6; b++) {
                int mode = get_bits_le(&mgb, 2);

                ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
                                         s->block[b],
                                         pfill[(b >= 4) + (b >= 5)],
                                         dst[b], linesize[b]);
                if (ret < 0)
                    return ret;
            }
        }
    }

    return 0;
}

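/*
 * Decode a predicted frame. The packet carries, in order: a mask area with
 * two flag bits per macroblock (one selecting motion compensation, one
 * selecting a plain copy without residual), the block-mode area, VLC-coded
 * motion vectors and then per-slice coefficient data. Macroblocks without
 * the motion flag fall back to intra coding.
 */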
static int decode_inter(AVCodecContext *avctx, GetBitContext *gb,
                        AVFrame *frame, AVFrame *prev)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext mask;
    GetBitContext mgb;
    GetByteContext mv;
    const int mask_size = ((avctx->height >> 4) * (avctx->width >> 4) * 2 + 7) / 8;
    uint8_t *dst[6], *src[6];
    int in_linesize[6];
    int linesize[6];
    int ret, cnt = 0;
    int flags = 0;

    in_linesize[0] = prev->linesize[0];
    in_linesize[1] = prev->linesize[0];
    in_linesize[2] = prev->linesize[0];
    in_linesize[3] = prev->linesize[0];
    in_linesize[4] = prev->linesize[1];
    in_linesize[5] = prev->linesize[2];

    linesize[0] = frame->linesize[0];
    linesize[1] = frame->linesize[0];
    linesize[2] = frame->linesize[0];
    linesize[3] = frame->linesize[0];
    linesize[4] = frame->linesize[1];
    linesize[5] = frame->linesize[2];

    av_fast_padded_malloc(&s->mvectors, &s->mvectors_size, 2 * s->nb_mvectors * sizeof(*s->mvectors));
    if (!s->mvectors) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    mask = *gb;
    skip_bits_long(gb, mask_size * 8);
    mgb = *gb;
    skip_bits_long(gb, s->mode_size * 8);

    ret = decode_coeffs(gb, s->mvectors, 2 * s->nb_mvectors);
    if (ret < 0)
        goto fail;

    bytestream2_init(&mv, (uint8_t *)s->mvectors, 2 * s->nb_mvectors * sizeof(*s->mvectors));

    for (int y = 0; y < avctx->height; y += 16) {
        GetByteContext gbyte;
        int pfill[3][1] = { {0} };
        int nb_codes = get_bits(gb, 16);

        skip_bits(gb, 8);
        if (get_bits_left(gb) < 0) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
        if (!s->coeffs) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = decode_coeffs(gb, s->coeffs, nb_codes);
        if (ret < 0)
            goto fail;

        bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));

        for (int x = 0; x < avctx->width; x += 16) {
            if (cnt >= 4)
                cnt = 0;
            if (cnt == 0) {
                if (get_bits_left(&mask) < 8) {
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
                flags = get_bits(&mask, 8);
            }

            dst[0] = frame->data[0] + linesize[0] * y + x;
            dst[1] = frame->data[0] + linesize[0] * y + x + 8;
            dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
            dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
            dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
            dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);

            if ((flags >> (cnt)) & 1) {
                int mv_x = sign_extend(bytestream2_get_ne16(&mv), 16);
                int mv_y = sign_extend(bytestream2_get_ne16(&mv), 16);

                int px = x + mv_x;
                int py = y + mv_y;

                if (px < 0 || px > FFALIGN(avctx->width , 16) - 16 ||
                    py < 0 || py > FFALIGN(avctx->height, 16) - 16)
                    return AVERROR_INVALIDDATA;

                src[0] = prev->data[0] + in_linesize[0] * py + px;
                src[1] = prev->data[0] + in_linesize[0] * py + px + 8;
                src[2] = prev->data[0] + in_linesize[0] * (py + 8) + px;
                src[3] = prev->data[0] + in_linesize[0] * (py + 8) + px + 8;
                src[4] = prev->data[1] + in_linesize[4] * (py >> 1) + (px >> 1);
                src[5] = prev->data[2] + in_linesize[5] * (py >> 1) + (px >> 1);

                if ((flags >> (cnt + 4)) & 1) {
                    for (int b = 0; b < 6; b++)
                        copy_block8(dst[b], src[b], linesize[b], in_linesize[b], 8);
                } else {
                    for (int b = 0; b < 6; b++) {
                        int mode = get_bits_le(&mgb, 2);

                        ret = decode_inter_block(avctx, mode, &gbyte, s->interq_tab[b >= 4],
                                                 s->block[b],
                                                 pfill[(b >= 4) + (b >= 5)],
                                                 dst[b], linesize[b],
                                                 src[b], in_linesize[b]);
                        if (ret < 0)
                            goto fail;
                    }
                }
            } else {
                for (int b = 0; b < 6; b++) {
                    int mode = get_bits_le(&mgb, 2);

                    ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
                                             s->block[b],
                                             pfill[(b >= 4) + (b >= 5)],
                                             dst[b], linesize[b]);
                    if (ret < 0)
                        goto fail;
                }
            }

            cnt++;
        }
    }

fail:
    return ret;
}

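/*
 * Frame entry point. The packet header holds the intra quantizer, a signed
 * delta giving the inter quantizer, an inter/intra flag, the size of the
 * block-mode area and (for inter frames) the motion-vector count; the
 * quantization tables are rebuilt from it for every frame.
 */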
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *avpkt)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    int ret;

    if ((ret = init_get_bits8(gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    s->intra_quant = get_bits(gb, 8);
    s->inter_quant = s->intra_quant + get_sbits(gb, 8);
    s->is_inter = get_bits_le(gb, 16);
    s->mode_size = get_bits_le(gb, 16);
    if (s->is_inter)
        s->nb_mvectors = get_bits_le(gb, 16);

    get_qtable(s->intraq_tab[0], s->intra_quant, luma_tab);
    get_qtable(s->intraq_tab[1], s->intra_quant, chroma_tab);

    if (s->is_inter == 0) {
        frame->flags |= AV_FRAME_FLAG_KEY;
        ret = decode_intra(avctx, gb, frame);
        if (ret < 0)
            return ret;
    } else {
        get_qtable(s->interq_tab[0], s->inter_quant, luma_tab);
        get_qtable(s->interq_tab[1], s->inter_quant, chroma_tab);

        if (!s->prev_frame->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            return AVERROR_INVALIDDATA;
        }

        frame->flags &= ~AV_FRAME_FLAG_KEY;
        ret = decode_inter(avctx, gb, frame, s->prev_frame);
        if (ret < 0)
            return ret;
    }

    if ((ret = av_frame_replace(s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    return avpkt->size;
}

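/*
 * Code lengths for the coefficient VLC; the table itself is built once at
 * init time from these lengths.
 */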
static const uint8_t cbp_bits[] = {
    2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 9,
};

static av_cold void init_static_data(void)
{
    VLC_INIT_STATIC_TABLE_FROM_LENGTHS(cbp_tab, CBP_VLC_BITS,
                                       FF_ARRAY_ELEMS(cbp_bits),
                                       cbp_bits, 1, NULL, 0, 0, 0, 0);
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;
    static AVOnce init_static_once = AV_ONCE_INIT;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    ff_blockdsp_init(&s->bdsp);

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    ff_thread_once(&init_static_once, init_static_data);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;

    av_frame_unref(s->prev_frame);
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;

    av_frame_free(&s->prev_frame);
    av_freep(&s->coeffs);
    s->coeffs_size = 0;
    av_freep(&s->mvectors);
    s->mvectors_size = 0;

    return 0;
}

const FFCodec ff_mv30_decoder = {
    .p.name         = "mv30",
    CODEC_LONG_NAME("MidiVid 3.0"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_MV30,
    .priv_data_size = sizeof(MV30Context),
    .init           = decode_init,
    .close          = decode_close,
    FF_CODEC_DECODE_CB(decode_frame),
    .flush          = decode_flush,
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};