mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mem_internal.h"
38 #include "libavutil/reverse.h"
39 #include "libavutil/stereo3d.h"
40 #include "libavutil/timecode.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "error_resilience.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
63 #define A53_MAX_CC_COUNT 2000
64 
65 enum Mpeg2ClosedCaptionsFormat {
66  CC_FORMAT_AUTO,
67  CC_FORMAT_A53_PART4,
68  CC_FORMAT_SCTE20,
69  CC_FORMAT_DVD
70 };
71 
72 typedef struct Mpeg1Context {
73  MpegEncContext mpeg_enc_ctx;
74  int repeat_field; /* true if we must repeat the field */
75  AVPanScan pan_scan; /* some temporary storage for the panscan */
76  AVStereo3D stereo3d;
77  int has_stereo3d;
78  AVBufferRef *a53_buf_ref;
79  enum Mpeg2ClosedCaptionsFormat cc_format;
80  uint8_t afd;
81  int has_afd;
82  int slice_count;
83  unsigned aspect_ratio_info;
84  AVRational save_aspect;
85  int save_width, save_height, save_progressive_seq;
86  AVRational frame_rate_ext; /* MPEG-2 specific framerate modificator */
87  unsigned frame_rate_index;
88  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
89  int closed_gop;
90  int tmpgexs;
91  int first_slice;
92  int extradata_decoded;
93  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
94 } Mpeg1Context;
95 
96 #define MB_TYPE_ZERO_MV 0x20000000
97 
98 static const uint32_t ptype2mb_type[7] = {
99  MB_TYPE_INTRA,
100  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
101  MB_TYPE_L0,
102  MB_TYPE_L0 | MB_TYPE_CBP,
103  MB_TYPE_QUANT | MB_TYPE_INTRA,
104  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
105  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
106 };
107 
108 static const uint32_t btype2mb_type[11] = {
109  MB_TYPE_INTRA,
110  MB_TYPE_L1,
111  MB_TYPE_L1 | MB_TYPE_CBP,
112  MB_TYPE_L0,
113  MB_TYPE_L0 | MB_TYPE_CBP,
114  MB_TYPE_L0L1,
115  MB_TYPE_L0L1 | MB_TYPE_CBP,
116  MB_TYPE_QUANT | MB_TYPE_INTRA,
117  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
118  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
119  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
120 };
121 
122 /* as H.263, but only 17 codes */
123 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
124 {
125  int code, sign, val, shift;
126 
127  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
128  if (code == 0)
129  return pred;
130  if (code < 0)
131  return 0xffff;
132 
133  sign = get_bits1(&s->gb);
134  shift = fcode - 1;
135  val = code;
136  if (shift) {
137  val = (val - 1) << shift;
138  val |= get_bits(&s->gb, shift);
139  val++;
140  }
141  if (sign)
142  val = -val;
143  val += pred;
144 
145  /* modulo decoding */
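 /* with shift = fcode - 1 the legal range is [-16 << shift, (16 << shift) - 1];
  * sign_extend(val, 5 + shift) wraps the predicted sum back into that range */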
146  return sign_extend(val, 5 + shift);
147 }
148 
149 #define MAX_INDEX (64 - 1)
150 #define check_scantable_index(ctx, x) \
151  do { \
152  if ((x) > MAX_INDEX) { \
153  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
154  ctx->mb_x, ctx->mb_y); \
155  return AVERROR_INVALIDDATA; \
156  } \
157  } while (0)
158 
159 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
160  int16_t *block, int n)
161 {
162  int level, i, j, run;
163  const uint8_t *const scantable = s->intra_scantable.permutated;
164  const uint16_t *quant_matrix = s->inter_matrix;
165  const int qscale = s->qscale;
166 
167  {
168  OPEN_READER(re, &s->gb);
169  i = -1;
170  // special case for first coefficient, no need to add second VLC table
171  UPDATE_CACHE(re, &s->gb);
172  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
173  level = (3 * qscale * quant_matrix[0]) >> 5;
174  level = (level - 1) | 1;
175  if (GET_CACHE(re, &s->gb) & 0x40000000)
176  level = -level;
177  block[0] = level;
178  i++;
179  SKIP_BITS(re, &s->gb, 2);
180  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
181  goto end;
182  }
183  /* now quantify & encode AC coefficients */
184  for (;;) {
185  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
186  TEX_VLC_BITS, 2, 0);
187 
188  if (level != 0) {
189  i += run;
190  if (i > MAX_INDEX)
191  break;
192  j = scantable[i];
193  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
194  level = (level - 1) | 1;
195  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
196  SHOW_SBITS(re, &s->gb, 1);
197  SKIP_BITS(re, &s->gb, 1);
198  } else {
199  /* escape */
200  run = SHOW_UBITS(re, &s->gb, 6) + 1;
201  LAST_SKIP_BITS(re, &s->gb, 6);
202  UPDATE_CACHE(re, &s->gb);
203  level = SHOW_SBITS(re, &s->gb, 8);
204  SKIP_BITS(re, &s->gb, 8);
205  if (level == -128) {
206  level = SHOW_UBITS(re, &s->gb, 8) - 256;
207  SKIP_BITS(re, &s->gb, 8);
208  } else if (level == 0) {
209  level = SHOW_UBITS(re, &s->gb, 8);
210  SKIP_BITS(re, &s->gb, 8);
211  }
212  i += run;
213  if (i > MAX_INDEX)
214  break;
215  j = scantable[i];
216  if (level < 0) {
217  level = -level;
218  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
219  level = (level - 1) | 1;
220  level = -level;
221  } else {
222  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
223  level = (level - 1) | 1;
224  }
225  }
226 
227  block[j] = level;
228  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
229  break;
230  UPDATE_CACHE(re, &s->gb);
231  }
232 end:
233  LAST_SKIP_BITS(re, &s->gb, 2);
234  CLOSE_READER(re, &s->gb);
235  }
236 
237  check_scantable_index(s, i);
238 
239  s->block_last_index[n] = i;
240  return 0;
241 }
242 
243 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
244  int16_t *block, int n)
245 {
246  int level, i, j, run;
247  const uint8_t *const scantable = s->intra_scantable.permutated;
248  const uint16_t *quant_matrix;
249  const int qscale = s->qscale;
250  int mismatch;
251 
252  mismatch = 1;
253 
254  {
255  OPEN_READER(re, &s->gb);
256  i = -1;
257  if (n < 4)
258  quant_matrix = s->inter_matrix;
259  else
260  quant_matrix = s->chroma_inter_matrix;
261 
262  // Special case for first coefficient, no need to add second VLC table.
263  UPDATE_CACHE(re, &s->gb);
264  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
265  level = (3 * qscale * quant_matrix[0]) >> 5;
266  if (GET_CACHE(re, &s->gb) & 0x40000000)
267  level = -level;
268  block[0] = level;
269  mismatch ^= level;
270  i++;
271  SKIP_BITS(re, &s->gb, 2);
272  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
273  goto end;
274  }
275 
276  /* now quantify & encode AC coefficients */
277  for (;;) {
278  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
279  TEX_VLC_BITS, 2, 0);
280 
281  if (level != 0) {
282  i += run;
283  if (i > MAX_INDEX)
284  break;
285  j = scantable[i];
286  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
287  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
288  SHOW_SBITS(re, &s->gb, 1);
289  SKIP_BITS(re, &s->gb, 1);
290  } else {
291  /* escape */
292  run = SHOW_UBITS(re, &s->gb, 6) + 1;
293  LAST_SKIP_BITS(re, &s->gb, 6);
294  UPDATE_CACHE(re, &s->gb);
295  level = SHOW_SBITS(re, &s->gb, 12);
296  SKIP_BITS(re, &s->gb, 12);
297 
298  i += run;
299  if (i > MAX_INDEX)
300  break;
301  j = scantable[i];
302  if (level < 0) {
303  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
304  level = -level;
305  } else {
306  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
307  }
308  }
309 
310  mismatch ^= level;
311  block[j] = level;
312  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
313  break;
314  UPDATE_CACHE(re, &s->gb);
315  }
316 end:
317  LAST_SKIP_BITS(re, &s->gb, 2);
318  CLOSE_READER(re, &s->gb);
319  }
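 /* MPEG-2 mismatch control: the coefficient sum must be odd, so the
  * accumulated parity is folded onto the LSB of the last coefficient */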
320  block[63] ^= (mismatch & 1);
321 
322  check_scantable_index(s, i);
323 
324  s->block_last_index[n] = i;
325  return 0;
326 }
327 
328 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
329  int16_t *block, int n)
330 {
331  int level, dc, diff, i, j, run;
332  int component;
333  const RL_VLC_ELEM *rl_vlc;
334  const uint8_t *const scantable = s->intra_scantable.permutated;
335  const uint16_t *quant_matrix;
336  const int qscale = s->qscale;
337  int mismatch;
338 
339  /* DC coefficient */
340  if (n < 4) {
341  quant_matrix = s->intra_matrix;
342  component = 0;
343  } else {
344  quant_matrix = s->chroma_intra_matrix;
345  component = (n & 1) + 1;
346  }
347  diff = decode_dc(&s->gb, component);
348  dc = s->last_dc[component];
349  dc += diff;
350  s->last_dc[component] = dc;
351  block[0] = dc * (1 << (3 - s->intra_dc_precision));
352  ff_tlog(s->avctx, "dc=%d\n", block[0]);
353  mismatch = block[0] ^ 1;
354  i = 0;
355  if (s->intra_vlc_format)
356  rl_vlc = ff_mpeg2_rl_vlc;
357  else
358  rl_vlc = ff_mpeg1_rl_vlc;
359 
360  {
361  OPEN_READER(re, &s->gb);
362  /* now quantify & encode AC coefficients */
363  for (;;) {
364  UPDATE_CACHE(re, &s->gb);
365  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
366  TEX_VLC_BITS, 2, 0);
367 
368  if (level == 127) {
369  break;
370  } else if (level != 0) {
371  i += run;
372  if (i > MAX_INDEX)
373  break;
374  j = scantable[i];
375  level = (level * qscale * quant_matrix[j]) >> 4;
376  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
377  SHOW_SBITS(re, &s->gb, 1);
378  LAST_SKIP_BITS(re, &s->gb, 1);
379  } else {
380  /* escape */
381  run = SHOW_UBITS(re, &s->gb, 6) + 1;
382  SKIP_BITS(re, &s->gb, 6);
383  level = SHOW_SBITS(re, &s->gb, 12);
384  LAST_SKIP_BITS(re, &s->gb, 12);
385  i += run;
386  if (i > MAX_INDEX)
387  break;
388  j = scantable[i];
389  if (level < 0) {
390  level = (-level * qscale * quant_matrix[j]) >> 4;
391  level = -level;
392  } else {
393  level = (level * qscale * quant_matrix[j]) >> 4;
394  }
395  }
396 
397  mismatch ^= level;
398  block[j] = level;
399  }
400  CLOSE_READER(re, &s->gb);
401  }
402  block[63] ^= mismatch & 1;
403 
404  check_scantable_index(s, i);
405 
406  s->block_last_index[n] = i;
407  return 0;
408 }
409 
410 /******************************************/
411 /* decoding */
412 
413 static inline int get_dmv(MpegEncContext *s)
414 {
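 /* dual-prime differential: '0' -> 0, '10' -> +1, '11' -> -1 */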
415  if (get_bits1(&s->gb))
416  return 1 - (get_bits1(&s->gb) << 1);
417  else
418  return 0;
419 }
420 
421 /* motion type (for MPEG-2) */
422 #define MT_FIELD 1
423 #define MT_FRAME 2
424 #define MT_16X8 2
425 #define MT_DMV 3
426 
427 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
428 {
429  int i, j, k, cbp, val, mb_type, motion_type;
430  const int mb_block_count = 4 + (1 << s->chroma_format);
431  int ret;
432 
433  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
434 
435  av_assert2(s->mb_skipped == 0);
436 
437  if (s->mb_skip_run-- != 0) {
438  if (s->pict_type == AV_PICTURE_TYPE_P) {
439  s->mb_skipped = 1;
440  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
441  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
442  } else {
443  int mb_type;
444 
445  if (s->mb_x)
446  mb_type = s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
447  else
448  // FIXME not sure if this is allowed in MPEG at all
449  mb_type = s->cur_pic.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
450  if (IS_INTRA(mb_type)) {
451  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
452  return AVERROR_INVALIDDATA;
453  }
454  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
455  mb_type | MB_TYPE_SKIP;
456 
457  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
458  s->mb_skipped = 1;
459  }
460 
461  return 0;
462  }
463 
464  switch (s->pict_type) {
465  default:
466  case AV_PICTURE_TYPE_I:
467  if (get_bits1(&s->gb) == 0) {
468  if (get_bits1(&s->gb) == 0) {
469  av_log(s->avctx, AV_LOG_ERROR,
470  "Invalid mb type in I-frame at %d %d\n",
471  s->mb_x, s->mb_y);
472  return AVERROR_INVALIDDATA;
473  }
474  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
475  } else {
476  mb_type = MB_TYPE_INTRA;
477  }
478  break;
479  case AV_PICTURE_TYPE_P:
480  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
481  if (mb_type < 0) {
482  av_log(s->avctx, AV_LOG_ERROR,
483  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
484  return AVERROR_INVALIDDATA;
485  }
486  mb_type = ptype2mb_type[mb_type];
487  break;
488  case AV_PICTURE_TYPE_B:
489  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
490  if (mb_type < 0) {
491  av_log(s->avctx, AV_LOG_ERROR,
492  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
493  return AVERROR_INVALIDDATA;
494  }
495  mb_type = btype2mb_type[mb_type];
496  break;
497  }
498  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
499 // motion_type = 0; /* avoid warning */
500  if (IS_INTRA(mb_type)) {
501  s->bdsp.clear_blocks(s->block[0]);
502 
503  if (!s->chroma_y_shift)
504  s->bdsp.clear_blocks(s->block[6]);
505 
506  /* compute DCT type */
507  // FIXME: add an interlaced_dct coded var?
508  if (s->picture_structure == PICT_FRAME &&
509  !s->frame_pred_frame_dct)
510  s->interlaced_dct = get_bits1(&s->gb);
511 
512  if (IS_QUANT(mb_type))
513  s->qscale = mpeg_get_qscale(s);
514 
515  if (s->concealment_motion_vectors) {
516  /* just parse them */
517  if (s->picture_structure != PICT_FRAME)
518  skip_bits1(&s->gb); /* field select */
519 
520  s->mv[0][0][0] =
521  s->last_mv[0][0][0] =
522  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
523  s->last_mv[0][0][0]);
524  s->mv[0][0][1] =
525  s->last_mv[0][0][1] =
526  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
527  s->last_mv[0][0][1]);
528 
529  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
530  } else {
531  /* reset mv prediction */
532  memset(s->last_mv, 0, sizeof(s->last_mv));
533  }
534  s->mb_intra = 1;
535 
536  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
537  for (i = 0; i < mb_block_count; i++)
538  if ((ret = mpeg2_decode_block_intra(s, s->block[i], i)) < 0)
539  return ret;
540  } else {
541  for (i = 0; i < 6; i++) {
542  ret = ff_mpeg1_decode_block_intra(&s->gb,
543  s->intra_matrix,
544  s->intra_scantable.permutated,
545  s->last_dc, s->block[i],
546  i, s->qscale);
547  if (ret < 0) {
548  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
549  s->mb_x, s->mb_y);
550  return ret;
551  }
552 
553  s->block_last_index[i] = ret;
554  }
555  }
556  } else {
557  if (mb_type & MB_TYPE_ZERO_MV) {
558  av_assert2(mb_type & MB_TYPE_CBP);
559 
560  s->mv_dir = MV_DIR_FORWARD;
561  if (s->picture_structure == PICT_FRAME) {
562  if (s->picture_structure == PICT_FRAME
563  && !s->frame_pred_frame_dct)
564  s->interlaced_dct = get_bits1(&s->gb);
565  s->mv_type = MV_TYPE_16X16;
566  } else {
567  s->mv_type = MV_TYPE_FIELD;
568  mb_type |= MB_TYPE_INTERLACED;
569  s->field_select[0][0] = s->picture_structure - 1;
570  }
571 
572  if (IS_QUANT(mb_type))
573  s->qscale = mpeg_get_qscale(s);
574 
575  s->last_mv[0][0][0] = 0;
576  s->last_mv[0][0][1] = 0;
577  s->last_mv[0][1][0] = 0;
578  s->last_mv[0][1][1] = 0;
579  s->mv[0][0][0] = 0;
580  s->mv[0][0][1] = 0;
581  } else {
582  av_assert2(mb_type & MB_TYPE_L0L1);
583  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
584  /* get additional motion vector type */
585  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
586  motion_type = MT_FRAME;
587  } else {
588  motion_type = get_bits(&s->gb, 2);
589  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
590  s->interlaced_dct = get_bits1(&s->gb);
591  }
592 
593  if (IS_QUANT(mb_type))
594  s->qscale = mpeg_get_qscale(s);
595 
596  /* motion vectors */
597  s->mv_dir = (mb_type >> 13) & 3;
598  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
599  switch (motion_type) {
600  case MT_FRAME: /* or MT_16X8 */
601  if (s->picture_structure == PICT_FRAME) {
602  mb_type |= MB_TYPE_16x16;
603  s->mv_type = MV_TYPE_16X16;
604  for (i = 0; i < 2; i++) {
605  if (USES_LIST(mb_type, i)) {
606  /* MT_FRAME */
607  s->mv[i][0][0] =
608  s->last_mv[i][0][0] =
609  s->last_mv[i][1][0] =
610  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
611  s->last_mv[i][0][0]);
612  s->mv[i][0][1] =
613  s->last_mv[i][0][1] =
614  s->last_mv[i][1][1] =
615  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
616  s->last_mv[i][0][1]);
617  /* full_pel: only for MPEG-1 */
618  if (s->full_pel[i]) {
619  s->mv[i][0][0] *= 2;
620  s->mv[i][0][1] *= 2;
621  }
622  }
623  }
624  } else {
625  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
626  s->mv_type = MV_TYPE_16X8;
627  for (i = 0; i < 2; i++) {
628  if (USES_LIST(mb_type, i)) {
629  /* MT_16X8 */
630  for (j = 0; j < 2; j++) {
631  s->field_select[i][j] = get_bits1(&s->gb);
632  for (k = 0; k < 2; k++) {
633  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
634  s->last_mv[i][j][k]);
635  s->last_mv[i][j][k] = val;
636  s->mv[i][j][k] = val;
637  }
638  }
639  }
640  }
641  }
642  break;
643  case MT_FIELD:
644  s->mv_type = MV_TYPE_FIELD;
645  if (s->picture_structure == PICT_FRAME) {
646  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
647  for (i = 0; i < 2; i++) {
648  if (USES_LIST(mb_type, i)) {
649  for (j = 0; j < 2; j++) {
650  s->field_select[i][j] = get_bits1(&s->gb);
651  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
652  s->last_mv[i][j][0]);
653  s->last_mv[i][j][0] = val;
654  s->mv[i][j][0] = val;
655  ff_tlog(s->avctx, "fmx=%d\n", val);
656  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
657  s->last_mv[i][j][1] >> 1);
658  s->last_mv[i][j][1] = 2 * val;
659  s->mv[i][j][1] = val;
660  ff_tlog(s->avctx, "fmy=%d\n", val);
661  }
662  }
663  }
664  } else {
665  av_assert0(!s->progressive_sequence);
666  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
667  for (i = 0; i < 2; i++) {
668  if (USES_LIST(mb_type, i)) {
669  s->field_select[i][0] = get_bits1(&s->gb);
670  for (k = 0; k < 2; k++) {
671  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
672  s->last_mv[i][0][k]);
673  s->last_mv[i][0][k] = val;
674  s->last_mv[i][1][k] = val;
675  s->mv[i][0][k] = val;
676  }
677  }
678  }
679  }
680  break;
681  case MT_DMV:
682  if (s->progressive_sequence){
683  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
684  return AVERROR_INVALIDDATA;
685  }
686  s->mv_type = MV_TYPE_DMV;
687  for (i = 0; i < 2; i++) {
688  if (USES_LIST(mb_type, i)) {
689  int dmx, dmy, mx, my, m;
690  const int my_shift = s->picture_structure == PICT_FRAME;
691 
692  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
693  s->last_mv[i][0][0]);
694  s->last_mv[i][0][0] = mx;
695  s->last_mv[i][1][0] = mx;
696  dmx = get_dmv(s);
697  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
698  s->last_mv[i][0][1] >> my_shift);
699  dmy = get_dmv(s);
700 
701 
702  s->last_mv[i][0][1] = my * (1 << my_shift);
703  s->last_mv[i][1][1] = my * (1 << my_shift);
704 
705  s->mv[i][0][0] = mx;
706  s->mv[i][0][1] = my;
707  s->mv[i][1][0] = mx; // not used
708  s->mv[i][1][1] = my; // not used
709 
710  if (s->picture_structure == PICT_FRAME) {
711  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
712 
713  // m = 1 + 2 * s->top_field_first;
714  m = s->top_field_first ? 1 : 3;
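 /* dual prime in frame pictures: scale the coded vector by the field
  * distance m (1 or 3 field periods, depending on field order), add the
  * small differential (dmx, dmy), and correct the vertical component by
  * -1/+1 for the spatial offset between opposite-parity fields */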
715 
716  /* top -> top pred */
717  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
718  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
719  m = 4 - m;
720  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
721  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
722  } else {
723  mb_type |= MB_TYPE_16x16;
724 
725  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
726  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
727  if (s->picture_structure == PICT_TOP_FIELD)
728  s->mv[i][2][1]--;
729  else
730  s->mv[i][2][1]++;
731  }
732  }
733  }
734  break;
735  default:
736  av_log(s->avctx, AV_LOG_ERROR,
737  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
738  return AVERROR_INVALIDDATA;
739  }
740  }
741 
742  s->mb_intra = 0;
743  if (HAS_CBP(mb_type)) {
744  s->bdsp.clear_blocks(s->block[0]);
745 
746  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
747  if (mb_block_count > 6) {
748  cbp *= 1 << mb_block_count - 6;
749  cbp |= get_bits(&s->gb, mb_block_count - 6);
750  s->bdsp.clear_blocks(s->block[6]);
751  }
752  if (cbp <= 0) {
753  av_log(s->avctx, AV_LOG_ERROR,
754  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
755  return AVERROR_INVALIDDATA;
756  }
757 
758  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
759  cbp <<= 12 - mb_block_count;
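 /* left-align cbp so bit 11 always refers to the current block;
  * the "cbp += cbp" below shifts the next block's bit into that position */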
760 
761  for (i = 0; i < mb_block_count; i++) {
762  if (cbp & (1 << 11)) {
763  if ((ret = mpeg2_decode_block_non_intra(s, s->block[i], i)) < 0)
764  return ret;
765  } else {
766  s->block_last_index[i] = -1;
767  }
768  cbp += cbp;
769  }
770  } else {
771  for (i = 0; i < 6; i++) {
772  if (cbp & 32) {
773  if ((ret = mpeg1_decode_block_inter(s, s->block[i], i)) < 0)
774  return ret;
775  } else {
776  s->block_last_index[i] = -1;
777  }
778  cbp += cbp;
779  }
780  }
781  } else {
782  for (i = 0; i < 12; i++)
783  s->block_last_index[i] = -1;
784  }
785  }
786 
787  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
788 
789  return 0;
790 }
791 
792 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
793 {
794  Mpeg1Context *s = avctx->priv_data;
795  MpegEncContext *s2 = &s->mpeg_enc_ctx;
796  int ret;
797 
798  s2->out_format = FMT_MPEG1;
799 
800  if ( avctx->codec_tag != AV_RL32("VCR2")
801  && avctx->codec_tag != AV_RL32("BW10"))
802  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
803  ret = ff_mpv_decode_init(s2, avctx);
804  if (ret < 0)
805  return ret;
806 
807  ff_mpeg12_init_vlcs();
808 
809  s2->chroma_format = 1;
810  s->repeat_field = 0;
811  avctx->color_range = AVCOL_RANGE_MPEG;
812  return 0;
813 }
814 
815 #if HAVE_THREADS
816 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
817  const AVCodecContext *avctx_from)
818 {
819  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
820  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
821  int err;
822 
823  if (avctx == avctx_from || !s1->context_initialized)
824  return 0;
825 
826  err = ff_mpeg_update_thread_context(avctx, avctx_from);
827  if (err)
828  return err;
829 
830  if (!s->context_initialized)
831  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
832 
833  return 0;
834 }
835 #endif
836 
837 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
838 #if CONFIG_MPEG1_NVDEC_HWACCEL
839  AV_PIX_FMT_CUDA,
840 #endif
841 #if CONFIG_MPEG1_VDPAU_HWACCEL
842  AV_PIX_FMT_VDPAU,
843 #endif
844  AV_PIX_FMT_YUV420P,
845  AV_PIX_FMT_NONE
846 };
847 
848 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
849 #if CONFIG_MPEG2_NVDEC_HWACCEL
850  AV_PIX_FMT_CUDA,
851 #endif
852 #if CONFIG_MPEG2_VDPAU_HWACCEL
853  AV_PIX_FMT_VDPAU,
854 #endif
855 #if CONFIG_MPEG2_DXVA2_HWACCEL
856  AV_PIX_FMT_DXVA2_VLD,
857 #endif
858 #if CONFIG_MPEG2_D3D11VA_HWACCEL
859  AV_PIX_FMT_D3D11VA_VLD,
860  AV_PIX_FMT_D3D11,
861 #endif
862 #if CONFIG_MPEG2_D3D12VA_HWACCEL
863  AV_PIX_FMT_D3D12,
864 #endif
865 #if CONFIG_MPEG2_VAAPI_HWACCEL
866  AV_PIX_FMT_VAAPI,
867 #endif
868 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
869  AV_PIX_FMT_VIDEOTOOLBOX,
870 #endif
871  AV_PIX_FMT_YUV420P,
872  AV_PIX_FMT_NONE
873 };
874 
875 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
876  AV_PIX_FMT_YUV422P,
877  AV_PIX_FMT_NONE
878 };
879 
880 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
881  AV_PIX_FMT_YUV444P,
882  AV_PIX_FMT_NONE
883 };
884 
885 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
886 {
887  Mpeg1Context *s1 = avctx->priv_data;
888  MpegEncContext *s = &s1->mpeg_enc_ctx;
889  const enum AVPixelFormat *pix_fmts;
890 
891  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
892  return AV_PIX_FMT_GRAY8;
893 
894  if (s->chroma_format < 2)
895  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
896  mpeg1_hwaccel_pixfmt_list_420 :
897  mpeg2_hwaccel_pixfmt_list_420;
898  else if (s->chroma_format == 2)
899  pix_fmts = mpeg12_pixfmt_list_422;
900  else
901  pix_fmts = mpeg12_pixfmt_list_444;
902 
903  return ff_get_format(avctx, pix_fmts);
904 }
905 
906 /* Call this function when we know all parameters.
907  * It may be called in different places for MPEG-1 and MPEG-2. */
908 static int mpeg_decode_postinit(AVCodecContext *avctx)
909 {
910  Mpeg1Context *s1 = avctx->priv_data;
911  MpegEncContext *s = &s1->mpeg_enc_ctx;
912  int ret;
913 
914  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
915  // MPEG-1 aspect
916  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
917  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
918  } else { // MPEG-2
919  // MPEG-2 aspect
920  if (s1->aspect_ratio_info > 1) {
921  AVRational dar =
922  av_div_q(av_mul_q(av_d2q(ff_mpeg2_aspect[s1->aspect_ratio_info], 255),
923  (AVRational) { s1->pan_scan.width,
924  s1->pan_scan.height }),
925  (AVRational) { s->width, s->height });
926 
927  /* We ignore the spec here and guess a bit as reality does not
928  * match the spec, see for example res_change_ffmpeg_aspect.ts
929  * and sequence-display-aspect.mpg.
930  * issue1613, 621, 562 */
931  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
932  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
933  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
934  s->avctx->sample_aspect_ratio =
935  av_div_q(av_d2q(ff_mpeg2_aspect[s1->aspect_ratio_info], 255),
936  (AVRational) { s->width, s->height });
937  } else {
938  s->avctx->sample_aspect_ratio =
939  av_div_q(av_d2q(ff_mpeg2_aspect[s1->aspect_ratio_info], 255),
940  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
941 // issue1613 4/3 16/9 -> 16/9
942 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
943 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
944 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
945  ff_dlog(avctx, "aspect A %d/%d\n",
946  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
947  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
948  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
949  s->avctx->sample_aspect_ratio.den);
950  }
951  } else {
952  s->avctx->sample_aspect_ratio =
953  av_d2q(ff_mpeg2_aspect[s1->aspect_ratio_info], 255);
954  }
955  } // MPEG-2
956 
957  if (av_image_check_sar(s->width, s->height,
958  avctx->sample_aspect_ratio) < 0) {
959  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
960  avctx->sample_aspect_ratio.num,
961  avctx->sample_aspect_ratio.den);
962  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
963  }
964 
965  if (!s->context_initialized ||
966  avctx->coded_width != s->width ||
967  avctx->coded_height != s->height ||
968  s1->save_width != s->width ||
969  s1->save_height != s->height ||
970  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
971  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
972  0) {
973  if (s->context_initialized)
974  ff_mpv_common_end(s);
975 
976  ret = ff_set_dimensions(avctx, s->width, s->height);
977  if (ret < 0)
978  return ret;
979 
980  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate &&
981  (s->bit_rate != 0x3FFFF*400)) {
982  avctx->rc_max_rate = s->bit_rate;
983  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
984  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
985  avctx->bit_rate = s->bit_rate;
986  }
987  s1->save_aspect = s->avctx->sample_aspect_ratio;
988  s1->save_width = s->width;
989  s1->save_height = s->height;
990  s1->save_progressive_seq = s->progressive_sequence;
991 
992  /* low_delay may be forced, in this case we will have B-frames
993  * that behave like P-frames. */
994  avctx->has_b_frames = !s->low_delay;
995 
996  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
997  // MPEG-1 fps
998  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
999 #if FF_API_TICKS_PER_FRAME
1000 FF_DISABLE_DEPRECATION_WARNINGS
1001  avctx->ticks_per_frame = 1;
1002 FF_ENABLE_DEPRECATION_WARNINGS
1003 #endif
1004 
1005  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
1006  } else { // MPEG-2
1007  // MPEG-2 fps
1008  av_reduce(&s->avctx->framerate.num,
1009  &s->avctx->framerate.den,
1010  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1011  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1012  1 << 30);
1013 #if FF_API_TICKS_PER_FRAME
1014 FF_DISABLE_DEPRECATION_WARNINGS
1015  avctx->ticks_per_frame = 2;
1016 FF_ENABLE_DEPRECATION_WARNINGS
1017 #endif
1018 
1019  switch (s->chroma_format) {
1020  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1021  case 2:
1022  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1023  default: av_assert0(0);
1024  }
1025  } // MPEG-2
1026 
1027  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1028 
1029  if ((ret = ff_mpv_common_init(s)) < 0)
1030  return ret;
1031  }
1032  return 0;
1033 }
1034 
1035 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1036  int buf_size)
1037 {
1038  Mpeg1Context *s1 = avctx->priv_data;
1039  MpegEncContext *s = &s1->mpeg_enc_ctx;
1040  int ref, f_code, vbv_delay, ret;
1041 
1042  ret = init_get_bits8(&s->gb, buf, buf_size);
1043  if (ret < 0)
1044  return ret;
1045 
1046  ref = get_bits(&s->gb, 10); /* temporal ref */
1047  s->pict_type = get_bits(&s->gb, 3);
1048  if (s->pict_type == 0 || s->pict_type > 3)
1049  return AVERROR_INVALIDDATA;
1050 
1051  vbv_delay = get_bits(&s->gb, 16);
1052  s->vbv_delay = vbv_delay;
1053  if (s->pict_type == AV_PICTURE_TYPE_P ||
1054  s->pict_type == AV_PICTURE_TYPE_B) {
1055  s->full_pel[0] = get_bits1(&s->gb);
1056  f_code = get_bits(&s->gb, 3);
1057  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1058  return AVERROR_INVALIDDATA;
1059  f_code += !f_code;
1060  s->mpeg_f_code[0][0] = f_code;
1061  s->mpeg_f_code[0][1] = f_code;
1062  }
1063  if (s->pict_type == AV_PICTURE_TYPE_B) {
1064  s->full_pel[1] = get_bits1(&s->gb);
1065  f_code = get_bits(&s->gb, 3);
1066  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1067  return AVERROR_INVALIDDATA;
1068  f_code += !f_code;
1069  s->mpeg_f_code[1][0] = f_code;
1070  s->mpeg_f_code[1][1] = f_code;
1071  }
1072 
1073  if (avctx->debug & FF_DEBUG_PICT_INFO)
1074  av_log(avctx, AV_LOG_DEBUG,
1075  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1076 
1077  s->y_dc_scale = 8;
1078  s->c_dc_scale = 8;
1079  return 0;
1080 }
1081 
1082 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1083 {
1084  MpegEncContext *s = &s1->mpeg_enc_ctx;
1085  int horiz_size_ext, vert_size_ext;
1086  int bit_rate_ext;
1087 
1088  skip_bits(&s->gb, 1); /* profile and level esc*/
1089  s->avctx->profile = get_bits(&s->gb, 3);
1090  s->avctx->level = get_bits(&s->gb, 4);
1091  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1092  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1093 
1094  if (!s->chroma_format) {
1095  s->chroma_format = 1;
1096  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1097  }
1098 
1099  horiz_size_ext = get_bits(&s->gb, 2);
1100  vert_size_ext = get_bits(&s->gb, 2);
1101  s->width |= (horiz_size_ext << 12);
1102  s->height |= (vert_size_ext << 12);
1103  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1104  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1105  check_marker(s->avctx, &s->gb, "after bit rate extension");
1106  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1107 
1108  s->low_delay = get_bits1(&s->gb);
1109  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1110  s->low_delay = 1;
1111 
1112  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1113  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1114 
1115  ff_dlog(s->avctx, "sequence extension\n");
1116  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1117 
1118  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1119  av_log(s->avctx, AV_LOG_DEBUG,
1120  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1121  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1122  s->avctx->rc_buffer_size, s->bit_rate);
1123 }
1124 
1125 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1126 {
1127  MpegEncContext *s = &s1->mpeg_enc_ctx;
1128  int color_description, w, h;
1129 
1130  skip_bits(&s->gb, 3); /* video format */
1131  color_description = get_bits1(&s->gb);
1132  if (color_description) {
1133  s->avctx->color_primaries = get_bits(&s->gb, 8);
1134  s->avctx->color_trc = get_bits(&s->gb, 8);
1135  s->avctx->colorspace = get_bits(&s->gb, 8);
1136  }
1137  w = get_bits(&s->gb, 14);
1138  skip_bits(&s->gb, 1); // marker
1139  h = get_bits(&s->gb, 14);
1140  // remaining 3 bits are zero padding
1141 
1142  s1->pan_scan.width = 16 * w;
1143  s1->pan_scan.height = 16 * h;
1144 
1145  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1146  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1147 }
1148 
1149 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1150 {
1151  MpegEncContext *s = &s1->mpeg_enc_ctx;
1152  int i, nofco;
1153 
1154  nofco = 1;
1155  if (s->progressive_sequence) {
1156  if (s->repeat_first_field) {
1157  nofco++;
1158  if (s->top_field_first)
1159  nofco++;
1160  }
1161  } else {
1162  if (s->picture_structure == PICT_FRAME) {
1163  nofco++;
1164  if (s->repeat_first_field)
1165  nofco++;
1166  }
1167  }
1168  for (i = 0; i < nofco; i++) {
1169  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1170  skip_bits(&s->gb, 1); // marker
1171  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1172  skip_bits(&s->gb, 1); // marker
1173  }
1174 
1175  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1176  av_log(s->avctx, AV_LOG_DEBUG,
1177  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1178  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1179  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1180  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1181 }
1182 
1183 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1184  uint16_t matrix1[64], int intra)
1185 {
1186  int i;
1187 
1188  for (i = 0; i < 64; i++) {
1189  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1190  int v = get_bits(&s->gb, 8);
1191  if (v == 0) {
1192  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1193  return AVERROR_INVALIDDATA;
1194  }
1195  if (intra && i == 0 && v != 8) {
1196  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1197  v = 8; // needed by pink.mpg / issue1046
1198  }
1199  matrix0[j] = v;
1200  if (matrix1)
1201  matrix1[j] = v;
1202  }
1203  return 0;
1204 }
1205 
1206 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1207 {
1208  ff_dlog(s->avctx, "matrix extension\n");
1209 
1210  if (get_bits1(&s->gb))
1211  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1212  if (get_bits1(&s->gb))
1213  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1214  if (get_bits1(&s->gb))
1215  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1216  if (get_bits1(&s->gb))
1217  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1218 }
1219 
1220 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1221 {
1222  MpegEncContext *s = &s1->mpeg_enc_ctx;
1223 
1224  s->full_pel[0] = s->full_pel[1] = 0;
1225  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1226  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1227  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1228  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1229  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1230  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1231  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1232  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1233  if (!s->pict_type && s->context_initialized) {
1234  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1235  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1236  return AVERROR_INVALIDDATA;
1237  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1238  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1239  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1240  s->pict_type = AV_PICTURE_TYPE_I;
1241  else
1242  s->pict_type = AV_PICTURE_TYPE_P;
1243  } else
1244  s->pict_type = AV_PICTURE_TYPE_B;
1245  }
1246 
1247  s->intra_dc_precision = get_bits(&s->gb, 2);
1248  s->picture_structure = get_bits(&s->gb, 2);
1249  s->top_field_first = get_bits1(&s->gb);
1250  s->frame_pred_frame_dct = get_bits1(&s->gb);
1251  s->concealment_motion_vectors = get_bits1(&s->gb);
1252  s->q_scale_type = get_bits1(&s->gb);
1253  s->intra_vlc_format = get_bits1(&s->gb);
1254  s->alternate_scan = get_bits1(&s->gb);
1255  s->repeat_first_field = get_bits1(&s->gb);
1256  s->chroma_420_type = get_bits1(&s->gb);
1257  s->progressive_frame = get_bits1(&s->gb);
1258 
1259  // We only initialize intra_scantable, as both scantables always coincide
1260  // and all code therefore only uses the intra one.
1261  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,
1262  s->alternate_scan ? ff_alternate_vertical_scan : ff_zigzag_direct);
1263 
1264  /* composite display not parsed */
1265  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1266  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1267  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1268  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1269  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1270  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1271  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1272  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1273  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1274 
1275  return 0;
1276 }
1277 
1278 static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
1279 {
1280  MpegEncContext *s = &s1->mpeg_enc_ctx;
1281  AVCodecContext *avctx = s->avctx;
1282  int second_field = 0;
1283  int ret;
1284 
1285  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1286  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1287  return AVERROR_INVALIDDATA;
1288  }
1289 
1290  /* start frame decoding */
1291  if (s->first_field || s->picture_structure == PICT_FRAME) {
1292  AVFrameSideData *pan_scan;
1293 
1294  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1295  return ret;
1296 
1297  if (s->picture_structure != PICT_FRAME) {
1298  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
1299  (s->picture_structure == PICT_TOP_FIELD);
1300 
1301  for (int i = 0; i < 3; i++) {
1302  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1303  s->cur_pic.data[i] = FF_PTR_ADD(s->cur_pic.data[i],
1304  s->cur_pic.linesize[i]);
1305  }
1306  s->cur_pic.linesize[i] *= 2;
1307  s->last_pic.linesize[i] *= 2;
1308  s->next_pic.linesize[i] *= 2;
1309  }
1310  }
1311 
1312  ff_mpeg_er_frame_start(s);
1313 
1314  /* first check if we must repeat the frame */
1315  s->cur_pic.ptr->f->repeat_pict = 0;
1316  if (s->repeat_first_field) {
1317  if (s->progressive_sequence) {
1318  if (s->top_field_first)
1319  s->cur_pic.ptr->f->repeat_pict = 4;
1320  else
1321  s->cur_pic.ptr->f->repeat_pict = 2;
1322  } else if (s->progressive_frame) {
1323  s->cur_pic.ptr->f->repeat_pict = 1;
1324  }
1325  }
1326 
1327  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1328  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1329  &pan_scan);
1330  if (ret < 0)
1331  return ret;
1332  if (pan_scan)
1333  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1334 
1335  if (s1->a53_buf_ref) {
1336  ret = ff_frame_new_side_data_from_buf(
1337  s->avctx, s->cur_pic.ptr->f, AV_FRAME_DATA_A53_CC,
1338  &s1->a53_buf_ref, NULL);
1339  if (ret < 0)
1340  return ret;
1341  }
1342 
1343  if (s1->has_stereo3d) {
1344  AVStereo3D *stereo = av_stereo3d_create_side_data(s->cur_pic.ptr->f);
1345  if (!stereo)
1346  return AVERROR(ENOMEM);
1347 
1348  *stereo = s1->stereo3d;
1349  s1->has_stereo3d = 0;
1350  }
1351 
1352  if (s1->has_afd) {
1353  AVFrameSideData *sd;
1354  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1355  AV_FRAME_DATA_AFD, 1, &sd);
1356  if (ret < 0)
1357  return ret;
1358  if (sd)
1359  *sd->data = s1->afd;
1360  s1->has_afd = 0;
1361  }
1362 
1363  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1364  ff_thread_finish_setup(avctx);
1365  } else { // second field
1366  second_field = 1;
1367  if (!s->cur_pic.ptr) {
1368  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1369  return AVERROR_INVALIDDATA;
1370  }
1371 
1372  if (s->avctx->hwaccel) {
1373  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1374  av_log(avctx, AV_LOG_ERROR,
1375  "hardware accelerator failed to decode first field\n");
1376  return ret;
1377  }
1378  }
1380  if (ret < 0)
1381  return ret;
1382 
1383  for (int i = 0; i < 3; i++) {
1384  s->cur_pic.data[i] = s->cur_pic.ptr->f->data[i];
1385  if (s->picture_structure == PICT_BOTTOM_FIELD)
1386  s->cur_pic.data[i] +=
1387  s->cur_pic.ptr->f->linesize[i];
1388  }
1389  }
1390 
1391  if (avctx->hwaccel) {
1392  if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
1393  return ret;
1394  } else if (s->codec_tag == MKTAG('V', 'C', 'R', '2')) {
1395  // Exchange UV
1396  FFSWAP(uint8_t*, s->cur_pic.data[1], s->cur_pic.data[2]);
1397  FFSWAP(ptrdiff_t, s->cur_pic.linesize[1], s->cur_pic.linesize[2]);
1398  if (!second_field) {
1399  FFSWAP(uint8_t*, s->next_pic.data[1], s->next_pic.data[2]);
1400  FFSWAP(ptrdiff_t, s->next_pic.linesize[1], s->next_pic.linesize[2]);
1401  FFSWAP(uint8_t*, s->last_pic.data[1], s->last_pic.data[2]);
1402  FFSWAP(ptrdiff_t, s->last_pic.linesize[1], s->last_pic.linesize[2]);
1403  }
1404  }
1405 
1406  return 0;
1407 }
1408 
1409 #define DECODE_SLICE_ERROR -1
1410 #define DECODE_SLICE_OK 0
1411 
1412 /**
1413  * Decode a slice.
1414  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1415  * @return DECODE_SLICE_ERROR if the slice is damaged,
1416  * DECODE_SLICE_OK if this slice is OK
1417  */
1418 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1419  const uint8_t **buf, int buf_size)
1420 {
1421  AVCodecContext *avctx = s->avctx;
1422  const int lowres = s->avctx->lowres;
1423  const int field_pic = s->picture_structure != PICT_FRAME;
1424  int ret;
1425 
1426  s->resync_mb_x =
1427  s->resync_mb_y = -1;
1428 
1429  av_assert0(mb_y < s->mb_height);
1430 
1431  ret = init_get_bits8(&s->gb, *buf, buf_size);
1432  if (ret < 0)
1433  return ret;
1434 
1435  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1436  skip_bits(&s->gb, 3);
1437 
1438  ff_mpeg1_clean_buffers(s);
1439  s->interlaced_dct = 0;
1440 
1441  s->qscale = mpeg_get_qscale(s);
1442 
1443  if (s->qscale == 0) {
1444  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1445  return AVERROR_INVALIDDATA;
1446  }
1447 
1448  /* extra slice info */
1449  if (skip_1stop_8data_bits(&s->gb) < 0)
1450  return AVERROR_INVALIDDATA;
1451 
1452  s->mb_x = 0;
1453 
1454  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1455  skip_bits1(&s->gb);
1456  } else {
1457  while (get_bits_left(&s->gb) > 0) {
1458  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1459  MBINCR_VLC_BITS, 2);
1460  if (code < 0) {
1461  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1462  return AVERROR_INVALIDDATA;
1463  }
1464  if (code >= 33) {
1465  if (code == 33)
1466  s->mb_x += 33;
1467  /* otherwise, stuffing, nothing to do */
1468  } else {
1469  s->mb_x += code;
1470  break;
1471  }
1472  }
1473  }
1474 
1475  if (s->mb_x >= (unsigned) s->mb_width) {
1476  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1477  return AVERROR_INVALIDDATA;
1478  }
1479 
1480  if (avctx->hwaccel) {
1481  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1482  int start_code = -1;
1483  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1484  if (buf_end < *buf + buf_size)
1485  buf_end -= 4;
1486  s->mb_y = mb_y;
1487  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1488  return DECODE_SLICE_ERROR;
1489  *buf = buf_end;
1490  return DECODE_SLICE_OK;
1491  }
1492 
1493  s->resync_mb_x = s->mb_x;
1494  s->resync_mb_y = s->mb_y = mb_y;
1495  s->mb_skip_run = 0;
1497 
1498  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1499  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1500  av_log(s->avctx, AV_LOG_DEBUG,
1501  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1502  s->qscale,
1503  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1504  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1505  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1506  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1507  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1508  s->progressive_sequence ? "ps" : "",
1509  s->progressive_frame ? "pf" : "",
1510  s->alternate_scan ? "alt" : "",
1511  s->top_field_first ? "top" : "",
1512  s->intra_dc_precision, s->picture_structure,
1513  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1514  s->q_scale_type, s->intra_vlc_format,
1515  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1516  }
1517  }
1518 
1519  for (;;) {
1520  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1521  return ret;
1522 
1523  // Note motion_val is normally NULL unless we want to extract the MVs.
1524  if (s->cur_pic.motion_val[0]) {
1525  const int wrap = s->b8_stride;
1526  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1527  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1528  int motion_x, motion_y, dir, i;
1529 
1530  for (i = 0; i < 2; i++) {
1531  for (dir = 0; dir < 2; dir++) {
1532  if (s->mb_intra ||
1533  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1534  motion_x = motion_y = 0;
1535  } else if (s->mv_type == MV_TYPE_16X16 ||
1536  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1537  motion_x = s->mv[dir][0][0];
1538  motion_y = s->mv[dir][0][1];
1539  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1540  motion_x = s->mv[dir][i][0];
1541  motion_y = s->mv[dir][i][1];
1542  }
1543 
1544  s->cur_pic.motion_val[dir][xy][0] = motion_x;
1545  s->cur_pic.motion_val[dir][xy][1] = motion_y;
1546  s->cur_pic.motion_val[dir][xy + 1][0] = motion_x;
1547  s->cur_pic.motion_val[dir][xy + 1][1] = motion_y;
1548  s->cur_pic.ref_index [dir][b8_xy] =
1549  s->cur_pic.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1550  av_assert2(s->field_select[dir][i] == 0 ||
1551  s->field_select[dir][i] == 1);
1552  }
1553  xy += wrap;
1554  b8_xy += 2;
1555  }
1556  }
1557 
1558  s->dest[0] += 16 >> lowres;
1559  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1560  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1561 
1562  ff_mpv_reconstruct_mb(s, s->block);
1563 
1564  if (++s->mb_x >= s->mb_width) {
1565  const int mb_size = 16 >> s->avctx->lowres;
1566  int left;
1567 
1568  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1570 
1571  s->mb_x = 0;
1572  s->mb_y += 1 << field_pic;
1573 
1574  if (s->mb_y >= s->mb_height) {
1575  int left = get_bits_left(&s->gb);
1576  int is_d10 = s->chroma_format == 2 &&
1577  s->pict_type == AV_PICTURE_TYPE_I &&
1578  avctx->profile == 0 && avctx->level == 5 &&
1579  s->intra_dc_precision == 2 &&
1580  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1581  s->progressive_frame == 0
1582  /* vbv_delay == 0xBBB || 0xE10 */;
1583 
1584  if (left >= 32 && !is_d10) {
1585  GetBitContext gb = s->gb;
1586  align_get_bits(&gb);
1587  if (show_bits(&gb, 24) == 0x060E2B) {
1588  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1589  is_d10 = 1;
1590  }
1591  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1592  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1593  goto eos;
1594  }
1595  }
1596 
1597  if (left < 0 ||
1598  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1599  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1600  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1601  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1602  return AVERROR_INVALIDDATA;
1603  } else
1604  goto eos;
1605  }
1606  // There are some files out there which are missing the last slice
1607  // in cases where the slice is completely outside the visible
1608  // area, we detect this here instead of running into the end expecting
1609  // more data
1610  left = get_bits_left(&s->gb);
1611  if (s->mb_y >= ((s->height + 15) >> 4) &&
1612  !s->progressive_sequence &&
1613  left <= 25 &&
1614  left >= 0 &&
1615  s->mb_skip_run == -1 &&
1616  (!left || show_bits(&s->gb, left) == 0))
1617  goto eos;
1618 
1620  }
1621 
1622  /* skip mb handling */
1623  if (s->mb_skip_run == -1) {
1624  /* read increment again */
1625  s->mb_skip_run = 0;
1626  for (;;) {
1627  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1628  MBINCR_VLC_BITS, 2);
1629  if (code < 0) {
1630  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1631  return AVERROR_INVALIDDATA;
1632  }
1633  if (code >= 33) {
1634  if (code == 33) {
1635  s->mb_skip_run += 33;
1636  } else if (code == 35) {
1637  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1638  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1639  return AVERROR_INVALIDDATA;
1640  }
1641  goto eos; /* end of slice */
1642  }
1643  /* otherwise, stuffing, nothing to do */
1644  } else {
1645  s->mb_skip_run += code;
1646  break;
1647  }
1648  }
1649  if (s->mb_skip_run) {
1650  int i;
1651  if (s->pict_type == AV_PICTURE_TYPE_I) {
1652  av_log(s->avctx, AV_LOG_ERROR,
1653  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1654  return AVERROR_INVALIDDATA;
1655  }
1656 
1657  /* skip mb */
1658  s->mb_intra = 0;
1659  for (i = 0; i < 12; i++)
1660  s->block_last_index[i] = -1;
1661  if (s->picture_structure == PICT_FRAME)
1662  s->mv_type = MV_TYPE_16X16;
1663  else
1664  s->mv_type = MV_TYPE_FIELD;
1665  if (s->pict_type == AV_PICTURE_TYPE_P) {
1666  /* if P type, zero motion vector is implied */
1667  s->mv_dir = MV_DIR_FORWARD;
1668  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1669  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1670  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1671  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1672  } else {
1673  /* if B type, reuse previous vectors and directions */
1674  s->mv[0][0][0] = s->last_mv[0][0][0];
1675  s->mv[0][0][1] = s->last_mv[0][0][1];
1676  s->mv[1][0][0] = s->last_mv[1][0][0];
1677  s->mv[1][0][1] = s->last_mv[1][0][1];
1678  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1679  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1680  }
1681  }
1682  }
1683  }
1684 eos: // end of slice
1685  if (get_bits_left(&s->gb) < 0) {
1686  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1687  return AVERROR_INVALIDDATA;
1688  }
1689  *buf += (get_bits_count(&s->gb) - 1) / 8;
1690  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1691  return 0;
1692 }
1693 
1694 static int slice_decode_thread(AVCodecContext *c, void *arg)
1695 {
1696  MpegEncContext *s = *(void **) arg;
1697  const uint8_t *buf = s->gb.buffer;
1698  int mb_y = s->start_mb_y;
1699  const int field_pic = s->picture_structure != PICT_FRAME;
1700 
1701  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1702 
1703  for (;;) {
1704  uint32_t start_code;
1705  int ret;
1706 
1707  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1708  emms_c();
1709  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1710  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1711  s->start_mb_y, s->end_mb_y, s->er.error_count);
1712  if (ret < 0) {
1713  if (c->err_recognition & AV_EF_EXPLODE)
1714  return ret;
1715  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1716  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1717  s->mb_x, s->mb_y,
1718  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1719  } else {
1720  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1721  s->mb_x - 1, s->mb_y,
1722  ER_AC_END | ER_DC_END | ER_MV_END);
1723  }
1724 
1725  if (s->mb_y == s->end_mb_y)
1726  return 0;
1727 
1728  start_code = -1;
1729  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1730  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1731  return AVERROR_INVALIDDATA;
1732  mb_y = start_code - SLICE_MIN_START_CODE;
1733  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1734  mb_y += (*buf&0xE0)<<2;
1735  mb_y <<= field_pic;
1736  if (s->picture_structure == PICT_BOTTOM_FIELD)
1737  mb_y++;
1738  if (mb_y >= s->end_mb_y)
1739  return AVERROR_INVALIDDATA;
1740  }
1741 }
1742 
1743 /**
1744  * Handle slice ends.
1745  * @return 1 if it seems to be the last slice
1746  */
1747 static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
1748 {
1749  Mpeg1Context *s1 = avctx->priv_data;
1750  MpegEncContext *s = &s1->mpeg_enc_ctx;
1751 
1752  if (!s->context_initialized || !s->cur_pic.ptr)
1753  return 0;
1754 
1755  if (s->avctx->hwaccel) {
1756  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1757  if (ret < 0) {
1758  av_log(avctx, AV_LOG_ERROR,
1759  "hardware accelerator failed to decode picture\n");
1760  return ret;
1761  }
1762  }
1763 
1764  /* end of slice reached */
1765  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1766  /* end of image */
1767 
1768  ff_er_frame_end(&s->er, NULL);
1769 
1770  ff_mpv_frame_end(s);
1771 
1772  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1773  int ret = av_frame_ref(pict, s->cur_pic.ptr->f);
1774  if (ret < 0)
1775  return ret;
1776  ff_print_debug_info(s, s->cur_pic.ptr, pict);
1777  ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1778  *got_output = 1;
1779  } else {
1780  /* latency of 1 frame for I- and P-frames */
1781  if (s->last_pic.ptr && !s->last_pic.ptr->dummy) {
1782  int ret = av_frame_ref(pict, s->last_pic.ptr->f);
1783  if (ret < 0)
1784  return ret;
1785  ff_print_debug_info(s, s->last_pic.ptr, pict);
1786  ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1787  *got_output = 1;
1788  }
1789  }
1790 
1791  return 1;
1792  } else {
1793  return 0;
1794  }
1795 }
1796 
1797 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1798  const uint8_t *buf, int buf_size)
1799 {
1800  Mpeg1Context *s1 = avctx->priv_data;
1801  MpegEncContext *s = &s1->mpeg_enc_ctx;
1802  int width, height;
1803  int i, v, j;
1804 
1805  int ret = init_get_bits8(&s->gb, buf, buf_size);
1806  if (ret < 0)
1807  return ret;
1808 
1809  width = get_bits(&s->gb, 12);
1810  height = get_bits(&s->gb, 12);
1811  if (width == 0 || height == 0) {
1812  av_log(avctx, AV_LOG_WARNING,
1813  "Invalid horizontal or vertical size value.\n");
1814  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1815  return AVERROR_INVALIDDATA;
1816  }
1817  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1818  if (s1->aspect_ratio_info == 0) {
1819  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1820  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1821  return AVERROR_INVALIDDATA;
1822  }
1823  s1->frame_rate_index = get_bits(&s->gb, 4);
1824  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1825  av_log(avctx, AV_LOG_WARNING,
1826  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1827  s1->frame_rate_index = 1;
1828  }
1829  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
1830  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1831  return AVERROR_INVALIDDATA;
1832  }
1833 
1834  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1835  skip_bits(&s->gb, 1);
1836 
1837  /* get matrix */
1838  if (get_bits1(&s->gb)) {
1839  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1840  } else {
1841  for (i = 0; i < 64; i++) {
1842  j = s->idsp.idct_permutation[i];
1843  v = ff_mpeg1_default_intra_matrix[i];
1844  s->intra_matrix[j] = v;
1845  s->chroma_intra_matrix[j] = v;
1846  }
1847  }
1848  if (get_bits1(&s->gb)) {
1849  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1850  } else {
1851  for (i = 0; i < 64; i++) {
1852  int j = s->idsp.idct_permutation[i];
1853  v = ff_mpeg1_default_non_intra_matrix[i];
1854  s->inter_matrix[j] = v;
1855  s->chroma_inter_matrix[j] = v;
1856  }
1857  }
1858 
1859  if (show_bits(&s->gb, 23) != 0) {
1860  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1861  return AVERROR_INVALIDDATA;
1862  }
1863 
1864  s->width = width;
1865  s->height = height;
1866 
1867  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1868  s->progressive_sequence = 1;
1869  s->progressive_frame = 1;
1870  s->picture_structure = PICT_FRAME;
1871  s->first_field = 0;
1872  s->frame_pred_frame_dct = 1;
1873  s->chroma_format = 1;
1874  s->codec_id =
1875  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1876  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1877  s->low_delay = 1;
1878 
1879  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1880  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1881  s->avctx->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
1882 
1883  return 0;
1884 }
1885 
1886 static int vcr2_init_sequence(AVCodecContext *avctx)
1887 {
1888  Mpeg1Context *s1 = avctx->priv_data;
1889  MpegEncContext *s = &s1->mpeg_enc_ctx;
1890  int i, v, ret;
1891 
1892  /* start new MPEG-1 context decoding */
1893  if (s->context_initialized)
1894  ff_mpv_common_end(s);
1895 
1896  s->width = avctx->coded_width;
1897  s->height = avctx->coded_height;
1898  avctx->has_b_frames = 0; // true?
1899  s->low_delay = 1;
1900 
1901  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1902 
1903  if ((ret = ff_mpv_common_init(s)) < 0)
1904  return ret;
1905 
1906  for (i = 0; i < 64; i++) {
1907  int j = s->idsp.idct_permutation[i];
1908  v = ff_mpeg1_default_intra_matrix[i];
1909  s->intra_matrix[j] = v;
1910  s->chroma_intra_matrix[j] = v;
1911 
1912  v = ff_mpeg1_default_non_intra_matrix[i];
1913  s->inter_matrix[j] = v;
1914  s->chroma_inter_matrix[j] = v;
1915  }
1916 
1917  s->progressive_sequence = 1;
1918  s->progressive_frame = 1;
1919  s->picture_structure = PICT_FRAME;
1920  s->first_field = 0;
1921  s->frame_pred_frame_dct = 1;
1922  s->chroma_format = 1;
1923  if (s->codec_tag == AV_RL32("BW10")) {
1924  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1925  } else {
1926  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1927  }
1928  s1->save_width = s->width;
1929  s1->save_height = s->height;
1930  s1->save_progressive_seq = s->progressive_sequence;
1931  return 0;
1932 }
1933 
1934 static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format,
1935  const char *label)
1936 {
1937  Mpeg1Context *s1 = avctx->priv_data;
1938 
1939  av_assert2(format != CC_FORMAT_AUTO);
1940 
1941  if (!s1->cc_format) {
1942  s1->cc_format = format;
1943 
1944  av_log(avctx, AV_LOG_DEBUG, "CC: first seen substream is %s format\n", label);
1945  }
1946 }
1947 
1948 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1949  const uint8_t *p, int buf_size)
1950 {
1951  Mpeg1Context *s1 = avctx->priv_data;
1952 
1953  if ((!s1->cc_format || s1->cc_format == CC_FORMAT_A53_PART4) &&
1954  buf_size >= 6 &&
1955  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1956  p[4] == 3 && (p[5] & 0x40)) {
1957  /* extract A53 Part 4 CC data */
1958  int cc_count = p[5] & 0x1f;
1959  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1960  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1961  const uint64_t new_size = (old_size + cc_count
1962  * UINT64_C(3));
1963  int ret;
1964 
1965  if (new_size > 3*A53_MAX_CC_COUNT)
1966  return AVERROR(EINVAL);
1967 
1968  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1969  if (ret >= 0)
1970  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1971 
1972  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1973  mpeg_set_cc_format(avctx, CC_FORMAT_A53_PART4, "A/53 Part 4");
1974  }
1975  return 1;
1976  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_SCTE20) &&
1977  buf_size >= 2 &&
1978  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1979  /* extract SCTE-20 CC data */
1980  GetBitContext gb;
1981  int cc_count = 0;
1982  int i, ret;
1983 
1984  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1985  if (ret < 0)
1986  return ret;
1987  cc_count = get_bits(&gb, 5);
1988  if (cc_count > 0) {
1989  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1990  const uint64_t new_size = (old_size + cc_count
1991  * UINT64_C(3));
1992  if (new_size > 3*A53_MAX_CC_COUNT)
1993  return AVERROR(EINVAL);
1994 
1995  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1996  if (ret >= 0) {
1997  uint8_t field, cc1, cc2;
1998  uint8_t *cap = s1->a53_buf_ref->data;
1999 
2000  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
2001  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
2002  skip_bits(&gb, 2); // priority
2003  field = get_bits(&gb, 2);
2004  skip_bits(&gb, 5); // line_offset
2005  cc1 = get_bits(&gb, 8);
2006  cc2 = get_bits(&gb, 8);
2007  skip_bits(&gb, 1); // marker
2008 
2009  if (!field) { // forbidden
2010  cap[0] = cap[1] = cap[2] = 0x00;
2011  } else {
2012  field = (field == 2 ? 1 : 0);
2013  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
2014  cap[0] = 0x04 | field;
2015  cap[1] = ff_reverse[cc1];
2016  cap[2] = ff_reverse[cc2];
2017  }
2018  cap += 3;
2019  }
2020  }
2021 
2022  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2023  mpeg_set_cc_format(avctx, CC_FORMAT_SCTE20, "SCTE-20");
2024  }
2025  return 1;
2026  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVD) &&
2027  buf_size >= 11 &&
2028  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2029  /* extract DVD CC data
2030  *
2031  * uint32_t user_data_start_code 0x000001B2 (big endian)
2032  * uint16_t user_identifier 0x4343 "CC"
2033  * uint8_t user_data_type_code 0x01
2034  * uint8_t caption_block_size 0xF8
2035  * uint8_t
2036  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2037  * bit 6 caption_filler 0
2038  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2039  * bit 0 caption_extra_field_added 1=one additional caption word
2040  *
2041  * struct caption_field_block {
2042  * uint8_t
2043  * bit 7:1 caption_filler 0x7F (all 1s)
2044  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2045  * uint8_t caption_first_byte
2046  * uint8_t caption_second_byte
2047  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2048  *
2049  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2050  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2051  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2052  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2053  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2054  int cc_count = 0;
2055  int i, ret;
2056  // There is a caption count field in the data, but it is often
2057  // incorrect. So count the number of captions present.
2058  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2059  cc_count++;
2060  // Transform the DVD format into A53 Part 4 format
2061  if (cc_count > 0) {
2062  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2063  const uint64_t new_size = (old_size + cc_count
2064  * UINT64_C(6));
2065  if (new_size > 3*A53_MAX_CC_COUNT)
2066  return AVERROR(EINVAL);
2067 
2068  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2069  if (ret >= 0) {
2070  uint8_t field1 = !!(p[4] & 0x80);
2071  uint8_t *cap = s1->a53_buf_ref->data;
2072  p += 5;
2073  for (i = 0; i < cc_count; i++) {
2074  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2075  cap[1] = p[1];
2076  cap[2] = p[2];
2077  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2078  cap[4] = p[4];
2079  cap[5] = p[5];
2080  cap += 6;
2081  p += 6;
2082  }
2083  }
2084 
2085  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2086  mpeg_set_cc_format(avctx, CC_FORMAT_DVD, "DVD");
2087  }
2088  return 1;
2089  }
2090  return 0;
2091 }
2092 
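/* Parse a user-data chunk. The payloads recognized here are the TMPGEnc
 * "TMPGEXS" marker, DTG1 active format description (AFD), JP3D stereoscopic
 * format signalling, and the closed-caption variants handled by
 * mpeg_decode_a53_cc() above. */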
2093 static void mpeg_decode_user_data(AVCodecContext *avctx,
2094  const uint8_t *p, int buf_size)
2095 {
2096  Mpeg1Context *s = avctx->priv_data;
2097  const uint8_t *buf_end = p + buf_size;
2098  Mpeg1Context *s1 = avctx->priv_data;
2099 
2100 #if 0
2101  int i;
2102  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2103  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2104  }
2105  av_log(avctx, AV_LOG_ERROR, "\n");
2106 #endif
2107 
2108  if (buf_size > 29){
2109  int i;
2110  for(i=0; i<20; i++)
2111  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2112  s->tmpgexs= 1;
2113  }
2114  }
2115  /* we parse the DTG active format information */
2116  if (buf_end - p >= 5 &&
2117  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2118  int flags = p[4];
2119  p += 5;
2120  if (flags & 0x80) {
2121  /* skip event id */
2122  p += 2;
2123  }
2124  if (flags & 0x40) {
2125  if (buf_end - p < 1)
2126  return;
2127  s1->has_afd = 1;
2128  s1->afd = p[0] & 0x0f;
2129  }
2130  } else if (buf_end - p >= 6 &&
2131  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2132  p[4] == 0x03) { // S3D_video_format_length
2133  // the 0x7F mask ignores the reserved_bit value
2134  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2135 
2136  if (S3D_video_format_type == 0x03 ||
2137  S3D_video_format_type == 0x04 ||
2138  S3D_video_format_type == 0x08 ||
2139  S3D_video_format_type == 0x23) {
2140 
2141  s1->has_stereo3d = 1;
2142 
2143  switch (S3D_video_format_type) {
2144  case 0x03:
2145  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2146  break;
2147  case 0x04:
2148  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2149  break;
2150  case 0x08:
2151  s1->stereo3d.type = AV_STEREO3D_2D;
2152  break;
2153  case 0x23:
2154  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2155  break;
2156  }
2157  }
2158  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2159  return;
2160  }
2161 }
2162 
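/* Parse a group_of_pictures header. The 25 bits read below carry the GOP
 * time code as specified for MPEG-1/2: drop_frame_flag (1 bit), hours (5),
 * minutes (6), a marker bit (1), seconds (6) and a frame count (6);
 * av_timecode_make_mpeg_tc_string() unpacks the same layout for the debug
 * log and for the "timecode" frame metadata set in mpeg_decode_frame(). */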
2163 static int mpeg_decode_gop(AVCodecContext *avctx,
2164  const uint8_t *buf, int buf_size)
2165 {
2166  Mpeg1Context *s1 = avctx->priv_data;
2167  MpegEncContext *s = &s1->mpeg_enc_ctx;
2168  int broken_link;
2169  int64_t tc;
2170 
2171  int ret = init_get_bits8(&s->gb, buf, buf_size);
2172  if (ret < 0)
2173  return ret;
2174 
2175  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2176 
2177  s1->closed_gop = get_bits1(&s->gb);
2178  /* broken_link indicates that, after editing, the
2179  * reference frames of the first B-frames following the GOP I-frame
2180  * are missing (open GOP) */
2181  broken_link = get_bits1(&s->gb);
2182 
2183  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2184  char tcbuf[AV_TIMECODE_STR_SIZE];
2185  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2186  av_log(s->avctx, AV_LOG_DEBUG,
2187  "GOP (%s) closed_gop=%d broken_link=%d\n",
2188  tcbuf, s1->closed_gop, broken_link);
2189  }
2190 
2191  return 0;
2192 }
2193 
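/* Walk the buffer start code by start code and dispatch to the sequence,
 * picture, extension, user-data, GOP and slice parsers. Returns the number
 * of bytes consumed or a negative error code; *got_output is set once a
 * complete frame has been reconstructed by slice_end(). */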
2194 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2195  int *got_output, const uint8_t *buf, int buf_size)
2196 {
2197  Mpeg1Context *s = avctx->priv_data;
2198  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2199  const uint8_t *buf_ptr = buf;
2200  const uint8_t *buf_end = buf + buf_size;
2201  int ret, input_size;
2202  int last_code = 0, skip_frame = 0;
2203  int picture_start_code_seen = 0;
2204 
2205  for (;;) {
2206  /* find next start code */
2207  uint32_t start_code = -1;
2208  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2209  if (start_code > 0x1ff) {
2210  if (!skip_frame) {
2211  if (HAVE_THREADS &&
2212  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2213  !avctx->hwaccel) {
2214  int i;
2215  av_assert0(avctx->thread_count > 1);
2216 
2217  avctx->execute(avctx, slice_decode_thread,
2218  &s2->thread_context[0], NULL,
2219  s->slice_count, sizeof(void *));
2220  for (i = 0; i < s->slice_count; i++)
2221  s2->er.error_count += s2->thread_context[i]->er.error_count;
2222 
2223 
2224  ret = slice_end(avctx, picture, got_output);
2225  if (ret < 0)
2226  return ret;
2227  }
2228  s2->pict_type = 0;
2229 
2230  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2231  return AVERROR_INVALIDDATA;
2232 
2233  return FFMAX(0, buf_ptr - buf);
2234  }
2235 
2236  input_size = buf_end - buf_ptr;
2237 
2238  if (avctx->debug & FF_DEBUG_STARTCODE)
2239  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2240  start_code, buf_ptr - buf, input_size);
2241 
2242  /* prepare data for next start code */
2243  switch (start_code) {
2244  case SEQ_START_CODE:
2245  if (last_code == 0) {
2246  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2247  if (buf != avctx->extradata)
2248  s->sync = 1;
2249  } else {
2250  av_log(avctx, AV_LOG_ERROR,
2251  "ignoring SEQ_START_CODE after %X\n", last_code);
2252  if (avctx->err_recognition & AV_EF_EXPLODE)
2253  return AVERROR_INVALIDDATA;
2254  }
2255  break;
2256 
2257  case PICTURE_START_CODE:
2258  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2259  /* If it's a frame picture, there can't be more than one picture header.
2260  Yet, it does happen and we need to handle it. */
2261  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2262  break;
2263  }
2264  picture_start_code_seen = 1;
2265 
2266  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2267  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2268  break;
2269  }
2270 
2271  if (s2->width <= 0 || s2->height <= 0) {
2272  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2273  s2->width, s2->height);
2274  return AVERROR_INVALIDDATA;
2275  }
2276 
2277  if (s->tmpgexs){
2278  s2->intra_dc_precision= 3;
2279  s2->intra_matrix[0]= 1;
2280  }
2281  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2282  !avctx->hwaccel && s->slice_count) {
2283  int i;
2284 
2285  avctx->execute(avctx, slice_decode_thread,
2286  s2->thread_context, NULL,
2287  s->slice_count, sizeof(void *));
2288  for (i = 0; i < s->slice_count; i++)
2289  s2->er.error_count += s2->thread_context[i]->er.error_count;
2290  s->slice_count = 0;
2291  }
2292  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2293  ret = mpeg_decode_postinit(avctx);
2294  if (ret < 0) {
2295  av_log(avctx, AV_LOG_ERROR,
2296  "mpeg_decode_postinit() failure\n");
2297  return ret;
2298  }
2299 
2300  /* We have a complete image: we try to decompress it. */
2301  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2302  s2->pict_type = 0;
2303  s->first_slice = 1;
2304  last_code = PICTURE_START_CODE;
2305  } else {
2306  av_log(avctx, AV_LOG_ERROR,
2307  "ignoring pic after %X\n", last_code);
2308  if (avctx->err_recognition & AV_EF_EXPLODE)
2309  return AVERROR_INVALIDDATA;
2310  }
2311  break;
2312  case EXT_START_CODE:
2313  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2314  if (ret < 0)
2315  return ret;
2316 
2317  switch (get_bits(&s2->gb, 4)) {
2318  case 0x1:
2319  if (last_code == 0) {
2320  mpeg_decode_sequence_extension(s);
2321  } else {
2322  av_log(avctx, AV_LOG_ERROR,
2323  "ignoring seq ext after %X\n", last_code);
2324  if (avctx->err_recognition & AV_EF_EXPLODE)
2325  return AVERROR_INVALIDDATA;
2326  }
2327  break;
2328  case 0x2:
2329  mpeg_decode_sequence_display_extension(s);
2330  break;
2331  case 0x3:
2332  mpeg_decode_quant_matrix_extension(s2);
2333  break;
2334  case 0x7:
2335  mpeg_decode_picture_display_extension(s);
2336  break;
2337  case 0x8:
2338  if (last_code == PICTURE_START_CODE) {
2339  ret = mpeg_decode_picture_coding_extension(s);
2340  if (ret < 0)
2341  return ret;
2342  } else {
2343  av_log(avctx, AV_LOG_ERROR,
2344  "ignoring pic cod ext after %X\n", last_code);
2345  if (avctx->err_recognition & AV_EF_EXPLODE)
2346  return AVERROR_INVALIDDATA;
2347  }
2348  break;
2349  }
2350  break;
2351  case USER_START_CODE:
2352  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2353  break;
2354  case GOP_START_CODE:
2355  if (last_code == 0) {
2356  s2->first_field = 0;
2357  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2358  if (ret < 0)
2359  return ret;
2360  s->sync = 1;
2361  } else {
2362  av_log(avctx, AV_LOG_ERROR,
2363  "ignoring GOP_START_CODE after %X\n", last_code);
2364  if (avctx->err_recognition & AV_EF_EXPLODE)
2365  return AVERROR_INVALIDDATA;
2366  }
2367  break;
2368  default:
2369  if (start_code >= SLICE_MIN_START_CODE &&
2370  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2371  if (s2->progressive_sequence && !s2->progressive_frame) {
2372  s2->progressive_frame = 1;
2373  av_log(s2->avctx, AV_LOG_ERROR,
2374  "interlaced frame in progressive sequence, ignoring\n");
2375  }
2376 
2377  if (s2->picture_structure == 0 ||
2378  (s2->picture_structure != PICT_FRAME && s2->progressive_sequence)) {
2379  av_log(s2->avctx, AV_LOG_ERROR,
2380  "picture_structure %d invalid, ignoring\n",
2381  s2->picture_structure);
2382  s2->picture_structure = PICT_FRAME;
2383  }
2384 
2385  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2386  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2387 
2388  if (s2->picture_structure == PICT_FRAME) {
2389  s2->first_field = 0;
2390  s2->v_edge_pos = 16 * s2->mb_height;
2391  } else {
2392  s2->first_field ^= 1;
2393  s2->v_edge_pos = 8 * s2->mb_height;
2394  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2395  }
2396  }
2397  if (start_code >= SLICE_MIN_START_CODE &&
2398  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2399  const int field_pic = s2->picture_structure != PICT_FRAME;
2400  int mb_y = start_code - SLICE_MIN_START_CODE;
2401  last_code = SLICE_MIN_START_CODE;
2402  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2403  mb_y += (*buf_ptr&0xE0)<<2;
2404 
2405  mb_y <<= field_pic;
2406  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2407  mb_y++;
2408 
2409  if (buf_end - buf_ptr < 2) {
2410  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2411  return AVERROR_INVALIDDATA;
2412  }
2413 
2414  if (mb_y >= s2->mb_height) {
2415  av_log(s2->avctx, AV_LOG_ERROR,
2416  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2417  return AVERROR_INVALIDDATA;
2418  }
2419 
2420  if (!s2->last_pic.ptr) {
2421  /* Skip B-frames if we do not have reference frames and
2422  * GOP is not closed. */
2423  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2424  if (!s->closed_gop) {
2425  skip_frame = 1;
2426  av_log(s2->avctx, AV_LOG_DEBUG,
2427  "Skipping B slice due to open GOP\n");
2428  break;
2429  }
2430  }
2431  }
2432  if (s2->pict_type == AV_PICTURE_TYPE_I || (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2433  s->sync = 1;
2434  if (!s2->next_pic.ptr) {
2435  /* Skip P-frames if we do not have a reference frame or
2436  * we have an invalid header. */
2437  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2438  skip_frame = 1;
2439  av_log(s2->avctx, AV_LOG_DEBUG,
2440  "Skipping P slice due to !sync\n");
2441  break;
2442  }
2443  }
2444  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2445  s2->pict_type == AV_PICTURE_TYPE_B) ||
2446  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2447  s2->pict_type != AV_PICTURE_TYPE_I) ||
2448  avctx->skip_frame >= AVDISCARD_ALL) {
2449  skip_frame = 1;
2450  break;
2451  }
2452 
2453  if (!s2->context_initialized)
2454  break;
2455 
2456  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2457  if (mb_y < avctx->skip_top ||
2458  mb_y >= s2->mb_height - avctx->skip_bottom)
2459  break;
2460  }
2461 
2462  if (!s2->pict_type) {
2463  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2464  if (avctx->err_recognition & AV_EF_EXPLODE)
2465  return AVERROR_INVALIDDATA;
2466  break;
2467  }
2468 
2469  if (s->first_slice) {
2470  skip_frame = 0;
2471  s->first_slice = 0;
2472  if ((ret = mpeg_field_start(s, buf, buf_size)) < 0)
2473  return ret;
2474  }
2475  if (!s2->cur_pic.ptr) {
2476  av_log(avctx, AV_LOG_ERROR,
2477  "current_picture not initialized\n");
2478  return AVERROR_INVALIDDATA;
2479  }
2480 
2481  if (HAVE_THREADS &&
2482  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2483  !avctx->hwaccel) {
2484  int threshold = (s2->mb_height * s->slice_count +
2485  s2->slice_context_count / 2) /
2486  s2->slice_context_count;
2487  av_assert0(avctx->thread_count > 1);
2488  if (threshold <= mb_y) {
2489  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2490 
2491  thread_context->start_mb_y = mb_y;
2492  thread_context->end_mb_y = s2->mb_height;
2493  if (s->slice_count) {
2494  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2495  ret = ff_update_duplicate_context(thread_context, s2);
2496  if (ret < 0)
2497  return ret;
2498  }
2499  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2500  if (ret < 0)
2501  return ret;
2502  s->slice_count++;
2503  }
2504  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2505  } else {
2506  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2507  emms_c();
2508 
2509  if (ret < 0) {
2510  if (avctx->err_recognition & AV_EF_EXPLODE)
2511  return ret;
2512  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2513  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2514  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2515  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2516  } else {
2517  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2518  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2519  ER_AC_END | ER_DC_END | ER_MV_END);
2520  }
2521  }
2522  }
2523  break;
2524  }
2525  }
2526 }
2527 
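/* Top-level decode callback. An empty packet (or a lone sequence end code)
 * flushes the delayed reference frame held in next_pic; otherwise the packet
 * (and, once, any extradata) is fed through decode_chunks(). */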
2528 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2529  int *got_output, AVPacket *avpkt)
2530 {
2531  const uint8_t *buf = avpkt->data;
2532  int ret;
2533  int buf_size = avpkt->size;
2534  Mpeg1Context *s = avctx->priv_data;
2535  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2536 
2537  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2538  /* special case for last picture */
2539  if (s2->low_delay == 0 && s2->next_pic.ptr) {
2540  int ret = av_frame_ref(picture, s2->next_pic.ptr->f);
2541  if (ret < 0)
2542  return ret;
2543 
2544  ff_mpv_unref_picture(&s2->next_pic);
2545 
2546  *got_output = 1;
2547  }
2548  return buf_size;
2549  }
2550 
2551  if (!s2->context_initialized &&
2552  (s2->codec_tag == AV_RL32("VCR2") || s2->codec_tag == AV_RL32("BW10")))
2553  vcr2_init_sequence(avctx);
2554 
2555  s->slice_count = 0;
2556 
2557  if (avctx->extradata && !s->extradata_decoded) {
2558  ret = decode_chunks(avctx, picture, got_output,
2559  avctx->extradata, avctx->extradata_size);
2560  if (*got_output) {
2561  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2562  av_frame_unref(picture);
2563  *got_output = 0;
2564  }
2565  s->extradata_decoded = 1;
2566  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2567  av_buffer_unref(&s->a53_buf_ref);
2568  return ret;
2569  }
2570  }
2571 
2572  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2573  if (ret<0 || *got_output) {
2574  ff_mpv_unref_picture(&s2->cur_pic);
2575 
2576  if (s->timecode_frame_start != -1 && *got_output) {
2577  char tcbuf[AV_TIMECODE_STR_SIZE];
2578  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2579  AV_FRAME_DATA_GOP_TIMECODE,
2580  sizeof(int64_t));
2581  if (!tcside)
2582  return AVERROR(ENOMEM);
2583  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2584 
2585  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2586  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2587 
2588  s->timecode_frame_start = -1;
2589  }
2590  }
2591 
2592  return ret;
2593 }
2594 
2595 static void flush(AVCodecContext *avctx)
2596 {
2597  Mpeg1Context *s = avctx->priv_data;
2598 
2599  s->sync = 0;
2600  s->closed_gop = 0;
2601 
2602  av_buffer_unref(&s->a53_buf_ref);
2603  ff_mpeg_flush(avctx);
2604 }
2605 
2606 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2607 {
2608  Mpeg1Context *s = avctx->priv_data;
2609 
2610  av_buffer_unref(&s->a53_buf_ref);
2611  return ff_mpv_decode_close(avctx);
2612 }
2613 
2614 const FFCodec ff_mpeg1video_decoder = {
2615  .p.name = "mpeg1video",
2616  CODEC_LONG_NAME("MPEG-1 video"),
2617  .p.type = AVMEDIA_TYPE_VIDEO,
2618  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2619  .priv_data_size = sizeof(Mpeg1Context),
2620  .init = mpeg_decode_init,
2621  .close = mpeg_decode_end,
2622  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2623  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2624  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
2625  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2626  .flush = flush,
2627  .p.max_lowres = 3,
2628  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2629  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2630 #if CONFIG_MPEG1_NVDEC_HWACCEL
2631  HWACCEL_NVDEC(mpeg1),
2632 #endif
2633 #if CONFIG_MPEG1_VDPAU_HWACCEL
2634  HWACCEL_VDPAU(mpeg1),
2635 #endif
2636 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2637  HWACCEL_VIDEOTOOLBOX(mpeg1),
2638 #endif
2639  NULL
2640  },
2641 };
2642 
2643 #define M2V_OFFSET(x) offsetof(Mpeg1Context, x)
2644 #define M2V_PARAM AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2645 
2646 static const AVOption mpeg2video_options[] = {
2647  { "cc_format", "extract a specific Closed Captions format",
2648  M2V_OFFSET(cc_format), AV_OPT_TYPE_INT, { .i64 = CC_FORMAT_AUTO },
2649  CC_FORMAT_AUTO, CC_FORMAT_DVD, M2V_PARAM, .unit = "cc_format" },
2650 
2651  { "auto", "pick first seen CC substream", 0, AV_OPT_TYPE_CONST,
2652  { .i64 = CC_FORMAT_AUTO }, .flags = M2V_PARAM, .unit = "cc_format" },
2653  { "a53", "pick A/53 Part 4 CC substream", 0, AV_OPT_TYPE_CONST,
2654  { .i64 = CC_FORMAT_A53_PART4 }, .flags = M2V_PARAM, .unit = "cc_format" },
2655  { "scte20", "pick SCTE-20 CC substream", 0, AV_OPT_TYPE_CONST,
2656  { .i64 = CC_FORMAT_SCTE20 }, .flags = M2V_PARAM, .unit = "cc_format" },
2657  { "dvd", "pick DVD CC substream", 0, AV_OPT_TYPE_CONST,
2658  { .i64 = CC_FORMAT_DVD }, .flags = M2V_PARAM, .unit = "cc_format" },
2659  { NULL }
2660 };
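/* These options are exposed on the "mpeg2video" decoder; with the ffmpeg CLI
 * they can be set as input (decoder) options, for example something like:
 *   ffmpeg -cc_format scte20 -i input.ts -map 0 out.mkv
 * which restricts caption extraction to the SCTE-20 substream instead of
 * auto-selecting the first substream seen. */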
2661 
2662 static const AVClass mpeg2video_class = {
2663  .class_name = "MPEG-2 video",
2664  .item_name = av_default_item_name,
2665  .option = mpeg2video_options,
2666  .version = LIBAVUTIL_VERSION_INT,
2667  .category = AV_CLASS_CATEGORY_DECODER,
2668 };
2669 
2670 const FFCodec ff_mpeg2video_decoder = {
2671  .p.name = "mpeg2video",
2672  CODEC_LONG_NAME("MPEG-2 video"),
2673  .p.type = AVMEDIA_TYPE_VIDEO,
2674  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2675  .p.priv_class = &mpeg2video_class,
2676  .priv_data_size = sizeof(Mpeg1Context),
2677  .init = mpeg_decode_init,
2678  .close = mpeg_decode_end,
2679  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2680  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2681  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
2682  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2683  .flush = flush,
2684  .p.max_lowres = 3,
2685  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2686  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2687 #if CONFIG_MPEG2_DXVA2_HWACCEL
2688  HWACCEL_DXVA2(mpeg2),
2689 #endif
2690 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2691  HWACCEL_D3D11VA(mpeg2),
2692 #endif
2693 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2694  HWACCEL_D3D11VA2(mpeg2),
2695 #endif
2696 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2697  HWACCEL_D3D12VA(mpeg2),
2698 #endif
2699 #if CONFIG_MPEG2_NVDEC_HWACCEL
2700  HWACCEL_NVDEC(mpeg2),
2701 #endif
2702 #if CONFIG_MPEG2_VAAPI_HWACCEL
2703  HWACCEL_VAAPI(mpeg2),
2704 #endif
2705 #if CONFIG_MPEG2_VDPAU_HWACCEL
2706  HWACCEL_VDPAU(mpeg2),
2707 #endif
2708 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2709  HWACCEL_VIDEOTOOLBOX(mpeg2),
2710 #endif
2711  NULL
2712  },
2713 };
2714 
2715 //legacy decoder
2716 const FFCodec ff_mpegvideo_decoder = {
2717  .p.name = "mpegvideo",
2718  CODEC_LONG_NAME("MPEG-1 video"),
2719  .p.type = AVMEDIA_TYPE_VIDEO,
2720  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2721  .priv_data_size = sizeof(Mpeg1Context),
2722  .init = mpeg_decode_init,
2723  .close = mpeg_decode_end,
2724  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2725  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2726  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2727  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2728  .flush = flush,
2729  .p.max_lowres = 3,
2730 };
2731 
2732 typedef struct IPUContext {
2733  MpegEncContext m;
2734 
2735  int flags;
2736  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2737 } IPUContext;
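/* Decoder for IPU intra-only video. For each 16x16 macroblock the loop below
 * reads a bit that must be set for every MB after the first, one or two flag
 * bits that decide whether a new quantizer scale follows, an additional bit
 * that is skipped when bit 2 of the global flags byte is set, and finally six
 * 8x8 blocks (four luma, two chroma) parsed with either the MPEG-1 or the
 * MPEG-2 intra block decoder depending on bit 7 of the flags byte. */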
2738 
2739 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2740  int *got_frame, AVPacket *avpkt)
2741 {
2742  IPUContext *s = avctx->priv_data;
2743  MpegEncContext *m = &s->m;
2744  GetBitContext *gb = &m->gb;
2745  int ret;
2746 
2747  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
2748  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6))
2749  return AVERROR_INVALIDDATA;
2750 
2751  ret = ff_get_buffer(avctx, frame, 0);
2752  if (ret < 0)
2753  return ret;
2754 
2755  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2756  if (ret < 0)
2757  return ret;
2758 
2759  s->flags = get_bits(gb, 8);
2760  m->intra_dc_precision = s->flags & 3;
2761  m->q_scale_type = !!(s->flags & 0x40);
2762  m->intra_vlc_format = !!(s->flags & 0x20);
2763  m->alternate_scan = !!(s->flags & 0x10);
2764 
2766  s->flags & 0x10 ? ff_alternate_vertical_scan : ff_zigzag_direct);
2767 
2768  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2769  m->qscale = 1;
2770 
2771  for (int y = 0; y < avctx->height; y += 16) {
2772  int intraquant;
2773 
2774  for (int x = 0; x < avctx->width; x += 16) {
2775  if (x || y) {
2776  if (!get_bits1(gb))
2777  return AVERROR_INVALIDDATA;
2778  }
2779  if (get_bits1(gb)) {
2780  intraquant = 0;
2781  } else {
2782  if (!get_bits1(gb))
2783  return AVERROR_INVALIDDATA;
2784  intraquant = 1;
2785  }
2786 
2787  if (s->flags & 4)
2788  skip_bits1(gb);
2789 
2790  if (intraquant)
2791  m->qscale = mpeg_get_qscale(m);
2792 
2793  memset(s->block, 0, sizeof(s->block));
2794 
2795  for (int n = 0; n < 6; n++) {
2796  if (s->flags & 0x80) {
2797  ret = ff_mpeg1_decode_block_intra(gb,
2798  m->intra_matrix,
2799  m->intra_scantable.permutated,
2800  m->last_dc, s->block[n],
2801  n, m->qscale);
2802  } else {
2803  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2804  }
2805 
2806  if (ret < 0)
2807  return ret;
2808  }
2809 
2810  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2811  frame->linesize[0], s->block[0]);
2812  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2813  frame->linesize[0], s->block[1]);
2814  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2815  frame->linesize[0], s->block[2]);
2816  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2817  frame->linesize[0], s->block[3]);
2818  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2819  frame->linesize[1], s->block[4]);
2820  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2821  frame->linesize[2], s->block[5]);
2822  }
2823  }
2824 
2825  align_get_bits(gb);
2826  if (get_bits_left(gb) != 32)
2827  return AVERROR_INVALIDDATA;
2828 
2829  *got_frame = 1;
2830 
2831  return avpkt->size;
2832 }
2833 
2834 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2835 {
2836  IPUContext *s = avctx->priv_data;
2837  MpegEncContext *m = &s->m;
2838 
2839  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2840  m->avctx = avctx;
2841 
2842  ff_idctdsp_init(&m->idsp, avctx);
2843  ff_mpeg12_init_vlcs();
2844 
2845  for (int i = 0; i < 64; i++) {
2846  int j = m->idsp.idct_permutation[i];
2847  int v = ff_mpeg1_default_intra_matrix[i];
2848  m->intra_matrix[j] = v;
2849  m->chroma_intra_matrix[j] = v;
2850  }
2851 
2852  return 0;
2853 }
2854 
2855 const FFCodec ff_ipu_decoder = {
2856  .p.name = "ipu",
2857  CODEC_LONG_NAME("IPU Video"),
2858  .p.type = AVMEDIA_TYPE_VIDEO,
2859  .p.id = AV_CODEC_ID_IPU,
2860  .priv_data_size = sizeof(IPUContext),
2861  .init = ipu_decode_init,
2862  FF_CODEC_DECODE_CB(ipu_decode_frame),
2863  .p.capabilities = AV_CODEC_CAP_DR1,
2864 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1886
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:699
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1427
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:55
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:81
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:444
M2V_OFFSET
#define M2V_OFFSET(x)
Definition: mpeg12dec.c:2643
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:126
level
uint8_t level
Definition: svq3.c:205
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:78
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
AVPanScan::position
int16_t position[3][2]
position of the top left corner in 1/16 pel for up to 3 fields/frames
Definition: defs.h:262
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1948
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
AV_CLASS_CATEGORY_DECODER
@ AV_CLASS_CATEGORY_DECODER
Definition: log.h:35
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf, AVFrameSideData **psd)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:1997
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1241
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2528
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:437
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
MpegEncContext::top_field_first
int top_field_first
Definition: mpegvideo.h:452
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:795
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:150
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:422
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:268
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:241
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1420
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:43
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2834
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:471
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:221
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:485
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2716
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:520
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:427
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:89
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:328
AVOption
AVOption.
Definition: opt.h:357
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:42
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:126
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:832
reverse.h
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:56
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:91
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:908
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:96
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:32
thread.h
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1397
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:270
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:100
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:129
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:84
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:87
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:560
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:96
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:425
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:925
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:124
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:123
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:130
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1747
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2194
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1820
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1206
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1583
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
MpegEncContext::picture_structure
int picture_structure
Definition: mpegvideo.h:448
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:87
AVPanScan::width
int width
width and height in 1/16 pel
Definition: defs.h:254
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1694
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:59
val
static double val(void *priv, double ch)
Definition: aeval.c:78
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:90
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:880
MpegEncContext::width
int width
Definition: mpegvideo.h:96
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1797
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:89
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:451
IPUContext
Definition: mpeg12dec.c:2732
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:837
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:786
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2670
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:87
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2739
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:31
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:848
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1035
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2595
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:85
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:896
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:74
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:286
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:118
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:260
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:48
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:83
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1125
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:75
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:320
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:49
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:875
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:241
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1292
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:177
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:271
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:278
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:125
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:392
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:220
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:432
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:204
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:695
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:30
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:288
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:88
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:165
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:707
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:709
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:495
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1149
M2V_PARAM
#define M2V_PARAM
Definition: mpeg12dec.c:2644
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
profiles.h
CC_FORMAT_A53_PART4
@ CC_FORMAT_A53_PART4
Definition: mpeg12dec.c:67
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:58
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:330
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:128
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:280
CC_FORMAT_AUTO
@ CC_FORMAT_AUTO
Definition: mpeg12dec.c:66
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
MpegEncContext::slice_context_count
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1784
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:85
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:222
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:309
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
CC_FORMAT_DVD
@ CC_FORMAT_DVD
Definition: mpeg12dec.c:69
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:219
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:81
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:65
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1575
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:366
mpeg2video_options
static const AVOption mpeg2video_options[]
Definition: mpeg12dec.c:2646
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:521
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:306
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
CC_FORMAT_SCTE20
@ CC_FORMAT_SCTE20
Definition: mpeg12dec.c:68
RL_VLC_ELEM
Definition: vlc.h:56
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
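A minimal usage sketch for av_frame_ref()/av_frame_unref() (public libavutil API; the src frame is assumed to come from some earlier decode step):

    #include <libavutil/frame.h>

    static int share_frame(AVFrame *dst, const AVFrame *src)
    {
        int ret = av_frame_ref(dst, src);   /* dst now shares src's refcounted buffers */
        if (ret < 0)
            return ret;                     /* negative AVERROR on failure */
        /* ... use dst ... */
        av_frame_unref(dst);                /* drop the reference, reset dst's fields */
        return 0;
    }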
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:423
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
shift
static int shift(int a, int b)
Definition: bonk.c:261
IPUContext::flags
int flags
Definition: mpeg12dec.c:2735
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
mpeg_field_start
static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1278
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2614
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1959
AV_RB32
Read a 32-bit value from memory in big-endian byte order (one of the AV_RB/AV_RL/AV_WB/AV_WL byte-order access macros).
Definition: bytestream.h:96
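A hedged illustration of the byte-order read macros (AV_RB32 reads four bytes as a big-endian value; libavutil/intreadwrite.h is assumed as the providing header here):

    #include <stdint.h>
    #include <libavutil/intreadwrite.h>

    /* An MPEG start code begins with the big-endian prefix 0x000001xx, so one
     * 32-bit big-endian read turns the prefix test into a shift and compare. */
    static int looks_like_start_code(const uint8_t *p)
    {
        return (AV_RB32(p) >> 8) == 0x000001;
    }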
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:50
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1595
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:463
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1848
AVCodecHWConfigInternal
Definition: hwconfig.h:25
MpegEncContext::mbskip_table
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encoding & decoding
Definition: mpegvideo.h:191
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:165
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:119
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:352
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:46
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:471
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:77
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:792
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:172
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:413
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
mpeg2video_class
static const AVClass mpeg2video_class
Definition: mpeg12dec.c:2662
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
AVBufferRef::size
size_t size
Size of data in bytes.
Definition: buffer.h:94
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1594
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:115
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
emms.h
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:57
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:321
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:425
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:467
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
[Description text cross-referenced from doc/filter_design.txt (forwarding output status / FFERROR_NOT_READY example code); not specific to mpeg12dec.c.]
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1796
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:108
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
internal.h
mpeg_set_cc_format
static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format, const char *label)
Definition: mpeg12dec.c:1934
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:83
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:288
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:164
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1404
MpegEncContext::thread_context
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
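A small sketch of av_d2q(), which approximates a double by a rational with bounded numerator/denominator (the concrete values are illustrative and the expected results are approximate):

    #include <libavutil/rational.h>

    static void d2q_demo(void)
    {
        AVRational half = av_d2q(0.5, 255);         /* should yield (AVRational){1, 2} */
        AVRational sar  = av_d2q(16.0 / 15.0, 255); /* should recover roughly {16, 15} */
        (void)half; (void)sar;
    }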
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:159
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:98
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2733
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:159
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:456
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:702
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:149
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:531
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:669
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:76
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
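A brief sketch of av_cmp_q(), used here as a three-way comparator (negative, zero, or positive result; exact conventions per rational.h):

    #include <libavutil/rational.h>

    static int is_ntsc_rate(AVRational fr)
    {
        const AVRational ntsc = {30000, 1001};  /* 29.97 fps */
        return av_cmp_q(fr, ntsc) == 0;         /* 0 means the two rationals are equal */
    }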
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:606
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2163
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:71
frame
[Description text cross-referenced from doc/filter_design.txt (flushing buffered frames and the request_frame method); not specific to mpeg12dec.c.]
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:177
ff_mpv_decode_init
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:46
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
Notify the framework that this thread has finished its per-frame setup; after the call, state read by other frame threads must not be changed (see doc/multithreading.txt for the restrictions).
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
left
[Description text cross-referenced from doc/snow.txt (Snow codec: half-pel filter coefficients, bitstream/subband layout, range coder, motion-vector prediction); not specific to mpeg12dec.c.]
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:885
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:371
AV_RL32
Read a 32-bit value from memory in little-endian byte order (one of the AV_RB/AV_RL/AV_WB/AV_WL byte-order access macros).
Definition: bytestream.h:92
mpeg12data.h
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:76
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:700
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1602
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
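A hedged usage sketch: the 25-bit GOP timecode packs a drop-frame flag, hours, minutes, a marker bit, seconds and frames; av_timecode_make_mpeg_tc_string() formats it into a caller-provided buffer of at least AV_TIMECODE_STR_SIZE bytes (the tc25bit value is assumed to come from the bitstream):

    #include <stdint.h>
    #include <stdio.h>
    #include <libavutil/timecode.h>

    static void print_gop_timecode(uint32_t tc25bit)
    {
        char buf[AV_TIMECODE_STR_SIZE];
        av_timecode_make_mpeg_tc_string(buf, tc25bit);
        /* buf now holds "HH:MM:SS:FF"; the separator reflects the drop-frame flag */
        printf("GOP timecode %s\n", buf);
    }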
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:450
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1613
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:708
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1640
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:692
Mpeg1Context::cc_format
enum Mpeg2ClosedCaptionsFormat cc_format
Definition: mpeg12dec.c:79
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:131
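sign_extend() interprets the low `bits` bits of a value as a two's-complement signed field; a portable equivalent for illustration only (not the actual mathops.h implementation):

    /* Illustrative only: same observable behaviour for 0 < bits < 31. */
    static int sign_extend_demo(int val, unsigned bits)
    {
        const int m = 1 << (bits - 1);              /* sign bit of the narrow field */
        return ((val & ((1 << bits) - 1)) ^ m) - m;
    }
    /* sign_extend_demo(0x1F, 5) == -1, sign_extend_demo(0x0F, 5) == 15 */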
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:417
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:82
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:576
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
MpegEncContext::resync_mb_x
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:351
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1798
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
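A short sketch of av_mul_q(); the scaling factor below is purely illustrative:

    #include <libavutil/rational.h>

    static AVRational scaled_frame_rate(void)
    {
        AVRational base = {30000, 1001};        /* nominal 29.97 fps */
        AVRational ext  = {2, 1};               /* illustrative scaling factor */
        return av_mul_q(base, ext);             /* {60000, 1001}, kept in reduced form */
    }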
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
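A hedged sketch of av_buffer_realloc(): passing a NULL reference allocates a new buffer, and a later call can grow it while preserving the existing bytes (error handling kept minimal):

    #include <string.h>
    #include <libavutil/buffer.h>

    static int grow_buffer_demo(void)
    {
        AVBufferRef *buf = NULL;
        if (av_buffer_realloc(&buf, 1024) < 0)   /* first call allocates */
            return -1;
        memset(buf->data, 0, buf->size);
        if (av_buffer_realloc(&buf, 4096) < 0) { /* grow; old contents preserved */
            av_buffer_unref(&buf);
            return -1;
        }
        av_buffer_unref(&buf);
        return 0;
    }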
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:124
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1396
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output.
Definition: avcodec.h:633
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:413
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2606
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2855
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
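A minimal sketch attaching stereo 3D side data to a decoded frame (the frame pointer is assumed to come from elsewhere):

    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    static void mark_top_bottom(AVFrame *frame)
    {
        AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
        if (s3d)
            s3d->type = AV_STEREO3D_TOPBOTTOM;   /* views stacked vertically */
    }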
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture, 0 otherwise
Definition: mpegvideo.h:470
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:454
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
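Because the fourcc is stored LSB-first, MKTAG() builds a value that compares directly against codec_tag; the tag used here is only an illustration:

    #include <libavcodec/avcodec.h>
    #include <libavutil/macros.h>

    static int has_vcr2_tag(const AVCodecContext *avctx)
    {
        return avctx->codec_tag == MKTAG('V', 'C', 'R', '2');  /* illustrative fourcc */
    }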
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:73
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:141
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
AVPacket
This structure stores compressed data.
Definition: packet.h:497
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:58
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
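A small sketch of av_dict_set(); the key/value pair is illustrative (with flags == 0 both strings are copied):

    #include <libavutil/dict.h>

    static void dict_demo(void)
    {
        AVDictionary *meta = NULL;
        av_dict_set(&meta, "timecode", "00:00:01:00", 0);  /* creates the dict on first use */
        /* ... hand meta over or query it with av_dict_get() ... */
        av_dict_free(&meta);
    }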
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:72
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1082
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
[Description text cross-referenced from doc/filter_design.txt; not specific to mpeg12dec.c.]
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:86
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:123
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2736
AVPanScan::height
int height
Definition: defs.h:255
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:125
MpegEncContext::resync_mb_y
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:352
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2093
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
Mpeg2ClosedCaptionsFormat
Mpeg2ClosedCaptionsFormat
Definition: mpeg12dec.c:65
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
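A hedged sketch of av_image_check_sar(), which returns 0 for a usable sample aspect ratio and a negative error code otherwise; a common reaction is to fall back to "unknown":

    #include <libavutil/imgutils.h>
    #include <libavutil/rational.h>

    static void sanitize_sar(int w, int h, AVRational *sar)
    {
        if (av_image_check_sar(w, h, *sar) < 0)
            *sar = (AVRational){ 0, 1 };   /* 0/1 means "unknown" */
    }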
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:93
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:216
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:457
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1410
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1409
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1183
MpegEncContext::codec_id
enum AVCodecID codec_id
Definition: mpegvideo.h:108
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown), that is, the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:28
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:80
Mpeg1Context
Definition: mpeg12dec.c:72
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1220
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:92
ff_mpv_decode_close
int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:171
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:243
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:61
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1418
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:460
MpegEncContext::codec_tag
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:115