FFmpeg
mpegvideo.c
/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

#include "avcodec.h"
#include "blockdsp.h"
#include "idctdsp.h"
#include "mathops.h"
#include "mpeg_er.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "libavutil/refstruct.h"

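/* Debug helpers for FF_DEBUG_NOMC: instead of performing motion compensation,
 * they fill the destination with mid-gray (128), so that only the decoded
 * residual remains visible in the output. */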
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 8);
}

/* init common dct for both encoder and decoder */
static av_cold void dsp_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

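    /* When the caller enables FF_DEBUG_NOMC (e.g. avctx->debug |= FF_DEBUG_NOMC
     * before opening the codec), the halfpel copy/average functions are replaced
     * with the gray fills above, effectively disabling motion compensation. */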
    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }
}

av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_permute_scantable(s->permutated_intra_h_scantable, ff_alternate_horizontal_scan,
                         s->idsp.idct_permutation);
    ff_permute_scantable(s->permutated_intra_v_scantable, ff_alternate_vertical_scan,
                         s->idsp.idct_permutation);
}

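/* Each slice context is a copy of the main context; the macroblock rows are
 * split evenly between them using rounded division, e.g. mb_height = 30 with
 * 4 slices yields the row ranges [0,8), [8,15), [15,23), [23,30). */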
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
{
    const int nb_slices = s->slice_context_count;
    const size_t slice_size = s->slice_ctx_size;

    for (int i = 1; i < nb_slices; i++) {
        s->thread_context[i] = av_memdup(s, slice_size);
        if (!s->thread_context[i])
            return AVERROR(ENOMEM);
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i    ) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y   =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
    s->start_mb_y = 0;
    s->end_mb_y   = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
                                  : s->mb_height;
    return 0;
}

static av_cold void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->sc.scratchpad_buf);
    s->sc.obmc_scratchpad = NULL;
    s->sc.linesize = 0;
}

static av_cold void free_duplicate_contexts(MpegEncContext *s)
{
    for (int i = 1; i < s->slice_context_count; i++) {
        free_duplicate_context(s->thread_context[i]);
        av_freep(&s->thread_context[i]);
    }
    free_duplicate_context(s);
}

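/* Copy the whole context from src into dst, then restore the per-slice fields
 * listed in COPY() (scratch buffers, mb row range, dc/ac prediction buffers)
 * so that each slice context keeps its own private state. */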
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
{
#define COPY(M) \
    M(ScratchpadContext, sc) \
    M(int, start_mb_y) \
    M(int, end_mb_y) \
    M(int16_t*, dc_val) \
    M(void*, ac_val)

    int ret;
    // FIXME copy only needed parts
#define BACKUP(T, member) T member = dst->member;
    COPY(BACKUP)
    memcpy(dst, src, sizeof(MpegEncContext));
#define RESTORE(T, member) dst->member = member;
    COPY(RESTORE)

    ret = ff_mpv_framesize_alloc(dst->avctx, &dst->sc, dst->linesize);
    if (ret < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    return 0;
}

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
{
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->slice_context_count  = 1;
}

static av_cold void free_buffer_pools(BufferPoolContext *pools)
{
    av_refstruct_pool_uninit(&pools->mbskip_table_pool);
    av_refstruct_pool_uninit(&pools->qscale_table_pool);
    av_refstruct_pool_uninit(&pools->mb_type_pool);
    av_refstruct_pool_uninit(&pools->motion_val_pool);
    av_refstruct_pool_uninit(&pools->ref_index_pool);
    pools->alloc_mb_height = pools->alloc_mb_width = pools->alloc_mb_stride = 0;
}

av_cold int ff_mpv_init_context_frame(MpegEncContext *s)
{
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;
    BufferPoolContext *const pools = &s->buffer_pools;
    int y_size, c_size, yc_size, mb_array_size, mv_table_size, x, y;
    int mb_height;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

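    /* For an interlaced MPEG-2 sequence, each field needs a whole number of
     * 16-line macroblock rows, so the frame height is rounded up to a multiple
     * of 32 and mb_height stays even; e.g. height = 720 gives 45 MB rows when
     * progressive but 46 when interlaced. */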
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    s->slice_context_count = nb_slices;

    /* VC-1 can change from being progressive to interlaced on a per-frame
     * basis. We therefore allocate certain buffers so big that they work
     * in both instances. */
    mb_height = s->msmpeg4_version == MSMP4_VC1 ?
                    FFALIGN(s->mb_height, 2) : s->mb_height;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = mb_height * s->mb_stride;
    mv_table_size = (mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

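    /* Sizes of the per-block prediction arrays: y_size counts 8x8 luma block
     * positions (b8_stride per row, two rows per MB plus one extra wrap row
     * used for prediction from above), c_size counts chroma blocks at MB
     * resolution, and yc_size covers luma plus both chroma planes. */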
    y_size  = s->b8_stride * (2 * mb_height + 1);
    c_size  = s->mb_stride * (mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
        return AVERROR(ENOMEM);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

#define ALLOC_POOL(name, size, flags) do { \
    pools->name ##_pool = av_refstruct_pool_alloc((size), (flags)); \
    if (!pools->name ##_pool) \
        return AVERROR(ENOMEM); \
} while (0)

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp));
        if (!tmp)
            return AVERROR(ENOMEM);
        s->p_field_mv_table_base = tmp;
        tmp += s->mb_stride + 1;
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                s->p_field_mv_table[i][j] = tmp;
                tmp += mv_table_size;
            }
        }
        if (s->codec_id == AV_CODEC_ID_MPEG4) {
            ALLOC_POOL(mbskip_table, mb_array_size + 2,
                       !s->encoding ? AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME : 0);
            if (!s->encoding) {
                /* cbp, pred_dir */
                if (!(s->cbp_table      = av_mallocz(mb_array_size)) ||
                    !(s->pred_dir_table = av_mallocz(mb_array_size)))
                    return AVERROR(ENOMEM);
            }
        }
    }

    if (s->msmpeg4_version >= MSMP4_V3) {
        s->coded_block_base = av_mallocz(y_size);
        if (!s->coded_block_base)
            return AVERROR(ENOMEM);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
    }

    if (s->h263_pred || s->h263_aic || !s->encoding) {
        // When encoding, each slice (and therefore each thread)
        // gets its own ac_val and dc_val buffers in order to avoid
        // races.
        size_t allslice_yc_size = yc_size * (s->encoding ? nb_slices : 1);
        if (s->out_format == FMT_H263) {
            /* ac values */
            if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, allslice_yc_size))
                return AVERROR(ENOMEM);
            s->ac_val = s->ac_val_base + s->b8_stride + 1;
        }

        /* dc values */
        // MN: we need these for error resilience of intra-frames
        // Allocating them unconditionally for decoders also means
        // that we don't need to reinitialize when e.g. h263_aic changes.

        // y_size and therefore yc_size is always odd; allocate one element
        // more for each encoder slice in order to be able to align each slice's
        // dc_val to four in order to use aligned stores when cleaning dc_val.
        allslice_yc_size += s->encoding * nb_slices;
        if (!FF_ALLOC_TYPED_ARRAY(s->dc_val_base, allslice_yc_size))
            return AVERROR(ENOMEM);
        s->dc_val = s->dc_val_base + s->b8_stride + 1;
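        // Reset all DC predictors to 1024, the standard H.263/MPEG-4 DC
        // prediction reset value (128 << 3).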
        for (size_t i = 0; i < allslice_yc_size; ++i)
            s->dc_val_base[i] = 1024;
    }

    // Note the + 1 is for a quicker MPEG-4 slice_end detection
    if (!(s->mbskip_table  = av_mallocz(mb_array_size + 2)) ||
        /* which mb is an intra block, init macroblock skip table */
        !(s->mbintra_table = av_mallocz(mb_array_size)))
        return AVERROR(ENOMEM);

    ALLOC_POOL(qscale_table, mv_table_size, 0);
    ALLOC_POOL(mb_type, mv_table_size * sizeof(uint32_t), 0);

    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS)) {
        const int b8_array_size = s->b8_stride * mb_height * 2;
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        /* FIXME: The output of H.263 with OBMC depends upon
         * the earlier content of the buffer; therefore we set
         * the flags to always reset returned buffers here. */
        ALLOC_POOL(motion_val, mv_size, AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME);
        ALLOC_POOL(ref_index, ref_index_size, 0);
    }
#undef ALLOC_POOL
    pools->alloc_mb_width  = s->mb_width;
    pools->alloc_mb_height = mb_height;
    pools->alloc_mb_stride = s->mb_stride;

    return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
}

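/* A rough usage sketch (caller side; the exact ordering below is illustrative
 * rather than mandated by this file): set avctx, codec_id, width and height,
 * call ff_mpv_common_defaults(), then ff_mpv_common_init(); everything is
 * released again with ff_mpv_common_end().
 *
 *     ff_mpv_common_defaults(s);
 *     s->avctx  = avctx;
 *     s->width  = avctx->coded_width;
 *     s->height = avctx->coded_height;
 *     if ((ret = ff_mpv_common_init(s)) < 0)
 *         return ret;
 *     ...
 *     ff_mpv_common_end(s);
 */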
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int ret;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    dsp_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    if ((ret = ff_mpv_init_context_frame(s)))
        goto fail;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (!s->encoding) {
        ret = ff_mpv_init_duplicate_contexts(s);
        if (ret < 0)
            goto fail;
    }
//     }

    return 0;
 fail:
    ff_mpv_common_end(s);
    return ret;
}

av_cold void ff_mpv_free_context_frame(MpegEncContext *s)
{
    free_duplicate_contexts(s);

    free_buffer_pools(&s->buffer_pools);
    av_freep(&s->p_field_mv_table_base);
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            s->p_field_mv_table[i][j] = NULL;

    av_freep(&s->ac_val_base);
    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);

    s->linesize = s->uvlinesize = 0;
}

av_cold void ff_mpv_common_end(MpegEncContext *s)
{
    ff_mpv_free_context_frame(s);
    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

    ff_mpv_unref_picture(&s->last_pic);
    ff_mpv_unref_picture(&s->cur_pic);
    ff_mpv_unref_picture(&s->next_pic);

    s->context_initialized = 0;
    s->context_reinit      = 0;
    s->linesize = s->uvlinesize = 0;
}


/**
 * Clean dc, ac for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];
    /* chroma */
    unsigned uxy = s->block_index[4];
    unsigned vxy = s->block_index[5];
    int16_t *dc_val = s->dc_val;

    AV_WN32A(dc_val + xy, 1024 << 16 | 1024);
    AV_WN32 (dc_val + xy + wrap, 1024 << 16 | 1024);
    dc_val[uxy] =
    dc_val[vxy] = 1024;
    /* ac pred */
    int16_t (*ac_val)[16] = s->ac_val;
    av_assume(!((uintptr_t)ac_val & 0xF));
    // Don't reset the upper-left luma block, as it will only ever be
    // referenced by blocks from the same macroblock.
    memset(ac_val[xy + 1], 0, sizeof(*ac_val));
    memset(ac_val[xy + wrap], 0, 2 * sizeof(*ac_val));
    /* ac pred */
    memset(ac_val[uxy], 0, sizeof(*ac_val));
    memset(ac_val[vxy], 0, sizeof(*ac_val));
}

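/* Compute the 8x8 block indices and destination pointers for the macroblock
 * at (mb_x, mb_y). Note that dest[] deliberately points one macroblock to the
 * left (the mb_x - 1U below); callers advance it once per macroblock. */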
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->cur_pic.linesize[1];
    const int width_of_mb  = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb = 4 - s->avctx->lowres;

    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->cur_pic.data[0] + (int)((s->mb_x - 1U) << width_of_mb);
    s->dest[1] = s->cur_pic.data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->cur_pic.data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if (s->picture_structure == PICT_FRAME) {
        s->dest[0] += s->mb_y * linesize << height_of_mb;
        s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
    } else {
        s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
        s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
    }
}

/**
 * set qscale and update qscale dependent variables.
 */
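/* The 1..31 clamp below is the legal quantiser scale range shared by H.263,
 * MPEG-1/2 and MPEG-4; the chroma quantiser and the DC scale factors are then
 * looked up from the per-codec tables. */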
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale        = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[s->chroma_qscale];
}