FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/internal.h"
40 #include "libavutil/intmath.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/mem.h"
43 #include "libavutil/mem_internal.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/thread.h"
46 #include "avcodec.h"
47 #include "encode.h"
48 #include "idctdsp.h"
49 #include "mpeg12codecs.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mjpegenc.h"
63 #include "speedhqenc.h"
64 #include "msmpeg4enc.h"
65 #include "pixblockdsp.h"
66 #include "qpeldsp.h"
67 #include "faandct.h"
68 #include "aandcttab.h"
69 #include "flvenc.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "wmv2enc.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MpegEncContext *s, const AVPacket *pkt);
88 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MpegEncContext *s);
90 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
91 static int dct_quantize_c(MpegEncContext *s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
97 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
98 
99 static const AVOption mpv_generic_options[] = {
102  { NULL },
103 };
104 
106  .class_name = "generic mpegvideo encoder",
107  .item_name = av_default_item_name,
108  .option = mpv_generic_options,
109  .version = LIBAVUTIL_VERSION_INT,
110 };
111 
/**
 * Precompute the quantization multiplier tables for every qscale in
 * [qmin, qmax].
 *
 * For each coefficient, qmat[qscale][i] holds a fixed-point reciprocal of
 * (quantizer * matrix entry) so the quantizer can multiply and shift
 * instead of dividing.  qmat16 holds a 16-bit variant plus a per-entry
 * bias term; it is only written in the generic (non-islow/faan, non-ifast)
 * branch below.
 *
 * @param qmat         out: 32-bit reciprocal table (QMAT_SHIFT fixed point)
 * @param qmat16       out: 16-bit reciprocal ([0]) and bias ([1]) tables
 *                     (QMAT_SHIFT_MMX fixed point)
 * @param quant_matrix 64-entry quantization matrix; indexed through
 *                     idsp.idct_permutation
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        nonzero excludes coefficient 0 (DC) from the
 *                     overflow scan at the bottom of the loop
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0; // extra shift bits that would be needed to avoid overflow

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        // Map the qscale code to the effective quantizer: MPEG-2 style
        // non-linear table, or plain linear doubling.
        if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else qscale2 = qscale << 1;

        // NOTE(review): the tail of this condition (a further fdct
        // alternative and the closing ") {") is missing from this copy of
        // the file — confirm against the complete source.
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* reciprocal in fixed point;
                 * UINT64_C(2) << QMAT_SHIFT == 1 << (QMAT_SHIFT + 1) */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                // The ifast FDCT output still carries the AAN scale
                // factors, so fold ff_aanscales into the divisor.
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * So 16 <= x <= 7905
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                    (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                // Keep the 16-bit entry away from the degenerate values
                // 0 and 128*256.
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        // Scan for entries where (max coefficient level * multiplier)
        // could exceed INT_MAX; accumulate the shift that would be needed
        // so the warning below can report it.
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
195 
196 static inline void update_qscale(MpegEncContext *s)
197 {
198  if (s->q_scale_type == 1 && 0) {
199  int i;
200  int bestdiff=INT_MAX;
201  int best = 1;
202 
203  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
204  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
205  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
206  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
207  continue;
208  if (diff < bestdiff) {
209  bestdiff = diff;
210  best = i;
211  }
212  }
213  s->qscale = best;
214  } else {
215  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
216  (FF_LAMBDA_SHIFT + 7);
217  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
218  }
219 
220  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
222 }
223 
225 {
226  int i;
227 
228  if (matrix) {
229  put_bits(pb, 1, 1);
230  for (i = 0; i < 64; i++) {
232  }
233  } else
234  put_bits(pb, 1, 0);
235 }
236 
237 /**
238  * init s->cur_pic.qscale_table from s->lambda_table
239  */
241 {
242  int8_t * const qscale_table = s->cur_pic.qscale_table;
243  int i;
244 
245  for (i = 0; i < s->mb_num; i++) {
246  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
247  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
248  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
249  s->avctx->qmax);
250  }
251 }
252 
254  const MpegEncContext *src)
255 {
256 #define COPY(a) dst->a= src->a
257  COPY(pict_type);
258  COPY(f_code);
259  COPY(b_code);
260  COPY(qscale);
261  COPY(lambda);
262  COPY(lambda2);
263  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
264  COPY(progressive_frame); // FIXME don't set in encode_header
265  COPY(partitioned_frame); // FIXME don't set in encode_header
266 #undef COPY
267 }
268 
269 static void mpv_encode_init_static(void)
270 {
271  for (int i = -16; i < 16; i++)
272  default_fcode_tab[i + MAX_MV] = 1;
273 }
274 
275 /**
276  * Set the given MpegEncContext to defaults for encoding.
277  * the changed fields will not depend upon the prior state of the MpegEncContext.
278  */
280 {
281  static AVOnce init_static_once = AV_ONCE_INIT;
282 
284 
285  ff_thread_once(&init_static_once, mpv_encode_init_static);
286 
287  s->me.mv_penalty = default_mv_penalty;
288  s->fcode_tab = default_fcode_tab;
289 
290  s->input_picture_number = 0;
291  s->picture_in_gop_number = 0;
292 }
293 
295 {
296  s->dct_quantize = dct_quantize_c;
297  s->denoise_dct = denoise_dct_c;
298 
299 #if ARCH_MIPS
301 #elif ARCH_X86
303 #endif
304 
305  if (s->avctx->trellis)
306  s->dct_quantize = dct_quantize_trellis_c;
307 }
308 
309 /* init video encoder */
311 {
313  AVCPBProperties *cpb_props;
314  int i, ret;
315  int mb_array_size, mv_table_size;
316 
318 
319  switch (avctx->pix_fmt) {
320  case AV_PIX_FMT_YUVJ444P:
321  case AV_PIX_FMT_YUV444P:
322  s->chroma_format = CHROMA_444;
323  break;
324  case AV_PIX_FMT_YUVJ422P:
325  case AV_PIX_FMT_YUV422P:
326  s->chroma_format = CHROMA_422;
327  break;
328  case AV_PIX_FMT_YUVJ420P:
329  case AV_PIX_FMT_YUV420P:
330  default:
331  s->chroma_format = CHROMA_420;
332  break;
333  }
334 
336 
337  s->bit_rate = avctx->bit_rate;
338  s->width = avctx->width;
339  s->height = avctx->height;
340  if (avctx->gop_size > 600 &&
343  "keyframe interval too large!, reducing it from %d to %d\n",
344  avctx->gop_size, 600);
345  avctx->gop_size = 600;
346  }
347  s->gop_size = avctx->gop_size;
348  s->avctx = avctx;
350  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
351  "is %d.\n", MAX_B_FRAMES);
353  } else if (avctx->max_b_frames < 0) {
355  "max b frames must be 0 or positive for mpegvideo based encoders\n");
356  return AVERROR(EINVAL);
357  }
358  s->max_b_frames = avctx->max_b_frames;
359  s->codec_id = avctx->codec->id;
360  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
361  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
362  return AVERROR(EINVAL);
363  }
364 
365  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
366  s->rtp_mode = !!s->rtp_payload_size;
367  s->intra_dc_precision = avctx->intra_dc_precision;
368 
369  // workaround some differences between how applications specify dc precision
370  if (s->intra_dc_precision < 0) {
371  s->intra_dc_precision += 8;
372  } else if (s->intra_dc_precision >= 8)
373  s->intra_dc_precision -= 8;
374 
375  if (s->intra_dc_precision < 0) {
377  "intra dc precision must be positive, note some applications use"
378  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
379  return AVERROR(EINVAL);
380  }
381 
382  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
383  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
384  return AVERROR(EINVAL);
385  }
386  s->user_specified_pts = AV_NOPTS_VALUE;
387 
388  if (s->gop_size <= 1) {
389  s->intra_only = 1;
390  s->gop_size = 12;
391  } else {
392  s->intra_only = 0;
393  }
394 
395  /* Fixed QSCALE */
396  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
397 
398  s->adaptive_quant = (avctx->lumi_masking ||
399  avctx->dark_masking ||
402  avctx->p_masking ||
403  s->border_masking ||
404  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
405  !s->fixed_qscale;
406 
407  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
408 
410  switch(avctx->codec_id) {
413  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
414  break;
415  case AV_CODEC_ID_MPEG4:
419  if (avctx->rc_max_rate >= 15000000) {
420  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
421  } else if(avctx->rc_max_rate >= 2000000) {
422  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
423  } else if(avctx->rc_max_rate >= 384000) {
424  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
425  } else
426  avctx->rc_buffer_size = 40;
427  avctx->rc_buffer_size *= 16384;
428  break;
429  }
430  if (avctx->rc_buffer_size) {
431  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
432  }
433  }
434 
435  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
436  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
437  return AVERROR(EINVAL);
438  }
439 
442  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
443  }
444 
446  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
447  return AVERROR(EINVAL);
448  }
449 
451  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
452  return AVERROR(EINVAL);
453  }
454 
455  if (avctx->rc_max_rate &&
459  "impossible bitrate constraints, this will fail\n");
460  }
461 
462  if (avctx->rc_buffer_size &&
463  avctx->bit_rate * (int64_t)avctx->time_base.num >
464  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
465  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
466  return AVERROR(EINVAL);
467  }
468 
469  if (!s->fixed_qscale &&
472  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
474  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
475  if (nbt <= INT_MAX) {
476  avctx->bit_rate_tolerance = nbt;
477  } else
478  avctx->bit_rate_tolerance = INT_MAX;
479  }
480 
481  if (avctx->rc_max_rate &&
483  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
484  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
485  90000LL * (avctx->rc_buffer_size - 1) >
486  avctx->rc_max_rate * 0xFFFFLL) {
488  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
489  "specified vbv buffer is too large for the given bitrate!\n");
490  }
491 
492  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
493  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
494  s->codec_id != AV_CODEC_ID_FLV1) {
495  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
496  return AVERROR(EINVAL);
497  }
498 
499  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
501  "OBMC is only supported with simple mb decision\n");
502  return AVERROR(EINVAL);
503  }
504 
505  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
506  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
507  return AVERROR(EINVAL);
508  }
509 
510  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511  s->codec_id == AV_CODEC_ID_H263 ||
512  s->codec_id == AV_CODEC_ID_H263P) &&
513  (avctx->sample_aspect_ratio.num > 255 ||
514  avctx->sample_aspect_ratio.den > 255)) {
516  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
520  }
521 
522  if ((s->codec_id == AV_CODEC_ID_H263 ||
523  s->codec_id == AV_CODEC_ID_H263P) &&
524  (avctx->width > 2048 ||
525  avctx->height > 1152 )) {
526  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527  return AVERROR(EINVAL);
528  }
529  if ((s->codec_id == AV_CODEC_ID_H263 ||
530  s->codec_id == AV_CODEC_ID_H263P ||
531  s->codec_id == AV_CODEC_ID_RV20) &&
532  ((avctx->width &3) ||
533  (avctx->height&3) )) {
534  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
535  return AVERROR(EINVAL);
536  }
537 
538  if (s->codec_id == AV_CODEC_ID_RV10 &&
539  (avctx->width &15 ||
540  avctx->height&15 )) {
541  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
546  s->codec_id == AV_CODEC_ID_WMV2) &&
547  avctx->width & 1) {
548  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
549  return AVERROR(EINVAL);
550  }
551 
553  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557 
558  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
559  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
565  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
566  return AVERROR(EINVAL);
567  }
568 
569  if (s->scenechange_threshold < 1000000000 &&
572  "closed gop with scene change detection are not supported yet, "
573  "set threshold to 1000000000\n");
574  return AVERROR_PATCHWELCOME;
575  }
576 
578  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
581  "low delay forcing is only available for mpeg2, "
582  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
583  return AVERROR(EINVAL);
584  }
585  if (s->max_b_frames != 0) {
587  "B-frames cannot be used with low delay\n");
588  return AVERROR(EINVAL);
589  }
590  }
591 
592  if (s->q_scale_type == 1) {
593  if (avctx->qmax > 28) {
595  "non linear quant only supports qmax <= 28 currently\n");
596  return AVERROR_PATCHWELCOME;
597  }
598  }
599 
600  if (avctx->slices > 1 &&
602  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
603  return AVERROR(EINVAL);
604  }
605 
606  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
608  "notice: b_frame_strategy only affects the first pass\n");
609  s->b_frame_strategy = 0;
610  }
611 
613  if (i > 1) {
614  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
615  avctx->time_base.den /= i;
616  avctx->time_base.num /= i;
617  //return -1;
618  }
619 
620  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
621  // (a + x * 3 / 8) / x
622  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
623  s->inter_quant_bias = 0;
624  } else {
625  s->intra_quant_bias = 0;
626  // (a - x / 4) / x
627  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
628  }
629 
630  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
631  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
632  return AVERROR(EINVAL);
633  }
634 
635  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
636 
637  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
638  avctx->time_base.den > (1 << 16) - 1) {
640  "timebase %d/%d not supported by MPEG 4 standard, "
641  "the maximum admitted value for the timebase denominator "
642  "is %d\n", avctx->time_base.num, avctx->time_base.den,
643  (1 << 16) - 1);
644  return AVERROR(EINVAL);
645  }
646  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
647 
648  switch (avctx->codec->id) {
649 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
651  s->rtp_mode = 1;
652  /* fallthrough */
654  s->out_format = FMT_MPEG1;
655  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
656  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
658  break;
659 #endif
660 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
661  case AV_CODEC_ID_MJPEG:
662  case AV_CODEC_ID_AMV:
663  s->out_format = FMT_MJPEG;
664  s->intra_only = 1; /* force intra only for jpeg */
665  if ((ret = ff_mjpeg_encode_init(s)) < 0)
666  return ret;
667  avctx->delay = 0;
668  s->low_delay = 1;
669  break;
670 #endif
671  case AV_CODEC_ID_SPEEDHQ:
672  s->out_format = FMT_SPEEDHQ;
673  s->intra_only = 1; /* force intra only for SHQ */
674  if (!CONFIG_SPEEDHQ_ENCODER)
676  if ((ret = ff_speedhq_encode_init(s)) < 0)
677  return ret;
678  avctx->delay = 0;
679  s->low_delay = 1;
680  break;
681  case AV_CODEC_ID_H261:
682  if (!CONFIG_H261_ENCODER)
685  if (ret < 0)
686  return ret;
687  s->out_format = FMT_H261;
688  avctx->delay = 0;
689  s->low_delay = 1;
690  s->rtp_mode = 0; /* Sliced encoding not supported */
691  break;
692  case AV_CODEC_ID_H263:
693  if (!CONFIG_H263_ENCODER)
696  s->width, s->height) == 8) {
698  "The specified picture size of %dx%d is not valid for "
699  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
700  "352x288, 704x576, and 1408x1152. "
701  "Try H.263+.\n", s->width, s->height);
702  return AVERROR(EINVAL);
703  }
704  s->out_format = FMT_H263;
705  avctx->delay = 0;
706  s->low_delay = 1;
707  break;
708  case AV_CODEC_ID_H263P:
709  s->out_format = FMT_H263;
710  s->h263_plus = 1;
711  /* Fx */
712  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
713  s->modified_quant = s->h263_aic;
714  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
715  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
716 
717  /* /Fx */
718  /* These are just to be sure */
719  avctx->delay = 0;
720  s->low_delay = 1;
721  break;
722  case AV_CODEC_ID_FLV1:
723  s->out_format = FMT_H263;
724  s->h263_flv = 2; /* format = 1; 11-bit codes */
725  s->unrestricted_mv = 1;
726  s->rtp_mode = 0; /* don't allow GOB */
727  avctx->delay = 0;
728  s->low_delay = 1;
729  break;
730  case AV_CODEC_ID_RV10:
731  s->out_format = FMT_H263;
732  avctx->delay = 0;
733  s->low_delay = 1;
734  break;
735  case AV_CODEC_ID_RV20:
736  s->out_format = FMT_H263;
737  avctx->delay = 0;
738  s->low_delay = 1;
739  s->modified_quant = 1;
740  s->h263_aic = 1;
741  s->h263_plus = 1;
742  s->loop_filter = 1;
743  s->unrestricted_mv = 0;
744  break;
745  case AV_CODEC_ID_MPEG4:
746  s->out_format = FMT_H263;
747  s->h263_pred = 1;
748  s->unrestricted_mv = 1;
749  s->low_delay = s->max_b_frames ? 0 : 1;
750  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
751  break;
753  s->out_format = FMT_H263;
754  s->h263_pred = 1;
755  s->unrestricted_mv = 1;
756  s->msmpeg4_version = MSMP4_V2;
757  avctx->delay = 0;
758  s->low_delay = 1;
759  break;
761  s->out_format = FMT_H263;
762  s->h263_pred = 1;
763  s->unrestricted_mv = 1;
764  s->msmpeg4_version = MSMP4_V3;
765  s->flipflop_rounding = 1;
766  avctx->delay = 0;
767  s->low_delay = 1;
768  break;
769  case AV_CODEC_ID_WMV1:
770  s->out_format = FMT_H263;
771  s->h263_pred = 1;
772  s->unrestricted_mv = 1;
773  s->msmpeg4_version = MSMP4_WMV1;
774  s->flipflop_rounding = 1;
775  avctx->delay = 0;
776  s->low_delay = 1;
777  break;
778  case AV_CODEC_ID_WMV2:
779  s->out_format = FMT_H263;
780  s->h263_pred = 1;
781  s->unrestricted_mv = 1;
782  s->msmpeg4_version = MSMP4_WMV2;
783  s->flipflop_rounding = 1;
784  avctx->delay = 0;
785  s->low_delay = 1;
786  break;
787  default:
788  return AVERROR(EINVAL);
789  }
790 
791  avctx->has_b_frames = !s->low_delay;
792 
793  s->encoding = 1;
794 
795  s->progressive_frame =
796  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
798  s->alternate_scan);
799 
800  if (s->lmin > s->lmax) {
801  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", s->lmax);
802  s->lmin = s->lmax;
803  }
804 
805  /* init */
807  if ((ret = ff_mpv_common_init(s)) < 0)
808  return ret;
809 
810  ff_fdctdsp_init(&s->fdsp, avctx);
811  ff_me_cmp_init(&s->mecc, avctx);
812  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
813  ff_pixblockdsp_init(&s->pdsp, avctx);
814 
815  if (!(avctx->stats_out = av_mallocz(256)) ||
816  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
817  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
818  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
819  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
820  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
821  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
822  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
823  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
824  !(s->new_pic = av_frame_alloc()) ||
825  !(s->picture_pool = ff_mpv_alloc_pic_pool(0)))
826  return AVERROR(ENOMEM);
827 
828  /* Allocate MV tables; the MV and MB tables will be copied
829  * to slice contexts by ff_update_duplicate_context(). */
830  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
831  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
832  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
833  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
834  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
835  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
836  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
837  return AVERROR(ENOMEM);
838  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
839  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
840  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
841  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
842  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
843  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
844 
845  /* Allocate MB type table */
846  mb_array_size = s->mb_stride * s->mb_height;
847  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
848  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
849  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
850  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size) ||
851  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
852  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
853  !(s->mb_mean = av_mallocz(mb_array_size)))
854  return AVERROR(ENOMEM);
855 
856 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
857  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
858  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
859  int16_t (*tmp1)[2];
860  uint8_t *tmp2;
861  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
862  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
863  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
864  return AVERROR(ENOMEM);
865 
866  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
867  tmp1 += s->mb_stride + 1;
868 
869  for (int i = 0; i < 2; i++) {
870  for (int j = 0; j < 2; j++) {
871  for (int k = 0; k < 2; k++) {
872  s->b_field_mv_table[i][j][k] = tmp1;
873  tmp1 += mv_table_size;
874  }
875  s->b_field_select_table[i][j] = tmp2;
876  tmp2 += 2 * mv_table_size;
877  }
878  }
879  }
880 
881  if (s->noise_reduction) {
882  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
883  return AVERROR(ENOMEM);
884  }
885 
887 
888  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
889  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
890  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
891  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
892  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
893  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
894  } else {
895  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
896  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
897  }
898 
899  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
900  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
901 
902  if (s->slice_context_count > 1) {
903  s->rtp_mode = 1;
904 
906  s->h263_slice_structured = 1;
907  }
908 
909  s->quant_precision = 5;
910 
911  ret = ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
912  ret |= ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
913  if (ret < 0)
914  return AVERROR(EINVAL);
915 
916  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
918 #if CONFIG_MSMPEG4ENC
919  if (s->msmpeg4_version != MSMP4_UNUSED)
921 #endif
922  }
923 
924  /* init q matrix */
925  for (i = 0; i < 64; i++) {
926  int j = s->idsp.idct_permutation[i];
927  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
928  s->mpeg_quant) {
929  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
930  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
931  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
932  s->intra_matrix[j] =
933  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
934  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
935  s->intra_matrix[j] =
936  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
937  } else {
938  /* MPEG-1/2 */
939  s->chroma_intra_matrix[j] =
940  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
941  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
942  }
943  if (avctx->intra_matrix)
944  s->intra_matrix[j] = avctx->intra_matrix[i];
945  if (avctx->inter_matrix)
946  s->inter_matrix[j] = avctx->inter_matrix[i];
947  }
948 
949  /* precompute matrix */
950  /* for mjpeg, we do include qscale in the matrix */
951  if (s->out_format != FMT_MJPEG) {
952  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
953  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
954  31, 1);
955  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
956  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
957  31, 0);
958  }
959 
960  if ((ret = ff_rate_control_init(s)) < 0)
961  return ret;
962 
963  if (s->b_frame_strategy == 2) {
964  for (i = 0; i < s->max_b_frames + 2; i++) {
965  s->tmp_frames[i] = av_frame_alloc();
966  if (!s->tmp_frames[i])
967  return AVERROR(ENOMEM);
968 
969  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
970  s->tmp_frames[i]->width = s->width >> s->brd_scale;
971  s->tmp_frames[i]->height = s->height >> s->brd_scale;
972 
973  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
974  if (ret < 0)
975  return ret;
976  }
977  }
978 
979  cpb_props = ff_encode_add_cpb_side_data(avctx);
980  if (!cpb_props)
981  return AVERROR(ENOMEM);
982  cpb_props->max_bitrate = avctx->rc_max_rate;
983  cpb_props->min_bitrate = avctx->rc_min_rate;
984  cpb_props->avg_bitrate = avctx->bit_rate;
985  cpb_props->buffer_size = avctx->rc_buffer_size;
986 
987  return 0;
988 }
989 
991 {
993  int i;
994 
995  ff_rate_control_uninit(&s->rc_context);
996 
998  ff_refstruct_pool_uninit(&s->picture_pool);
999 
1000  if (s->input_picture && s->reordered_input_picture) {
1001  for (int i = 0; i < MAX_B_FRAMES + 1; i++) {
1002  ff_refstruct_unref(&s->input_picture[i]);
1003  ff_refstruct_unref(&s->reordered_input_picture[i]);
1004  }
1005  }
1006  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1007  av_frame_free(&s->tmp_frames[i]);
1008 
1009  av_frame_free(&s->new_pic);
1010 
1012 
1013  av_freep(&s->p_mv_table_base);
1014  av_freep(&s->b_forw_mv_table_base);
1015  av_freep(&s->b_back_mv_table_base);
1016  av_freep(&s->b_bidir_forw_mv_table_base);
1017  av_freep(&s->b_bidir_back_mv_table_base);
1018  av_freep(&s->b_direct_mv_table_base);
1019  av_freep(&s->b_field_mv_table_base);
1020  av_freep(&s->b_field_select_table[0][0]);
1021  av_freep(&s->p_field_select_table[0]);
1022 
1023  av_freep(&s->mb_type);
1024  av_freep(&s->lambda_table);
1025 
1026  av_freep(&s->cplx_tab);
1027  av_freep(&s->bits_tab);
1028 
1029  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1030  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1031  s->q_chroma_intra_matrix= NULL;
1032  s->q_chroma_intra_matrix16= NULL;
1033  av_freep(&s->q_intra_matrix);
1034  av_freep(&s->q_inter_matrix);
1035  av_freep(&s->q_intra_matrix16);
1036  av_freep(&s->q_inter_matrix16);
1037  av_freep(&s->input_picture);
1038  av_freep(&s->reordered_input_picture);
1039  av_freep(&s->dct_offset);
1040  av_freep(&s->mb_var);
1041  av_freep(&s->mc_mb_var);
1042  av_freep(&s->mb_mean);
1043 
1044  return 0;
1045 }
1046 
1047 #define IS_ENCODER 1
1049 
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    /* Optional debug dump: with FF_DEBUG_DCT_COEFF set, print the
     * coefficients of the first 6 blocks of the current macroblock in
     * IDCT-permuted order (via s->idsp.idct_permutation). */
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

}
1066 
/**
 * Sum of absolute differences between a 16x16 block and a constant
 * reference value (used as a flatness measure around the block mean).
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff >= 0 ? diff : -diff;
        }
    }

    return sum;
}
1080 
1081 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1082  const uint8_t *ref, int stride)
1083 {
1084  int x, y, w, h;
1085  int acc = 0;
1086 
1087  w = s->width & ~15;
1088  h = s->height & ~15;
1089 
1090  for (y = 0; y < h; y += 16) {
1091  for (x = 0; x < w; x += 16) {
1092  int offset = x + y * stride;
1093  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1094  stride, 16);
1095  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1096  int sae = get_sae(src + offset, mean, stride);
1097 
1098  acc += sae + 500 < sad;
1099  }
1100  }
1101  return acc;
1102 }
1103 
1104 /**
1105  * Allocates new buffers for an AVFrame and copies the properties
1106  * from another AVFrame.
1107  */
1108 static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
1109 {
1110  AVCodecContext *avctx = s->avctx;
1111  int ret;
1112 
1113  f->width = avctx->width + 2 * EDGE_WIDTH;
1114  f->height = avctx->height + 2 * EDGE_WIDTH;
1115 
1117  if (ret < 0)
1118  return ret;
1119 
1120  ret = ff_mpv_pic_check_linesize(avctx, f, &s->linesize, &s->uvlinesize);
1121  if (ret < 0)
1122  return ret;
1123 
1124  for (int i = 0; f->data[i]; i++) {
1125  int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
1126  f->linesize[i] +
1127  (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
1128  f->data[i] += offset;
1129  }
1130  f->width = avctx->width;
1131  f->height = avctx->height;
1132 
1133  ret = av_frame_copy_props(f, props_frame);
1134  if (ret < 0)
1135  return ret;
1136 
1137  return 0;
1138 }
1139 
/**
 * Take one user-supplied input frame (or NULL to signal flushing), give it
 * a pts, optionally copy it into an encoder-owned buffer, and append it to
 * the s->input_picture[] reorder queue.
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!s->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            /* pts must be strictly increasing */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* The input frame can only be referenced in place ("direct") when
         * its strides, dimensions and alignment match the encoder's. */
        if (pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        pic = ff_refstruct_pool_get(s->picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* copy the input plane by plane into a padded, aligned buffer */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                int h_shift = i ? s->chroma_x_shift : 0;
                int v_shift = i ? s->chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->width , h_shift);
                int h = AV_CEIL_RSHIFT(s->height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 whose height needs more than 16 lines
                 * of padding is padded to a multiple of 32 instead */
                if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->progressive_sequence
                    && FFALIGN(s->height, 32) - s->height > 16)
                    vpad = 32;

                if (!s->avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* extend the picture to macroblock-aligned dimensions */
                if ((s->width & 15) || (s->height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!s->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure s->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = pic;

    return 0;
fail:
    ff_refstruct_unref(&pic);
    return ret;
}
1277 
1278 static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
1279 {
1280  int x, y, plane;
1281  int score = 0;
1282  int64_t score64 = 0;
1283 
1284  for (plane = 0; plane < 3; plane++) {
1285  const int stride = p->f->linesize[plane];
1286  const int bw = plane ? 1 : 2;
1287  for (y = 0; y < s->mb_height * bw; y++) {
1288  for (x = 0; x < s->mb_width * bw; x++) {
1289  int off = p->shared ? 0 : 16;
1290  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1291  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1292  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1293 
1294  switch (FFABS(s->frame_skip_exp)) {
1295  case 0: score = FFMAX(score, v); break;
1296  case 1: score += FFABS(v); break;
1297  case 2: score64 += v * (int64_t)v; break;
1298  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1299  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1300  }
1301  }
1302  }
1303  }
1304  emms_c();
1305 
1306  if (score)
1307  score64 = score;
1308  if (s->frame_skip_exp < 0)
1309  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1310  -1.0/s->frame_skip_exp);
1311 
1312  if (score64 < s->frame_skip_threshold)
1313  return 1;
1314  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1315  return 1;
1316  return 0;
1317 }
1318 
1320 {
1321  int ret;
1322  int size = 0;
1323 
1325  if (ret < 0)
1326  return ret;
1327 
1328  do {
1330  if (ret >= 0) {
1331  size += pkt->size;
1333  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1334  return ret;
1335  } while (ret >= 0);
1336 
1337  return size;
1338 }
1339 
1341 {
1342  AVPacket *pkt;
1343  const int scale = s->brd_scale;
1344  int width = s->width >> scale;
1345  int height = s->height >> scale;
1346  int i, j, out_size, p_lambda, b_lambda, lambda2;
1347  int64_t best_rd = INT64_MAX;
1348  int best_b_count = -1;
1349  int ret = 0;
1350 
1351  av_assert0(scale >= 0 && scale <= 3);
1352 
1353  pkt = av_packet_alloc();
1354  if (!pkt)
1355  return AVERROR(ENOMEM);
1356 
1357  //emms_c();
1358  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1359  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1360  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1361  if (!b_lambda) // FIXME we should do this somewhere else
1362  b_lambda = p_lambda;
1363  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1365 
1366  for (i = 0; i < s->max_b_frames + 2; i++) {
1367  const MPVPicture *pre_input_ptr = i ? s->input_picture[i - 1] :
1368  s->next_pic.ptr;
1369 
1370  if (pre_input_ptr) {
1371  const uint8_t *data[4];
1372  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1373 
1374  if (!pre_input_ptr->shared && i) {
1375  data[0] += INPLACE_OFFSET;
1376  data[1] += INPLACE_OFFSET;
1377  data[2] += INPLACE_OFFSET;
1378  }
1379 
1380  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1381  s->tmp_frames[i]->linesize[0],
1382  data[0],
1383  pre_input_ptr->f->linesize[0],
1384  width, height);
1385  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1386  s->tmp_frames[i]->linesize[1],
1387  data[1],
1388  pre_input_ptr->f->linesize[1],
1389  width >> 1, height >> 1);
1390  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1391  s->tmp_frames[i]->linesize[2],
1392  data[2],
1393  pre_input_ptr->f->linesize[2],
1394  width >> 1, height >> 1);
1395  }
1396  }
1397 
1398  for (j = 0; j < s->max_b_frames + 1; j++) {
1399  AVCodecContext *c;
1400  int64_t rd = 0;
1401 
1402  if (!s->input_picture[j])
1403  break;
1404 
1406  if (!c) {
1407  ret = AVERROR(ENOMEM);
1408  goto fail;
1409  }
1410 
1411  c->width = width;
1412  c->height = height;
1414  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1415  c->mb_decision = s->avctx->mb_decision;
1416  c->me_cmp = s->avctx->me_cmp;
1417  c->mb_cmp = s->avctx->mb_cmp;
1418  c->me_sub_cmp = s->avctx->me_sub_cmp;
1419  c->pix_fmt = AV_PIX_FMT_YUV420P;
1420  c->time_base = s->avctx->time_base;
1421  c->max_b_frames = s->max_b_frames;
1422 
1423  ret = avcodec_open2(c, s->avctx->codec, NULL);
1424  if (ret < 0)
1425  goto fail;
1426 
1427 
1428  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1429  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1430 
1431  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1432  if (out_size < 0) {
1433  ret = out_size;
1434  goto fail;
1435  }
1436 
1437  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1438 
1439  for (i = 0; i < s->max_b_frames + 1; i++) {
1440  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1441 
1442  s->tmp_frames[i + 1]->pict_type = is_p ?
1444  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1445 
1446  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1447  if (out_size < 0) {
1448  ret = out_size;
1449  goto fail;
1450  }
1451 
1452  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1453  }
1454 
1455  /* get the delayed frames */
1457  if (out_size < 0) {
1458  ret = out_size;
1459  goto fail;
1460  }
1461  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1462 
1463  rd += c->error[0] + c->error[1] + c->error[2];
1464 
1465  if (rd < best_rd) {
1466  best_rd = rd;
1467  best_b_count = j;
1468  }
1469 
1470 fail:
1473  if (ret < 0) {
1474  best_b_count = ret;
1475  break;
1476  }
1477  }
1478 
1479  av_packet_free(&pkt);
1480 
1481  return best_b_count;
1482 }
1483 
1484 /**
1485  * Determines whether an input picture is discarded or not
1486  * and if not determines the length of the next chain of B frames
1487  * and moves these pictures (including the P frame) into
1488  * reordered_input_picture.
1489  * input_picture[0] is always NULL when exiting this function, even on error;
1490  * reordered_input_picture[0] is always NULL when exiting this function on error.
1491  */
1493 {
1494  /* Either nothing to do or can't do anything */
1495  if (s->reordered_input_picture[0] || !s->input_picture[0])
1496  return 0;
1497 
1498  /* set next picture type & ordering */
1499  if (s->frame_skip_threshold || s->frame_skip_factor) {
1500  if (s->picture_in_gop_number < s->gop_size &&
1501  s->next_pic.ptr &&
1502  skip_check(s, s->input_picture[0], s->next_pic.ptr)) {
1503  // FIXME check that the gop check above is +-1 correct
1504  ff_refstruct_unref(&s->input_picture[0]);
1505 
1506  ff_vbv_update(s, 0);
1507 
1508  return 0;
1509  }
1510  }
1511 
1512  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1513  !s->next_pic.ptr || s->intra_only) {
1514  s->reordered_input_picture[0] = s->input_picture[0];
1515  s->input_picture[0] = NULL;
1516  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1517  s->reordered_input_picture[0]->coded_picture_number =
1518  s->coded_picture_number++;
1519  } else {
1520  int b_frames = 0;
1521 
1522  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1523  for (int i = 0; i < s->max_b_frames + 1; i++) {
1524  int pict_num = s->input_picture[0]->display_picture_number + i;
1525 
1526  if (pict_num >= s->rc_context.num_entries)
1527  break;
1528  if (!s->input_picture[i]) {
1529  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1530  break;
1531  }
1532 
1533  s->input_picture[i]->f->pict_type =
1534  s->rc_context.entry[pict_num].new_pict_type;
1535  }
1536  }
1537 
1538  if (s->b_frame_strategy == 0) {
1539  b_frames = s->max_b_frames;
1540  while (b_frames && !s->input_picture[b_frames])
1541  b_frames--;
1542  } else if (s->b_frame_strategy == 1) {
1543  int i;
1544  for (i = 1; i < s->max_b_frames + 1; i++) {
1545  if (s->input_picture[i] &&
1546  s->input_picture[i]->b_frame_score == 0) {
1547  s->input_picture[i]->b_frame_score =
1549  s->input_picture[i ]->f->data[0],
1550  s->input_picture[i - 1]->f->data[0],
1551  s->linesize) + 1;
1552  }
1553  }
1554  for (i = 0; i < s->max_b_frames + 1; i++) {
1555  if (!s->input_picture[i] ||
1556  s->input_picture[i]->b_frame_score - 1 >
1557  s->mb_num / s->b_sensitivity)
1558  break;
1559  }
1560 
1561  b_frames = FFMAX(0, i - 1);
1562 
1563  /* reset scores */
1564  for (i = 0; i < b_frames + 1; i++) {
1565  s->input_picture[i]->b_frame_score = 0;
1566  }
1567  } else if (s->b_frame_strategy == 2) {
1568  b_frames = estimate_best_b_count(s);
1569  if (b_frames < 0) {
1570  ff_refstruct_unref(&s->input_picture[0]);
1571  return b_frames;
1572  }
1573  }
1574 
1575  emms_c();
1576 
1577  for (int i = b_frames - 1; i >= 0; i--) {
1578  int type = s->input_picture[i]->f->pict_type;
1579  if (type && type != AV_PICTURE_TYPE_B)
1580  b_frames = i;
1581  }
1582  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1583  b_frames == s->max_b_frames) {
1584  av_log(s->avctx, AV_LOG_ERROR,
1585  "warning, too many B-frames in a row\n");
1586  }
1587 
1588  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1589  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1590  s->gop_size > s->picture_in_gop_number) {
1591  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1592  } else {
1593  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1594  b_frames = 0;
1595  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1596  }
1597  }
1598 
1599  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1600  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1601  b_frames--;
1602 
1603  s->reordered_input_picture[0] = s->input_picture[b_frames];
1604  s->input_picture[b_frames] = NULL;
1605  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1606  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1607  s->reordered_input_picture[0]->coded_picture_number =
1608  s->coded_picture_number++;
1609  for (int i = 0; i < b_frames; i++) {
1610  s->reordered_input_picture[i + 1] = s->input_picture[i];
1611  s->input_picture[i] = NULL;
1612  s->reordered_input_picture[i + 1]->f->pict_type =
1614  s->reordered_input_picture[i + 1]->coded_picture_number =
1615  s->coded_picture_number++;
1616  }
1617  }
1618 
1619  return 0;
1620 }
1621 
1623 {
1624  int ret;
1625 
1626  av_assert1(!s->reordered_input_picture[0]);
1627 
1628  for (int i = 1; i <= MAX_B_FRAMES; i++)
1629  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1630  s->reordered_input_picture[MAX_B_FRAMES] = NULL;
1631 
1633  av_assert1(!s->input_picture[0]);
1634  if (ret < 0)
1635  return ret;
1636 
1637  av_frame_unref(s->new_pic);
1638 
1639  if (s->reordered_input_picture[0]) {
1640  s->reordered_input_picture[0]->reference =
1641  s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_B;
1642 
1643  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1644  // input is a shared pix, so we can't modify it -> allocate a new
1645  // one & ensure that the shared one is reuseable
1646  av_frame_move_ref(s->new_pic, s->reordered_input_picture[0]->f);
1647 
1648  ret = prepare_picture(s, s->reordered_input_picture[0]->f, s->new_pic);
1649  if (ret < 0)
1650  goto fail;
1651  } else {
1652  // input is not a shared pix -> reuse buffer for current_pix
1653  ret = av_frame_ref(s->new_pic, s->reordered_input_picture[0]->f);
1654  if (ret < 0)
1655  goto fail;
1656  for (int i = 0; i < MPV_MAX_PLANES; i++) {
1657  if (s->new_pic->data[i])
1658  s->new_pic->data[i] += INPLACE_OFFSET;
1659  }
1660  }
1661  s->cur_pic.ptr = s->reordered_input_picture[0];
1662  s->reordered_input_picture[0] = NULL;
1663  av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
1664  av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height);
1665  av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
1666  ret = ff_mpv_alloc_pic_accessories(s->avctx, &s->cur_pic,
1667  &s->sc, &s->buffer_pools, s->mb_height);
1668  if (ret < 0) {
1669  ff_mpv_unref_picture(&s->cur_pic);
1670  return ret;
1671  }
1672  s->picture_number = s->cur_pic.ptr->display_picture_number;
1673 
1674  }
1675  return 0;
1676 fail:
1677  ff_refstruct_unref(&s->reordered_input_picture[0]);
1678  return ret;
1679 }
1680 
1682 {
1683  if (s->unrestricted_mv &&
1684  s->cur_pic.reference &&
1685  !s->intra_only) {
1686  int hshift = s->chroma_x_shift;
1687  int vshift = s->chroma_y_shift;
1688  s->mpvencdsp.draw_edges(s->cur_pic.data[0],
1689  s->cur_pic.linesize[0],
1690  s->h_edge_pos, s->v_edge_pos,
1692  EDGE_TOP | EDGE_BOTTOM);
1693  s->mpvencdsp.draw_edges(s->cur_pic.data[1],
1694  s->cur_pic.linesize[1],
1695  s->h_edge_pos >> hshift,
1696  s->v_edge_pos >> vshift,
1697  EDGE_WIDTH >> hshift,
1698  EDGE_WIDTH >> vshift,
1699  EDGE_TOP | EDGE_BOTTOM);
1700  s->mpvencdsp.draw_edges(s->cur_pic.data[2],
1701  s->cur_pic.linesize[2],
1702  s->h_edge_pos >> hshift,
1703  s->v_edge_pos >> vshift,
1704  EDGE_WIDTH >> hshift,
1705  EDGE_WIDTH >> vshift,
1706  EDGE_TOP | EDGE_BOTTOM);
1707  }
1708 
1709  emms_c();
1710 
1711  s->last_pict_type = s->pict_type;
1712  s->last_lambda_for [s->pict_type] = s->cur_pic.ptr->f->quality;
1713  if (s->pict_type!= AV_PICTURE_TYPE_B)
1714  s->last_non_b_pict_type = s->pict_type;
1715 }
1716 
1718 {
1719  int intra, i;
1720 
1721  for (intra = 0; intra < 2; intra++) {
1722  if (s->dct_count[intra] > (1 << 16)) {
1723  for (i = 0; i < 64; i++) {
1724  s->dct_error_sum[intra][i] >>= 1;
1725  }
1726  s->dct_count[intra] >>= 1;
1727  }
1728 
1729  for (i = 0; i < 64; i++) {
1730  s->dct_offset[intra][i] = (s->noise_reduction *
1731  s->dct_count[intra] +
1732  s->dct_error_sum[intra][i] / 2) /
1733  (s->dct_error_sum[intra][i] + 1);
1734  }
1735  }
1736 }
1737 
1739 {
1740  s->cur_pic.ptr->f->pict_type = s->pict_type;
1741 
1742  if (s->pict_type != AV_PICTURE_TYPE_B) {
1743  ff_mpv_replace_picture(&s->last_pic, &s->next_pic);
1744  ff_mpv_replace_picture(&s->next_pic, &s->cur_pic);
1745  }
1746 
1747  if (s->dct_error_sum) {
1748  av_assert2(s->noise_reduction && s->encoding);
1750  }
1751 }
1752 
                          const AVFrame *pic_arg, int *got_packet)
{
    int stuffing_count, ret;
    int context_count = s->slice_context_count;

    /* NOTE(review): the first line of this function's signature and a
     * number of interior lines are elided in this extract. Visible flow:
     * queue/reorder the input, encode one picture, retry under VBV
     * pressure with a larger lambda, add stuffing, and fill the packet. */
    ff_mpv_unref_picture(&s->cur_pic);

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_pic->data[0]) {
        int growing_buffer = context_count == 1 && !s->data_partitioning;
        size_t pkt_size = 10000 + s->mb_width * s->mb_height *
                          (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
        if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
            ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
            if (ret < 0)
                return ret;
        }
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        if (s->mb_info) {
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        s->pict_type = s->new_pic->pict_type;
        //emms_c();
        frame_start(s);
vbv_retry:
        ret = encode_picture(s, pkt);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
        }
        if (ret < 0)
            return -1;

        frame_end(s);

        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV check: if the coded frame is too large, raise lambda and
         * re-encode the same picture */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

        }

        /* export per-plane error and encoder stats */
        for (int i = 0; i < MPV_MAX_PLANES; i++)
            avctx->error[i] += s->encoding_error[i];
        ff_side_data_set_encoder_stats(pkt, s->cur_pic.ptr->f->quality,
                                       s->encoding_error,
                                       s->pict_type);

        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                         s->misc_bits + s->i_tex_bits +
                                         s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* pad the frame with codec-specific stuffing if VBV requires it */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits = avctx->rc_max_rate *
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_pos - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;
            uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;

            if (bits < 0)
                "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field in the already-written
             * picture header, bit offset s->vbv_delay_pos */
            vbv_delay_ptr[0] &= 0xF8;
            vbv_delay_ptr[0] |= vbv_delay >> 13;
            vbv_delay_ptr[1] = vbv_delay >> 5;
            vbv_delay_ptr[2] &= 0x07;
            vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

                                                  (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        s->total_bits += s->frame_bits;

        /* dts/pts: delay dts by one non-B picture when B frames are used */
        pkt->pts = s->cur_pic.ptr->f->pts;
        pkt->duration = s->cur_pic.ptr->f->duration;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->cur_pic.ptr->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;

        // the no-delay case is handled in generic code
            ret = ff_encode_reordered_opaque(avctx, pkt, s->cur_pic.ptr->f);
            if (ret < 0)
                return ret;
        }

        if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
        if (s->mb_info)
    } else {
        s->frame_bits = 0;
    }

    ff_mpv_unref_picture(&s->cur_pic);

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1982 
                                                int n, int threshold)
{
    /* Zero out block n entirely when it contains only a few scattered +-1
     * coefficients whose "cost" (per the run-length table below) stays
     * under the threshold — cheaper than coding them. */

    /* cost indexed by the zero-run length preceding a +-1 coefficient:
     * isolated small coefficients early in the scan cost the most */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    /* a negative threshold means the DC coefficient may be zeroed too */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* score the block; any coefficient with magnitude > 1 disqualifies it */
    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap enough: clear every eliminable coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2038 
2039 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2040  int last_index)
2041 {
2042  int i;
2043  const int maxlevel = s->max_qcoeff;
2044  const int minlevel = s->min_qcoeff;
2045  int overflow = 0;
2046 
2047  if (s->mb_intra) {
2048  i = 1; // skip clipping of intra dc
2049  } else
2050  i = 0;
2051 
2052  for (; i <= last_index; i++) {
2053  const int j = s->intra_scantable.permutated[i];
2054  int level = block[j];
2055 
2056  if (level > maxlevel) {
2057  level = maxlevel;
2058  overflow++;
2059  } else if (level < minlevel) {
2060  level = minlevel;
2061  overflow++;
2062  }
2063 
2064  block[j] = level;
2065  }
2066 
2067  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2068  av_log(s->avctx, AV_LOG_INFO,
2069  "warning, clipping %d dct coefficients to %d..%d\n",
2070  overflow, minlevel, maxlevel);
2071 }
2072 
/**
 * Compute a perceptual weight for each pixel of an 8x8 block from the
 * standard deviation of its (clipped) 3x3 neighborhood: flat areas get
 * low weights, textured areas high ones.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);

            /* accumulate sum and sum of squares over the neighborhood,
             * clipped to the 8x8 block */
            for (int ny = y_lo; ny < y_hi; ny++) {
                for (int nx = x_lo; nx < x_hi; nx++) {
                    const int pix = ptr[nx + ny * stride];
                    sum += pix;
                    sqr += pix * pix;
                    count++;
                }
            }
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2096 
2098  int motion_x, int motion_y,
2099  int mb_block_height,
2100  int mb_block_width,
2101  int mb_block_count,
2102  int chroma_x_shift,
2103  int chroma_y_shift,
2104  int chroma_format)
2105 {
2106 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2107  * and neither of these encoders currently supports 444. */
2108 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2109  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2110  int16_t weight[12][64];
2111  int16_t orig[12][64];
2112  const int mb_x = s->mb_x;
2113  const int mb_y = s->mb_y;
2114  int i;
2115  int skip_dct[12];
2116  int dct_offset = s->linesize * 8; // default for progressive frames
2117  int uv_dct_offset = s->uvlinesize * 8;
2118  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2119  ptrdiff_t wrap_y, wrap_c;
2120 
2121  for (i = 0; i < mb_block_count; i++)
2122  skip_dct[i] = s->skipdct;
2123 
2124  if (s->adaptive_quant) {
2125  const int last_qp = s->qscale;
2126  const int mb_xy = mb_x + mb_y * s->mb_stride;
2127 
2128  s->lambda = s->lambda_table[mb_xy];
2129  update_qscale(s);
2130 
2131  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2132  s->qscale = s->cur_pic.qscale_table[mb_xy];
2133  s->dquant = s->qscale - last_qp;
2134 
2135  if (s->out_format == FMT_H263) {
2136  s->dquant = av_clip(s->dquant, -2, 2);
2137 
2138  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2139  if (!s->mb_intra) {
2140  if (s->pict_type == AV_PICTURE_TYPE_B) {
2141  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2142  s->dquant = 0;
2143  }
2144  if (s->mv_type == MV_TYPE_8X8)
2145  s->dquant = 0;
2146  }
2147  }
2148  }
2149  }
2150  ff_set_qscale(s, last_qp + s->dquant);
2151  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2152  ff_set_qscale(s, s->qscale + s->dquant);
2153 
2154  wrap_y = s->linesize;
2155  wrap_c = s->uvlinesize;
2156  ptr_y = s->new_pic->data[0] +
2157  (mb_y * 16 * wrap_y) + mb_x * 16;
2158  ptr_cb = s->new_pic->data[1] +
2159  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2160  ptr_cr = s->new_pic->data[2] +
2161  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2162 
2163  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2164  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2165  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2166  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2167  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2168  wrap_y, wrap_y,
2169  16, 16, mb_x * 16, mb_y * 16,
2170  s->width, s->height);
2171  ptr_y = ebuf;
2172  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2173  wrap_c, wrap_c,
2174  mb_block_width, mb_block_height,
2175  mb_x * mb_block_width, mb_y * mb_block_height,
2176  cw, ch);
2177  ptr_cb = ebuf + 16 * wrap_y;
2178  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2179  wrap_c, wrap_c,
2180  mb_block_width, mb_block_height,
2181  mb_x * mb_block_width, mb_y * mb_block_height,
2182  cw, ch);
2183  ptr_cr = ebuf + 16 * wrap_y + 16;
2184  }
2185 
2186  if (s->mb_intra) {
2187  if (INTERLACED_DCT(s)) {
2188  int progressive_score, interlaced_score;
2189 
2190  s->interlaced_dct = 0;
2191  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2192  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2193  NULL, wrap_y, 8) - 400;
2194 
2195  if (progressive_score > 0) {
2196  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2197  NULL, wrap_y * 2, 8) +
2198  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2199  NULL, wrap_y * 2, 8);
2200  if (progressive_score > interlaced_score) {
2201  s->interlaced_dct = 1;
2202 
2203  dct_offset = wrap_y;
2204  uv_dct_offset = wrap_c;
2205  wrap_y <<= 1;
2206  if (chroma_format == CHROMA_422 ||
2208  wrap_c <<= 1;
2209  }
2210  }
2211  }
2212 
2213  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2214  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2215  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2216  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2217 
2218  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2219  skip_dct[4] = 1;
2220  skip_dct[5] = 1;
2221  } else {
2222  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2223  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2224  if (chroma_format == CHROMA_422) {
2225  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2226  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2227  } else if (chroma_format == CHROMA_444) {
2228  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2229  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2230  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2231  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2232  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2233  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2234  }
2235  }
2236  } else {
2237  op_pixels_func (*op_pix)[4];
2238  qpel_mc_func (*op_qpix)[16];
2239  uint8_t *dest_y, *dest_cb, *dest_cr;
2240 
2241  dest_y = s->dest[0];
2242  dest_cb = s->dest[1];
2243  dest_cr = s->dest[2];
2244 
2245  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2246  op_pix = s->hdsp.put_pixels_tab;
2247  op_qpix = s->qdsp.put_qpel_pixels_tab;
2248  } else {
2249  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2250  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2251  }
2252 
2253  if (s->mv_dir & MV_DIR_FORWARD) {
2254  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2255  s->last_pic.data,
2256  op_pix, op_qpix);
2257  op_pix = s->hdsp.avg_pixels_tab;
2258  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2259  }
2260  if (s->mv_dir & MV_DIR_BACKWARD) {
2261  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2262  s->next_pic.data,
2263  op_pix, op_qpix);
2264  }
2265 
2266  if (INTERLACED_DCT(s)) {
2267  int progressive_score, interlaced_score;
2268 
2269  s->interlaced_dct = 0;
2270  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2271  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2272  ptr_y + wrap_y * 8,
2273  wrap_y, 8) - 400;
2274 
2275  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2276  progressive_score -= 400;
2277 
2278  if (progressive_score > 0) {
2279  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2280  wrap_y * 2, 8) +
2281  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2282  ptr_y + wrap_y,
2283  wrap_y * 2, 8);
2284 
2285  if (progressive_score > interlaced_score) {
2286  s->interlaced_dct = 1;
2287 
2288  dct_offset = wrap_y;
2289  uv_dct_offset = wrap_c;
2290  wrap_y <<= 1;
2291  if (chroma_format == CHROMA_422)
2292  wrap_c <<= 1;
2293  }
2294  }
2295  }
2296 
2297  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2298  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2299  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2300  dest_y + dct_offset, wrap_y);
2301  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2302  dest_y + dct_offset + 8, wrap_y);
2303 
2304  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2305  skip_dct[4] = 1;
2306  skip_dct[5] = 1;
2307  } else {
2308  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2309  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2310  if (!chroma_y_shift) { /* 422 */
2311  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2312  dest_cb + uv_dct_offset, wrap_c);
2313  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2314  dest_cr + uv_dct_offset, wrap_c);
2315  }
2316  }
2317  /* pre quantization */
2318  if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
2319  // FIXME optimize
2320  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2321  skip_dct[0] = 1;
2322  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2323  skip_dct[1] = 1;
2324  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2325  wrap_y, 8) < 20 * s->qscale)
2326  skip_dct[2] = 1;
2327  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2328  wrap_y, 8) < 20 * s->qscale)
2329  skip_dct[3] = 1;
2330  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2331  skip_dct[4] = 1;
2332  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2333  skip_dct[5] = 1;
2334  if (!chroma_y_shift) { /* 422 */
2335  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2336  dest_cb + uv_dct_offset,
2337  wrap_c, 8) < 20 * s->qscale)
2338  skip_dct[6] = 1;
2339  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2340  dest_cr + uv_dct_offset,
2341  wrap_c, 8) < 20 * s->qscale)
2342  skip_dct[7] = 1;
2343  }
2344  }
2345  }
2346 
2347  if (s->quantizer_noise_shaping) {
2348  if (!skip_dct[0])
2349  get_visual_weight(weight[0], ptr_y , wrap_y);
2350  if (!skip_dct[1])
2351  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2352  if (!skip_dct[2])
2353  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2354  if (!skip_dct[3])
2355  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2356  if (!skip_dct[4])
2357  get_visual_weight(weight[4], ptr_cb , wrap_c);
2358  if (!skip_dct[5])
2359  get_visual_weight(weight[5], ptr_cr , wrap_c);
2360  if (!chroma_y_shift) { /* 422 */
2361  if (!skip_dct[6])
2362  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2363  wrap_c);
2364  if (!skip_dct[7])
2365  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2366  wrap_c);
2367  }
2368  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2369  }
2370 
2371  /* DCT & quantize */
2372  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2373  {
2374  for (i = 0; i < mb_block_count; i++) {
2375  if (!skip_dct[i]) {
2376  int overflow;
2377  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2378  // FIXME we could decide to change to quantizer instead of
2379  // clipping
2380  // JS: I don't think that would be a good idea it could lower
2381  // quality instead of improve it. Just INTRADC clipping
2382  // deserves changes in quantizer
2383  if (overflow)
2384  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2385  } else
2386  s->block_last_index[i] = -1;
2387  }
2388  if (s->quantizer_noise_shaping) {
2389  for (i = 0; i < mb_block_count; i++) {
2390  if (!skip_dct[i]) {
2391  s->block_last_index[i] =
2392  dct_quantize_refine(s, s->block[i], weight[i],
2393  orig[i], i, s->qscale);
2394  }
2395  }
2396  }
2397 
2398  if (s->luma_elim_threshold && !s->mb_intra)
2399  for (i = 0; i < 4; i++)
2400  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2401  if (s->chroma_elim_threshold && !s->mb_intra)
2402  for (i = 4; i < mb_block_count; i++)
2403  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2404 
2405  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2406  for (i = 0; i < mb_block_count; i++) {
2407  if (s->block_last_index[i] == -1)
2408  s->coded_score[i] = INT_MAX / 256;
2409  }
2410  }
2411  }
2412 
2413  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2414  s->block_last_index[4] =
2415  s->block_last_index[5] = 0;
2416  s->block[4][0] =
2417  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2418  if (!chroma_y_shift) { /* 422 / 444 */
2419  for (i=6; i<12; i++) {
2420  s->block_last_index[i] = 0;
2421  s->block[i][0] = s->block[4][0];
2422  }
2423  }
2424  }
2425 
2426  // non c quantize code returns incorrect block_last_index FIXME
2427  if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
2428  for (i = 0; i < mb_block_count; i++) {
2429  int j;
2430  if (s->block_last_index[i] > 0) {
2431  for (j = 63; j > 0; j--) {
2432  if (s->block[i][s->intra_scantable.permutated[j]])
2433  break;
2434  }
2435  s->block_last_index[i] = j;
2436  }
2437  }
2438  }
2439 
2440  /* huffman encode */
2441  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2444  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2445  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2446  break;
2447  case AV_CODEC_ID_MPEG4:
2448  if (CONFIG_MPEG4_ENCODER)
2449  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2450  break;
2451  case AV_CODEC_ID_MSMPEG4V2:
2452  case AV_CODEC_ID_MSMPEG4V3:
2453  case AV_CODEC_ID_WMV1:
2454  if (CONFIG_MSMPEG4ENC)
2455  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2456  break;
2457  case AV_CODEC_ID_WMV2:
2458  if (CONFIG_WMV2_ENCODER)
2459  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2460  break;
2461  case AV_CODEC_ID_H261:
2462  if (CONFIG_H261_ENCODER)
2463  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2464  break;
2465  case AV_CODEC_ID_H263:
2466  case AV_CODEC_ID_H263P:
2467  case AV_CODEC_ID_FLV1:
2468  case AV_CODEC_ID_RV10:
2469  case AV_CODEC_ID_RV20:
2470  if (CONFIG_H263_ENCODER)
2471  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2472  break;
2473 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2474  case AV_CODEC_ID_MJPEG:
2475  case AV_CODEC_ID_AMV:
2476  ff_mjpeg_encode_mb(s, s->block);
2477  break;
2478 #endif
2479  case AV_CODEC_ID_SPEEDHQ:
2480  if (CONFIG_SPEEDHQ_ENCODER)
2481  ff_speedhq_encode_mb(s, s->block);
2482  break;
2483  default:
2484  av_assert1(0);
2485  }
2486 }
2487 
2488 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2489 {
2490  if (s->chroma_format == CHROMA_420)
2491  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2492  else if (s->chroma_format == CHROMA_422)
2493  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2494  else
2495  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2496 }
2497 
2499  const MpegEncContext *s)
2500 {
2501  int i;
2502 
2503  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2504 
2505  /* MPEG-1 */
2506  d->mb_skip_run= s->mb_skip_run;
2507  for(i=0; i<3; i++)
2508  d->last_dc[i] = s->last_dc[i];
2509 
2510  /* statistics */
2511  d->mv_bits= s->mv_bits;
2512  d->i_tex_bits= s->i_tex_bits;
2513  d->p_tex_bits= s->p_tex_bits;
2514  d->i_count= s->i_count;
2515  d->misc_bits= s->misc_bits;
2516  d->last_bits= 0;
2517 
2518  d->mb_skipped= 0;
2519  d->qscale= s->qscale;
2520  d->dquant= s->dquant;
2521 
2522  d->esc3_level_length= s->esc3_level_length;
2523 }
2524 
2526  const MpegEncContext *s)
2527 {
2528  int i;
2529 
2530  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2531  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2532 
2533  /* MPEG-1 */
2534  d->mb_skip_run= s->mb_skip_run;
2535  for(i=0; i<3; i++)
2536  d->last_dc[i] = s->last_dc[i];
2537 
2538  /* statistics */
2539  d->mv_bits= s->mv_bits;
2540  d->i_tex_bits= s->i_tex_bits;
2541  d->p_tex_bits= s->p_tex_bits;
2542  d->i_count= s->i_count;
2543  d->misc_bits= s->misc_bits;
2544 
2545  d->mb_intra= s->mb_intra;
2546  d->mb_skipped= s->mb_skipped;
2547  d->mv_type= s->mv_type;
2548  d->mv_dir= s->mv_dir;
2549  d->pb= s->pb;
2550  if(s->data_partitioning){
2551  d->pb2= s->pb2;
2552  d->tex_pb= s->tex_pb;
2553  }
2554  d->block= s->block;
2555  for(i=0; i<8; i++)
2556  d->block_last_index[i]= s->block_last_index[i];
2557  d->interlaced_dct= s->interlaced_dct;
2558  d->qscale= s->qscale;
2559 
2560  d->esc3_level_length= s->esc3_level_length;
2561 }
2562 
2563 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best,
2565  int *dmin, int *next_block, int motion_x, int motion_y)
2566 {
2567  int score;
2568  uint8_t *dest_backup[3];
2569 
2570  copy_context_before_encode(s, backup);
2571 
2572  s->block= s->blocks[*next_block];
2573  s->pb= pb[*next_block];
2574  if(s->data_partitioning){
2575  s->pb2 = pb2 [*next_block];
2576  s->tex_pb= tex_pb[*next_block];
2577  }
2578 
2579  if(*next_block){
2580  memcpy(dest_backup, s->dest, sizeof(s->dest));
2581  s->dest[0] = s->sc.rd_scratchpad;
2582  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2583  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2584  av_assert0(s->linesize >= 32); //FIXME
2585  }
2586 
2587  encode_mb(s, motion_x, motion_y);
2588 
2589  score= put_bits_count(&s->pb);
2590  if(s->data_partitioning){
2591  score+= put_bits_count(&s->pb2);
2592  score+= put_bits_count(&s->tex_pb);
2593  }
2594 
2595  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2596  mpv_reconstruct_mb(s, s->block);
2597 
2598  score *= s->lambda2;
2599  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2600  }
2601 
2602  if(*next_block){
2603  memcpy(s->dest, dest_backup, sizeof(s->dest));
2604  }
2605 
2606  if(score<*dmin){
2607  *dmin= score;
2608  *next_block^=1;
2609 
2611  }
2612 }
2613 
2614 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2615  const uint32_t *sq = ff_square_tab + 256;
2616  int acc=0;
2617  int x,y;
2618 
2619  if(w==16 && h==16)
2620  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2621  else if(w==8 && h==8)
2622  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2623 
2624  for(y=0; y<h; y++){
2625  for(x=0; x<w; x++){
2626  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2627  }
2628  }
2629 
2630  av_assert2(acc>=0);
2631 
2632  return acc;
2633 }
2634 
2635 static int sse_mb(MpegEncContext *s){
2636  int w= 16;
2637  int h= 16;
2638  int chroma_mb_w = w >> s->chroma_x_shift;
2639  int chroma_mb_h = h >> s->chroma_y_shift;
2640 
2641  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2642  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2643 
2644  if(w==16 && h==16)
2645  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2646  return s->mecc.nsse[0](s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2647  s->dest[0], s->linesize, 16) +
2648  s->mecc.nsse[1](s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2649  s->dest[1], s->uvlinesize, chroma_mb_h) +
2650  s->mecc.nsse[1](s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2651  s->dest[2], s->uvlinesize, chroma_mb_h);
2652  }else{
2653  return s->mecc.sse[0](NULL, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2654  s->dest[0], s->linesize, 16) +
2655  s->mecc.sse[1](NULL, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2656  s->dest[1], s->uvlinesize, chroma_mb_h) +
2657  s->mecc.sse[1](NULL, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2658  s->dest[2], s->uvlinesize, chroma_mb_h);
2659  }
2660  else
2661  return sse(s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2662  s->dest[0], w, h, s->linesize) +
2663  sse(s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2664  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2665  sse(s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2666  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2667 }
2668 
2670  MpegEncContext *s= *(void**)arg;
2671 
2672 
2673  s->me.pre_pass=1;
2674  s->me.dia_size= s->avctx->pre_dia_size;
2675  s->first_slice_line=1;
2676  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2677  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2678  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2679  }
2680  s->first_slice_line=0;
2681  }
2682 
2683  s->me.pre_pass=0;
2684 
2685  return 0;
2686 }
2687 
2689  MpegEncContext *s= *(void**)arg;
2690 
2691  s->me.dia_size= s->avctx->dia_size;
2692  s->first_slice_line=1;
2693  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2694  s->mb_x=0; //for block init below
2696  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2697  s->block_index[0]+=2;
2698  s->block_index[1]+=2;
2699  s->block_index[2]+=2;
2700  s->block_index[3]+=2;
2701 
2702  /* compute motion vector & mb_type and store in context */
2703  if(s->pict_type==AV_PICTURE_TYPE_B)
2704  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2705  else
2706  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2707  }
2708  s->first_slice_line=0;
2709  }
2710  return 0;
2711 }
2712 
2713 static int mb_var_thread(AVCodecContext *c, void *arg){
2714  MpegEncContext *s= *(void**)arg;
2715  int mb_x, mb_y;
2716 
2717  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2718  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2719  int xx = mb_x * 16;
2720  int yy = mb_y * 16;
2721  const uint8_t *pix = s->new_pic->data[0] + (yy * s->linesize) + xx;
2722  int varc;
2723  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2724 
2725  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2726  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2727 
2728  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2729  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2730  s->me.mb_var_sum_temp += varc;
2731  }
2732  }
2733  return 0;
2734 }
2735 
2737  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2738  if(s->partitioned_frame){
2740  }
2741 
2742  ff_mpeg4_stuffing(&s->pb);
2743  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2744  s->out_format == FMT_MJPEG) {
2746  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2748  }
2749 
2750  flush_put_bits(&s->pb);
2751 
2752  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2753  s->misc_bits+= get_bits_diff(s);
2754 }
2755 
2757 {
2758  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2759  int offset = put_bits_count(&s->pb);
2760  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2761  int gobn = s->mb_y / s->gob_index;
2762  int pred_x, pred_y;
2763  if (CONFIG_H263_ENCODER)
2764  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2765  bytestream_put_le32(&ptr, offset);
2766  bytestream_put_byte(&ptr, s->qscale);
2767  bytestream_put_byte(&ptr, gobn);
2768  bytestream_put_le16(&ptr, mba);
2769  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2770  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2771  /* 4MV not implemented */
2772  bytestream_put_byte(&ptr, 0); /* hmv2 */
2773  bytestream_put_byte(&ptr, 0); /* vmv2 */
2774 }
2775 
2776 static void update_mb_info(MpegEncContext *s, int startcode)
2777 {
2778  if (!s->mb_info)
2779  return;
2780  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2781  s->mb_info_size += 12;
2782  s->prev_mb_info = s->last_mb_info;
2783  }
2784  if (startcode) {
2785  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2786  /* This might have incremented mb_info_size above, and we return without
2787  * actually writing any info into that slot yet. But in that case,
2788  * this will be called again at the start of the after writing the
2789  * start code, actually writing the mb info. */
2790  return;
2791  }
2792 
2793  s->last_mb_info = put_bytes_count(&s->pb, 0);
2794  if (!s->mb_info_size)
2795  s->mb_info_size += 12;
2796  write_mb_info(s);
2797 }
2798 
2799 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2800 {
2801  if (put_bytes_left(&s->pb, 0) < threshold
2802  && s->slice_context_count == 1
2803  && s->pb.buf == s->avctx->internal->byte_buffer) {
2804  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2805 
2806  uint8_t *new_buffer = NULL;
2807  int new_buffer_size = 0;
2808 
2809  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2810  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2811  return AVERROR(ENOMEM);
2812  }
2813 
2814  emms_c();
2815 
2816  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2817  s->avctx->internal->byte_buffer_size + size_increase);
2818  if (!new_buffer)
2819  return AVERROR(ENOMEM);
2820 
2821  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2822  av_free(s->avctx->internal->byte_buffer);
2823  s->avctx->internal->byte_buffer = new_buffer;
2824  s->avctx->internal->byte_buffer_size = new_buffer_size;
2825  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2826  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2827  }
2828  if (put_bytes_left(&s->pb, 0) < threshold)
2829  return AVERROR(EINVAL);
2830  return 0;
2831 }
2832 
2833 static int encode_thread(AVCodecContext *c, void *arg){
2834  MpegEncContext *s= *(void**)arg;
2835  int mb_x, mb_y, mb_y_order;
2836  int chr_h= 16>>s->chroma_y_shift;
2837  int i, j;
2838  MpegEncContext best_s = { 0 }, backup_s;
2839  uint8_t bit_buf[2][MAX_MB_BYTES];
2840  uint8_t bit_buf2[2][MAX_MB_BYTES];
2841  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2842  PutBitContext pb[2], pb2[2], tex_pb[2];
2843 
2844  for(i=0; i<2; i++){
2845  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2846  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2847  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2848  }
2849 
2850  s->last_bits= put_bits_count(&s->pb);
2851  s->mv_bits=0;
2852  s->misc_bits=0;
2853  s->i_tex_bits=0;
2854  s->p_tex_bits=0;
2855  s->i_count=0;
2856 
2857  for(i=0; i<3; i++){
2858  /* init last dc values */
2859  /* note: quant matrix value (8) is implied here */
2860  s->last_dc[i] = 128 << s->intra_dc_precision;
2861 
2862  s->encoding_error[i] = 0;
2863  }
2864  if(s->codec_id==AV_CODEC_ID_AMV){
2865  s->last_dc[0] = 128*8/13;
2866  s->last_dc[1] = 128*8/14;
2867  s->last_dc[2] = 128*8/14;
2868  }
2869  s->mb_skip_run = 0;
2870  memset(s->last_mv, 0, sizeof(s->last_mv));
2871 
2872  s->last_mv_dir = 0;
2873 
2874  switch(s->codec_id){
2875  case AV_CODEC_ID_H263:
2876  case AV_CODEC_ID_H263P:
2877  case AV_CODEC_ID_FLV1:
2878  if (CONFIG_H263_ENCODER)
2879  s->gob_index = H263_GOB_HEIGHT(s->height);
2880  break;
2881  case AV_CODEC_ID_MPEG4:
2882  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2884  break;
2885  }
2886 
2887  s->resync_mb_x=0;
2888  s->resync_mb_y=0;
2889  s->first_slice_line = 1;
2890  s->ptr_lastgob = s->pb.buf;
2891  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2892  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2893  int first_in_slice;
2894  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2895  if (first_in_slice && mb_y_order != s->start_mb_y)
2897  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2898  } else {
2899  mb_y = mb_y_order;
2900  }
2901  s->mb_x=0;
2902  s->mb_y= mb_y;
2903 
2904  ff_set_qscale(s, s->qscale);
2906 
2907  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2908  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2909  int mb_type= s->mb_type[xy];
2910 // int d;
2911  int dmin= INT_MAX;
2912  int dir;
2913  int size_increase = s->avctx->internal->byte_buffer_size/4
2914  + s->mb_width*MAX_MB_BYTES;
2915 
2917  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2918  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2919  return -1;
2920  }
2921  if(s->data_partitioning){
2922  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2923  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2924  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2925  return -1;
2926  }
2927  }
2928 
2929  s->mb_x = mb_x;
2930  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2931  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
2932 
2933  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2935  xy= s->mb_y*s->mb_stride + s->mb_x;
2936  mb_type= s->mb_type[xy];
2937  }
2938 
2939  /* write gob / video packet header */
2940  if(s->rtp_mode){
2941  int current_packet_size, is_gob_start;
2942 
2943  current_packet_size = put_bytes_count(&s->pb, 1)
2944  - (s->ptr_lastgob - s->pb.buf);
2945 
2946  is_gob_start = s->rtp_payload_size &&
2947  current_packet_size >= s->rtp_payload_size &&
2948  mb_y + mb_x > 0;
2949 
2950  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2951 
2952  switch(s->codec_id){
2953  case AV_CODEC_ID_H263:
2954  case AV_CODEC_ID_H263P:
2955  if(!s->h263_slice_structured)
2956  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2957  break;
2959  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2961  if(s->mb_skip_run) is_gob_start=0;
2962  break;
2963  case AV_CODEC_ID_MJPEG:
2964  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2965  break;
2966  }
2967 
2968  if(is_gob_start){
2969  if(s->start_mb_y != mb_y || mb_x!=0){
2970  write_slice_end(s);
2971 
2972  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2974  }
2975  }
2976 
2977  av_assert2((put_bits_count(&s->pb)&7) == 0);
2978  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2979 
2980  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2981  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
2982  int d = 100 / s->error_rate;
2983  if(r % d == 0){
2984  current_packet_size=0;
2985  s->pb.buf_ptr= s->ptr_lastgob;
2986  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2987  }
2988  }
2989 
2990  switch(s->codec_id){
2991  case AV_CODEC_ID_MPEG4:
2992  if (CONFIG_MPEG4_ENCODER) {
2995  }
2996  break;
2999  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3002  }
3003  break;
3004  case AV_CODEC_ID_H263:
3005  case AV_CODEC_ID_H263P:
3006  if (CONFIG_H263_ENCODER) {
3007  update_mb_info(s, 1);
3009  }
3010  break;
3011  }
3012 
3013  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3014  int bits= put_bits_count(&s->pb);
3015  s->misc_bits+= bits - s->last_bits;
3016  s->last_bits= bits;
3017  }
3018 
3019  s->ptr_lastgob += current_packet_size;
3020  s->first_slice_line=1;
3021  s->resync_mb_x=mb_x;
3022  s->resync_mb_y=mb_y;
3023  }
3024  }
3025 
3026  if( (s->resync_mb_x == s->mb_x)
3027  && s->resync_mb_y+1 == s->mb_y){
3028  s->first_slice_line=0;
3029  }
3030 
3031  s->mb_skipped=0;
3032  s->dquant=0; //only for QP_RD
3033 
3034  update_mb_info(s, 0);
3035 
3036  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3037  int next_block=0;
3038  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3039 
3040  copy_context_before_encode(&backup_s, s);
3041  backup_s.pb= s->pb;
3042  best_s.data_partitioning= s->data_partitioning;
3043  best_s.partitioned_frame= s->partitioned_frame;
3044  if(s->data_partitioning){
3045  backup_s.pb2= s->pb2;
3046  backup_s.tex_pb= s->tex_pb;
3047  }
3048 
3050  s->mv_dir = MV_DIR_FORWARD;
3051  s->mv_type = MV_TYPE_16X16;
3052  s->mb_intra= 0;
3053  s->mv[0][0][0] = s->p_mv_table[xy][0];
3054  s->mv[0][0][1] = s->p_mv_table[xy][1];
3055  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3056  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3057  }
3059  s->mv_dir = MV_DIR_FORWARD;
3060  s->mv_type = MV_TYPE_FIELD;
3061  s->mb_intra= 0;
3062  for(i=0; i<2; i++){
3063  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3064  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3065  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3066  }
3067  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3068  &dmin, &next_block, 0, 0);
3069  }
3071  s->mv_dir = MV_DIR_FORWARD;
3072  s->mv_type = MV_TYPE_16X16;
3073  s->mb_intra= 0;
3074  s->mv[0][0][0] = 0;
3075  s->mv[0][0][1] = 0;
3076  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3077  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3078  }
3080  s->mv_dir = MV_DIR_FORWARD;
3081  s->mv_type = MV_TYPE_8X8;
3082  s->mb_intra= 0;
3083  for(i=0; i<4; i++){
3084  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3085  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3086  }
3087  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3088  &dmin, &next_block, 0, 0);
3089  }
3091  s->mv_dir = MV_DIR_FORWARD;
3092  s->mv_type = MV_TYPE_16X16;
3093  s->mb_intra= 0;
3094  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3095  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3096  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3097  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3098  }
3100  s->mv_dir = MV_DIR_BACKWARD;
3101  s->mv_type = MV_TYPE_16X16;
3102  s->mb_intra= 0;
3103  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3104  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3105  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3106  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3107  }
3109  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3110  s->mv_type = MV_TYPE_16X16;
3111  s->mb_intra= 0;
3112  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3113  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3114  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3115  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3116  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3117  &dmin, &next_block, 0, 0);
3118  }
3120  s->mv_dir = MV_DIR_FORWARD;
3121  s->mv_type = MV_TYPE_FIELD;
3122  s->mb_intra= 0;
3123  for(i=0; i<2; i++){
3124  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3125  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3126  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3127  }
3128  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3129  &dmin, &next_block, 0, 0);
3130  }
3132  s->mv_dir = MV_DIR_BACKWARD;
3133  s->mv_type = MV_TYPE_FIELD;
3134  s->mb_intra= 0;
3135  for(i=0; i<2; i++){
3136  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3137  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3138  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3139  }
3140  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3141  &dmin, &next_block, 0, 0);
3142  }
3144  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3145  s->mv_type = MV_TYPE_FIELD;
3146  s->mb_intra= 0;
3147  for(dir=0; dir<2; dir++){
3148  for(i=0; i<2; i++){
3149  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3150  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3151  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3152  }
3153  }
3154  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3155  &dmin, &next_block, 0, 0);
3156  }
3158  s->mv_dir = 0;
3159  s->mv_type = MV_TYPE_16X16;
3160  s->mb_intra= 1;
3161  s->mv[0][0][0] = 0;
3162  s->mv[0][0][1] = 0;
3163  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3164  &dmin, &next_block, 0, 0);
3165  if(s->h263_pred || s->h263_aic){
3166  if(best_s.mb_intra)
3167  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3168  else
3169  ff_clean_intra_table_entries(s); //old mode?
3170  }
3171  }
3172 
3173  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3174  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3175  const int last_qp= backup_s.qscale;
3176  int qpi, qp, dc[6];
3177  int16_t ac[6][16];
3178  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3179  static const int dquant_tab[4]={-1,1,-2,2};
3180  int storecoefs = s->mb_intra && s->dc_val[0];
3181 
3182  av_assert2(backup_s.dquant == 0);
3183 
3184  //FIXME intra
3185  s->mv_dir= best_s.mv_dir;
3186  s->mv_type = MV_TYPE_16X16;
3187  s->mb_intra= best_s.mb_intra;
3188  s->mv[0][0][0] = best_s.mv[0][0][0];
3189  s->mv[0][0][1] = best_s.mv[0][0][1];
3190  s->mv[1][0][0] = best_s.mv[1][0][0];
3191  s->mv[1][0][1] = best_s.mv[1][0][1];
3192 
3193  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3194  for(; qpi<4; qpi++){
3195  int dquant= dquant_tab[qpi];
3196  qp= last_qp + dquant;
3197  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3198  continue;
3199  backup_s.dquant= dquant;
3200  if(storecoefs){
3201  for(i=0; i<6; i++){
3202  dc[i]= s->dc_val[0][ s->block_index[i] ];
3203  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3204  }
3205  }
3206 
3207  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3208  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3209  if(best_s.qscale != qp){
3210  if(storecoefs){
3211  for(i=0; i<6; i++){
3212  s->dc_val[0][ s->block_index[i] ]= dc[i];
3213  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3214  }
3215  }
3216  }
3217  }
3218  }
3219  }
3220  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3221  int mx= s->b_direct_mv_table[xy][0];
3222  int my= s->b_direct_mv_table[xy][1];
3223 
3224  backup_s.dquant = 0;
3225  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3226  s->mb_intra= 0;
3227  ff_mpeg4_set_direct_mv(s, mx, my);
3228  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3229  &dmin, &next_block, mx, my);
3230  }
3231  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3232  backup_s.dquant = 0;
3233  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3234  s->mb_intra= 0;
3235  ff_mpeg4_set_direct_mv(s, 0, 0);
3236  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3237  &dmin, &next_block, 0, 0);
3238  }
3239  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3240  int coded=0;
3241  for(i=0; i<6; i++)
3242  coded |= s->block_last_index[i];
3243  if(coded){
3244  int mx,my;
3245  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3246  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3247  mx=my=0; //FIXME find the one we actually used
3248  ff_mpeg4_set_direct_mv(s, mx, my);
3249  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3250  mx= s->mv[1][0][0];
3251  my= s->mv[1][0][1];
3252  }else{
3253  mx= s->mv[0][0][0];
3254  my= s->mv[0][0][1];
3255  }
3256 
3257  s->mv_dir= best_s.mv_dir;
3258  s->mv_type = best_s.mv_type;
3259  s->mb_intra= 0;
3260 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3261  s->mv[0][0][1] = best_s.mv[0][0][1];
3262  s->mv[1][0][0] = best_s.mv[1][0][0];
3263  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3264  backup_s.dquant= 0;
3265  s->skipdct=1;
3266  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3267  &dmin, &next_block, mx, my);
3268  s->skipdct=0;
3269  }
3270  }
3271 
3272  s->cur_pic.qscale_table[xy] = best_s.qscale;
3273 
3274  copy_context_after_encode(s, &best_s);
3275 
3276  pb_bits_count= put_bits_count(&s->pb);
3277  flush_put_bits(&s->pb);
3278  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3279  s->pb= backup_s.pb;
3280 
3281  if(s->data_partitioning){
3282  pb2_bits_count= put_bits_count(&s->pb2);
3283  flush_put_bits(&s->pb2);
3284  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3285  s->pb2= backup_s.pb2;
3286 
3287  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3288  flush_put_bits(&s->tex_pb);
3289  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3290  s->tex_pb= backup_s.tex_pb;
3291  }
3292  s->last_bits= put_bits_count(&s->pb);
3293 
3294  if (CONFIG_H263_ENCODER &&
3295  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3297 
3298  if(next_block==0){ //FIXME 16 vs linesize16
3299  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3300  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3301  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3302  }
3303 
3304  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3305  mpv_reconstruct_mb(s, s->block);
3306  } else {
3307  int motion_x = 0, motion_y = 0;
3308  s->mv_type=MV_TYPE_16X16;
3309  // only one MB-Type possible
3310 
3311  switch(mb_type){
3313  s->mv_dir = 0;
3314  s->mb_intra= 1;
3315  motion_x= s->mv[0][0][0] = 0;
3316  motion_y= s->mv[0][0][1] = 0;
3317  break;
3319  s->mv_dir = MV_DIR_FORWARD;
3320  s->mb_intra= 0;
3321  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3322  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3323  break;
3325  s->mv_dir = MV_DIR_FORWARD;
3326  s->mv_type = MV_TYPE_FIELD;
3327  s->mb_intra= 0;
3328  for(i=0; i<2; i++){
3329  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3330  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3331  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3332  }
3333  break;
3335  s->mv_dir = MV_DIR_FORWARD;
3336  s->mv_type = MV_TYPE_8X8;
3337  s->mb_intra= 0;
3338  for(i=0; i<4; i++){
3339  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3340  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3341  }
3342  break;
3344  if (CONFIG_MPEG4_ENCODER) {
3346  s->mb_intra= 0;
3347  motion_x=s->b_direct_mv_table[xy][0];
3348  motion_y=s->b_direct_mv_table[xy][1];
3349  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3350  }
3351  break;
3353  if (CONFIG_MPEG4_ENCODER) {
3355  s->mb_intra= 0;
3356  ff_mpeg4_set_direct_mv(s, 0, 0);
3357  }
3358  break;
3360  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3361  s->mb_intra= 0;
3362  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3363  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3364  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3365  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3366  break;
3368  s->mv_dir = MV_DIR_BACKWARD;
3369  s->mb_intra= 0;
3370  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3371  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3372  break;
3374  s->mv_dir = MV_DIR_FORWARD;
3375  s->mb_intra= 0;
3376  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3377  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3378  break;
3380  s->mv_dir = MV_DIR_FORWARD;
3381  s->mv_type = MV_TYPE_FIELD;
3382  s->mb_intra= 0;
3383  for(i=0; i<2; i++){
3384  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3385  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3386  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3387  }
3388  break;
3390  s->mv_dir = MV_DIR_BACKWARD;
3391  s->mv_type = MV_TYPE_FIELD;
3392  s->mb_intra= 0;
3393  for(i=0; i<2; i++){
3394  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3395  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3396  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3397  }
3398  break;
3400  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3401  s->mv_type = MV_TYPE_FIELD;
3402  s->mb_intra= 0;
3403  for(dir=0; dir<2; dir++){
3404  for(i=0; i<2; i++){
3405  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3406  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3407  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3408  }
3409  }
3410  break;
3411  default:
3412  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3413  }
3414 
3415  encode_mb(s, motion_x, motion_y);
3416 
3417  // RAL: Update last macroblock type
3418  s->last_mv_dir = s->mv_dir;
3419 
3420  if (CONFIG_H263_ENCODER &&
3421  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3423 
3424  mpv_reconstruct_mb(s, s->block);
3425  }
3426 
3427  /* clean the MV table in IPS frames for direct mode in B-frames */
3428  if(s->mb_intra /* && I,P,S_TYPE */){
3429  s->p_mv_table[xy][0]=0;
3430  s->p_mv_table[xy][1]=0;
3431  }
3432 
3433  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3434  int w= 16;
3435  int h= 16;
3436 
3437  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3438  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3439 
3440  s->encoding_error[0] += sse(
3441  s, s->new_pic->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3442  s->dest[0], w, h, s->linesize);
3443  s->encoding_error[1] += sse(
3444  s, s->new_pic->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3445  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3446  s->encoding_error[2] += sse(
3447  s, s->new_pic->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3448  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3449  }
3450  if(s->loop_filter){
3451  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3453  }
3454  ff_dlog(s->avctx, "MB %d %d bits\n",
3455  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3456  }
3457  }
3458 
3459 #if CONFIG_MSMPEG4ENC
3460  //not beautiful here but we must write it before flushing so it has to be here
3461  if (s->msmpeg4_version != MSMP4_UNUSED && s->msmpeg4_version < MSMP4_WMV1 &&
3462  s->pict_type == AV_PICTURE_TYPE_I)
3464 #endif
3465 
3466  write_slice_end(s);
3467 
3468  return 0;
3469 }
3470 
/* Accumulate a per-slice counter into the main context and zero the
 * slice-local copy.  Deliberately two statements (not do{}while(0));
 * callers below only ever use it as a complete statement. */
3471 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line of merge_context_after_me() (original
 * line 3472) was lost in extraction.  The body below folds the
 * motion-estimation statistics of a slice context (src) into the main
 * context (dst). */
3473  MERGE(me.scene_change_score);
3474  MERGE(me.mc_mb_var_sum_temp);
3475  MERGE(me.mb_var_sum_temp);
3476 }
3477 
/* NOTE(review): signature line of merge_context_after_encode() (original
 * line 3478) was lost in extraction.  Merges a slice thread's bit-usage
 * counters and (optionally) noise-reduction statistics into the main
 * context, then appends the slice's bitstream onto the main bitstream. */
3479  int i;
3480 
/* bit-accounting counters accumulated independently by each slice thread */
3481  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3482  MERGE(dct_count[1]);
3483  MERGE(mv_bits);
3484  MERGE(i_tex_bits);
3485  MERGE(p_tex_bits);
3486  MERGE(i_count);
3487  MERGE(misc_bits);
3488  MERGE(encoding_error[0]);
3489  MERGE(encoding_error[1]);
3490  MERGE(encoding_error[2]);
3491 
3492  if (dst->noise_reduction){
3493  for(i=0; i<64; i++){
3494  MERGE(dct_error_sum[0][i]);
3495  MERGE(dct_error_sum[1][i]);
3496  }
3497  }
3498 
/* both bitstreams must be byte-aligned so they can be concatenated */
3499  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3500  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3501  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3502  flush_put_bits(&dst->pb);
3503 }
3504 
/* Choose the quality (lambda/qscale) for the current picture: either a
 * pre-scheduled lambda (s->next_lambda), the rate controller's estimate,
 * or (fixed qscale) the quality already stored on the frame.
 * Returns 0 on success, -1 if the rate controller rejects the frame. */
3505 static int estimate_qp(MpegEncContext *s, int dry_run){
3506  if (s->next_lambda){
3507  s->cur_pic.ptr->f->quality = s->next_lambda;
/* a dry run must not consume the scheduled lambda */
3508  if(!dry_run) s->next_lambda= 0;
3509  } else if (!s->fixed_qscale) {
3510  int quality = ff_rate_estimate_qscale(s, dry_run);
3511  s->cur_pic.ptr->f->quality = quality;
3512  if (s->cur_pic.ptr->f->quality < 0)
3513  return -1;
3514  }
3515 
3516  if(s->adaptive_quant){
/* NOTE(review): the per-codec calls that fill the adaptive-quant lambda
 * tables (original lines 3520, 3526 and 3529) were lost in extraction;
 * only the dispatch skeleton remains below. */
3517  switch(s->codec_id){
3518  case AV_CODEC_ID_MPEG4:
3519  if (CONFIG_MPEG4_ENCODER)
3521  break;
3522  case AV_CODEC_ID_H263:
3523  case AV_CODEC_ID_H263P:
3524  case AV_CODEC_ID_FLV1:
3525  if (CONFIG_H263_ENCODER)
3527  break;
3528  default:
3530  }
3531 
3532  s->lambda= s->lambda_table[0];
3533  //FIXME broken
3534  }else
3535  s->lambda = s->cur_pic.ptr->f->quality;
3536  update_qscale(s);
3537  return 0;
3538 }
3539 
3540 /* must be called before writing the header */
/* NOTE(review): the signature line (original 3541) was lost in extraction.
 * Derives s->time from the frame pts and maintains the reference-to-
 * reference distance (pp_time) and reference-to-B distance (pb_time) --
 * presumably consumed by B-frame prediction scaling; confirm in callers. */
3542  av_assert1(s->cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3543  s->time = s->cur_pic.ptr->f->pts * s->avctx->time_base.num;
3544 
3545  if(s->pict_type==AV_PICTURE_TYPE_B){
/* distance from the previous non-B frame to this B frame */
3546  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3547  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3548  }else{
/* non-B frame: refresh the distance between the last two references */
3549  s->pp_time= s->time - s->last_non_b_time;
3550  s->last_non_b_time= s->time;
3551  av_assert1(s->picture_number==0 || s->pp_time > 0);
3552  }
3553 }
3554 
/* NOTE(review): the signature line (original 3555) was lost in extraction;
 * this is the body of the per-picture encode driver.  It runs motion
 * estimation across all slice contexts, decides f_code/b_code and qscale,
 * writes the picture header and dispatches encode_thread().  Several
 * single-line calls that were hyperlinks in the original rendering are
 * missing below (e.g. the bodies after the CONFIG_* guards in the
 * out_format switch); do not read those empty branches as intentional. */
3556 {
3557  int i, ret;
3558  int bits;
3559  int context_count = s->slice_context_count;
3560 
3561  /* Reset the average MB variance */
3562  s->me.mb_var_sum_temp =
3563  s->me.mc_mb_var_sum_temp = 0;
3564 
3565  /* we need to initialize some time vars before we can encode B-frames */
3566  // RAL: Condition added for MPEG1VIDEO
/* NOTE(review): the calls guarded by the next two conditions (original
 * lines 3568 and 3570) were lost in extraction. */
3567  if (s->out_format == FMT_MPEG1 || (s->h263_pred && s->msmpeg4_version == MSMP4_UNUSED))
3569  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3571 
3572  s->me.scene_change_score=0;
3573 
3574 // s->lambda= s->cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3575 
/* rounding mode: fixed for I frames, toggled per reference frame for
 * codecs that use flip-flop rounding */
3576  if(s->pict_type==AV_PICTURE_TYPE_I){
3577  s->no_rounding = s->msmpeg4_version >= MSMP4_V3;
3578  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3579  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3580  s->no_rounding ^= 1;
3581  }
3582 
3583  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3584  if (estimate_qp(s,1) < 0)
3585  return -1;
3587  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
/* no explicit qscale yet: reuse the lambda of the previous frame of the
 * same kind until the real estimate below */
3588  if(s->pict_type==AV_PICTURE_TYPE_B)
3589  s->lambda= s->last_lambda_for[s->pict_type];
3590  else
3591  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3592  update_qscale(s);
3593  }
3594 
/* outside MJPEG the chroma matrices alias the luma ones */
3595  if (s->out_format != FMT_MJPEG) {
3596  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3597  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3598  s->q_chroma_intra_matrix = s->q_intra_matrix;
3599  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3600  }
3601 
3602  if(ff_init_me(s)<0)
3603  return -1;
3604 
3605  s->mb_intra=0; //for the rate distortion & bit compare functions
/* give each slice context a proportional window of the output packet */
3606  for (int i = 0; i < context_count; i++) {
3607  MpegEncContext *const slice = s->thread_context[i];
3608  uint8_t *start, *end;
3609  int h;
3610 
3611  if (i) {
3612  ret = ff_update_duplicate_context(slice, s);
3613  if (ret < 0)
3614  return ret;
3615  }
3616  slice->me.temp = slice->me.scratchpad = slice->sc.scratchpad_buf;
3617 
3618  h = s->mb_height;
3619  start = pkt->data + (size_t)(((int64_t) pkt->size) * slice->start_mb_y / h);
3620  end = pkt->data + (size_t)(((int64_t) pkt->size) * slice-> end_mb_y / h);
3621 
3622  init_put_bits(&s->thread_context[i]->pb, start, end - start);
3623  }
3624 
3625  /* Estimate motion for every MB */
3626  if(s->pict_type != AV_PICTURE_TYPE_I){
3627  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3628  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3629  if (s->pict_type != AV_PICTURE_TYPE_B) {
3630  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3631  s->me_pre == 2) {
3632  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3633  }
3634  }
3635 
3636  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3637  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3638  /* I-Frame */
3639  for(i=0; i<s->mb_stride*s->mb_height; i++)
3640  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3641 
3642  if(!s->fixed_qscale){
3643  /* finding spatial complexity for I-frame rate control */
3644  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3645  }
3646  }
3647  for(i=1; i<context_count; i++){
3648  merge_context_after_me(s, s->thread_context[i]);
3649  }
3650  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3651  s->mb_var_sum = s->me. mb_var_sum_temp;
3652  emms_c();
3653 
/* scene change: promote this P frame to an I frame */
3654  if (s->me.scene_change_score > s->scenechange_threshold &&
3655  s->pict_type == AV_PICTURE_TYPE_P) {
3656  s->pict_type= AV_PICTURE_TYPE_I;
3657  for(i=0; i<s->mb_stride*s->mb_height; i++)
3658  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3659  if (s->msmpeg4_version >= MSMP4_V3)
3660  s->no_rounding=1;
3661  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3662  s->mb_var_sum, s->mc_mb_var_sum);
3663  }
3664 
/* pick f_code/b_code from the estimated vectors and clip out-of-range MVs */
3665  if(!s->umvplus){
3666  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3667  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3668 
3669  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3670  int a,b;
3671  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3672  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3673  s->f_code= FFMAX3(s->f_code, a, b);
3674  }
3675 
3677  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3678  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3679  int j;
3680  for(i=0; i<2; i++){
3681  for(j=0; j<2; j++)
3682  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3683  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3684  }
3685  }
3686  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3687  int a, b;
3688 
3689  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3690  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3691  s->f_code = FFMAX(a, b);
3692 
3693  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3694  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3695  s->b_code = FFMAX(a, b);
3696 
3697  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3698  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3699  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3700  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3701  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3702  int dir, j;
3703  for(dir=0; dir<2; dir++){
3704  for(i=0; i<2; i++){
/* NOTE(review): the declaration of 'type' used below (original lines
 * 3706-3707) was lost in extraction. */
3705  for(j=0; j<2; j++){
3708  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3709  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3710  }
3711  }
3712  }
3713  }
3714  }
3715  }
3716 
3717  if (estimate_qp(s, 0) < 0)
3718  return -1;
3719 
3720  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3721  s->pict_type == AV_PICTURE_TYPE_I &&
3722  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3723  s->qscale= 3; //reduce clipping problems
3724 
3725  if (s->out_format == FMT_MJPEG) {
3726  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3727  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3728 
3729  if (s->avctx->intra_matrix) {
3730  chroma_matrix =
3731  luma_matrix = s->avctx->intra_matrix;
3732  }
3733  if (s->avctx->chroma_intra_matrix)
3734  chroma_matrix = s->avctx->chroma_intra_matrix;
3735 
3736  /* for mjpeg, we do include qscale in the matrix */
3737  for(i=1;i<64;i++){
3738  int j = s->idsp.idct_permutation[i];
3739 
3740  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3741  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3742  }
3743  s->y_dc_scale_table=
3744  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3745  s->chroma_intra_matrix[0] =
3746  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3747  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3748  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3749  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3750  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3751  s->qscale= 8;
3752 
/* AMV uses fixed sp5x quantization tables and DC scales */
3753  if (s->codec_id == AV_CODEC_ID_AMV) {
3754  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3755  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3756  for (int i = 1; i < 64; i++) {
3757  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3758 
3759  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3760  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3761  }
3762  s->y_dc_scale_table = y;
3763  s->c_dc_scale_table = c;
3764  s->intra_matrix[0] = 13;
3765  s->chroma_intra_matrix[0] = 14;
3766  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3767  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3768  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3769  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3770  s->qscale = 8;
3771  }
3772  }
3773 
3774  if (s->pict_type == AV_PICTURE_TYPE_I) {
3775  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3776  } else {
3777  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3778  }
3779  s->cur_pic.ptr->f->pict_type = s->pict_type;
3780 
3781  if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3782  s->picture_in_gop_number=0;
3783 
/* write the picture header for the active output format; the header-writer
 * call lines themselves were lost in extraction (see note at top) */
3784  s->mb_x = s->mb_y = 0;
3785  s->last_bits= put_bits_count(&s->pb);
3786  switch(s->out_format) {
3787 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3788  case FMT_MJPEG:
3790  break;
3791 #endif
3792  case FMT_SPEEDHQ:
3793  if (CONFIG_SPEEDHQ_ENCODER)
3795  break;
3796  case FMT_H261:
3797  if (CONFIG_H261_ENCODER)
3799  break;
3800  case FMT_H263:
3801  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3803 #if CONFIG_MSMPEG4ENC
3804  else if (s->msmpeg4_version != MSMP4_UNUSED)
3806 #endif
3807  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3809  if (ret < 0)
3810  return ret;
3811  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3813  if (ret < 0)
3814  return ret;
3815  }
3816  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3818  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3820  else if (CONFIG_H263_ENCODER)
3822  break;
3823  case FMT_MPEG1:
3824  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3826  break;
3827  default:
3828  av_assert0(0);
3829  }
3830  bits= put_bits_count(&s->pb);
3831  s->header_bits= bits - s->last_bits;
3832 
/* run the slice encoders and stitch their bitstreams back together */
3833  for(i=1; i<context_count; i++){
3834  update_duplicate_context_after_me(s->thread_context[i], s);
3835  }
3836  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3837  for(i=1; i<context_count; i++){
3838  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3839  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3840  merge_context_after_encode(s, s->thread_context[i]);
3841  }
3842  emms_c();
3843  return 0;
3844 }
3845 
3846 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3847  const int intra= s->mb_intra;
3848  int i;
3849 
3850  s->dct_count[intra]++;
3851 
3852  for(i=0; i<64; i++){
3853  int level= block[i];
3854 
3855  if(level){
3856  if(level>0){
3857  s->dct_error_sum[intra][i] += level;
3858  level -= s->dct_offset[intra][i];
3859  if(level<0) level=0;
3860  }else{
3861  s->dct_error_sum[intra][i] -= level;
3862  level += s->dct_offset[intra][i];
3863  if(level>0) level=0;
3864  }
3865  block[i]= level;
3866  }
3867  }
3868 }
3869 
/* NOTE(review): the opening signature line (original 3870) was lost in
 * extraction; from the trailing parameters this is the trellis quantizer
 * (presumably dct_quantize_trellis_c).  It forward-transforms the block,
 * then runs a Viterbi-style rate/distortion search over the possible
 * (run,level) codes and writes the chosen quantized coefficients back.
 * Returns the index of the last nonzero coefficient (or -1 when the block
 * ends up empty). */
3871  int16_t *block, int n,
3872  int qscale, int *overflow){
3873  const int *qmat;
3874  const uint16_t *matrix;
3875  const uint8_t *scantable;
3876  const uint8_t *perm_scantable;
3877  int max=0;
3878  unsigned int threshold1, threshold2;
3879  int bias=0;
3880  int run_tab[65];
3881  int level_tab[65];
3882  int score_tab[65];
3883  int survivor[65];
3884  int survivor_count;
3885  int last_run=0;
3886  int last_level=0;
3887  int last_score= 0;
3888  int last_i;
3889  int coeff[2][64];
3890  int coeff_count[64];
3891  int qmul, qadd, start_i, last_non_zero, i, dc;
3892  const int esc_length= s->ac_esc_length;
3893  uint8_t * length;
3894  uint8_t * last_length;
3895  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3896  int mpeg2_qscale;
3897 
3898  s->fdsp.fdct(block);
3899 
3900  if(s->dct_error_sum)
3901  s->denoise_dct(s, block);
3902  qmul= qscale*16;
3903  qadd= ((qscale-1)|1)*8;
3904 
3905  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3906  else mpeg2_qscale = qscale << 1;
3907 
/* intra: quantize DC separately, AC starts at 1; inter: AC starts at 0 */
3908  if (s->mb_intra) {
3909  int q;
3910  scantable= s->intra_scantable.scantable;
3911  perm_scantable= s->intra_scantable.permutated;
3912  if (!s->h263_aic) {
3913  if (n < 4)
3914  q = s->y_dc_scale;
3915  else
3916  q = s->c_dc_scale;
3917  q = q << 3;
3918  } else{
3919  /* For AIC we skip quant/dequant of INTRADC */
3920  q = 1 << 3;
3921  qadd=0;
3922  }
3923 
3924  /* note: block[0] is assumed to be positive */
3925  block[0] = (block[0] + (q >> 1)) / q;
3926  start_i = 1;
3927  last_non_zero = 0;
3928  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3929  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3930  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3931  bias= 1<<(QMAT_SHIFT-1);
3932 
3933  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3934  length = s->intra_chroma_ac_vlc_length;
3935  last_length= s->intra_chroma_ac_vlc_last_length;
3936  } else {
3937  length = s->intra_ac_vlc_length;
3938  last_length= s->intra_ac_vlc_last_length;
3939  }
3940  } else {
3941  scantable= s->inter_scantable.scantable;
3942  perm_scantable= s->inter_scantable.permutated;
3943  start_i = 0;
3944  last_non_zero = -1;
3945  qmat = s->q_inter_matrix[qscale];
3946  matrix = s->inter_matrix;
3947  length = s->inter_ac_vlc_length;
3948  last_length= s->inter_ac_vlc_last_length;
3949  }
3950  last_i= start_i;
3951 
3952  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3953  threshold2= (threshold1<<1);
3954 
/* scan backwards for the last coefficient that survives quantization */
3955  for(i=63; i>=start_i; i--) {
3956  const int j = scantable[i];
3957  int level = block[j] * qmat[j];
3958 
3959  if(((unsigned)(level+threshold1))>threshold2){
3960  last_non_zero = i;
3961  break;
3962  }
3963  }
3964 
/* for each position, precompute one or two candidate quantized levels
 * (level and level-1 toward zero), or +/-1 for sub-threshold coefficients */
3965  for(i=start_i; i<=last_non_zero; i++) {
3966  const int j = scantable[i];
3967  int level = block[j] * qmat[j];
3968 
3969 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3970 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3971  if(((unsigned)(level+threshold1))>threshold2){
3972  if(level>0){
3973  level= (bias + level)>>QMAT_SHIFT;
3974  coeff[0][i]= level;
3975  coeff[1][i]= level-1;
3976 // coeff[2][k]= level-2;
3977  }else{
3978  level= (bias - level)>>QMAT_SHIFT;
3979  coeff[0][i]= -level;
3980  coeff[1][i]= -level+1;
3981 // coeff[2][k]= -level+2;
3982  }
3983  coeff_count[i]= FFMIN(level, 2);
3984  av_assert2(coeff_count[i]);
3985  max |=level;
3986  }else{
3987  coeff[0][i]= (level>>31)|1;
3988  coeff_count[i]= 1;
3989  }
3990  }
3991 
3992  *overflow= s->max_qcoeff < max; //overflow might have happened
3993 
3994  if(last_non_zero < start_i){
3995  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3996  return last_non_zero;
3997  }
3998 
3999  score_tab[start_i]= 0;
4000  survivor[0]= start_i;
4001  survivor_count= 1;
4002 
/* dynamic programming over coefficient positions: for every position and
 * candidate level, try extending each surviving predecessor with a
 * (run,level) code (or an escape) and keep the cheapest path */
4003  for(i=start_i; i<=last_non_zero; i++){
4004  int level_index, j, zero_distortion;
4005  int dct_coeff= FFABS(block[ scantable[i] ]);
4006  int best_score=256*256*256*120;
4007 
4008  if (s->fdsp.fdct == ff_fdct_ifast)
4009  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4010  zero_distortion= dct_coeff*dct_coeff;
4011 
4012  for(level_index=0; level_index < coeff_count[i]; level_index++){
4013  int distortion;
4014  int level= coeff[level_index][i];
4015  const int alevel= FFABS(level);
4016  int unquant_coeff;
4017 
4018  av_assert2(level);
4019 
/* reconstruct the dequantized value the decoder would produce, per format */
4020  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4021  unquant_coeff= alevel*qmul + qadd;
4022  } else if(s->out_format == FMT_MJPEG) {
4023  j = s->idsp.idct_permutation[scantable[i]];
4024  unquant_coeff = alevel * matrix[j] * 8;
4025  }else{ // MPEG-1
4026  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4027  if(s->mb_intra){
4028  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4029  unquant_coeff = (unquant_coeff - 1) | 1;
4030  }else{
4031  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4032  unquant_coeff = (unquant_coeff - 1) | 1;
4033  }
4034  unquant_coeff<<= 3;
4035  }
4036 
4037  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4038  level+=64;
4039  if((level&(~127)) == 0){
4040  for(j=survivor_count-1; j>=0; j--){
4041  int run= i - survivor[j];
4042  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4043  score += score_tab[i-run];
4044 
4045  if(score < best_score){
4046  best_score= score;
4047  run_tab[i+1]= run;
4048  level_tab[i+1]= level-64;
4049  }
4050  }
4051 
4052  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4053  for(j=survivor_count-1; j>=0; j--){
4054  int run= i - survivor[j];
4055  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4056  score += score_tab[i-run];
4057  if(score < last_score){
4058  last_score= score;
4059  last_run= run;
4060  last_level= level-64;
4061  last_i= i+1;
4062  }
4063  }
4064  }
4065  }else{
/* level outside the VLC table range: escape code */
4066  distortion += esc_length*lambda;
4067  for(j=survivor_count-1; j>=0; j--){
4068  int run= i - survivor[j];
4069  int score= distortion + score_tab[i-run];
4070 
4071  if(score < best_score){
4072  best_score= score;
4073  run_tab[i+1]= run;
4074  level_tab[i+1]= level-64;
4075  }
4076  }
4077 
4078  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4079  for(j=survivor_count-1; j>=0; j--){
4080  int run= i - survivor[j];
4081  int score= distortion + score_tab[i-run];
4082  if(score < last_score){
4083  last_score= score;
4084  last_run= run;
4085  last_level= level-64;
4086  last_i= i+1;
4087  }
4088  }
4089  }
4090  }
4091  }
4092 
4093  score_tab[i+1]= best_score;
4094 
4095  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune survivors that can no longer beat the current best path */
4096  if(last_non_zero <= 27){
4097  for(; survivor_count; survivor_count--){
4098  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4099  break;
4100  }
4101  }else{
4102  for(; survivor_count; survivor_count--){
4103  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4104  break;
4105  }
4106  }
4107 
4108  survivor[ survivor_count++ ]= i+1;
4109  }
4110 
/* formats without an explicit "last" VLC: choose the best end position,
 * charging lambda*2 as an approximate end-of-block cost */
4111  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4112  last_score= 256*256*256*120;
4113  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4114  int score= score_tab[i];
4115  if (i)
4116  score += lambda * 2; // FIXME more exact?
4117 
4118  if(score < last_score){
4119  last_score= score;
4120  last_i= i;
4121  last_level= level_tab[i];
4122  last_run= run_tab[i];
4123  }
4124  }
4125  }
4126 
4127  s->coded_score[n] = last_score;
4128 
4129  dc= FFABS(block[0]);
4130  last_non_zero= last_i - 1;
4131  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4132 
4133  if(last_non_zero < start_i)
4134  return last_non_zero;
4135 
/* special case: only the first coefficient survived -- re-decide it
 * against the cost of dropping the block entirely */
4136  if(last_non_zero == 0 && start_i == 0){
4137  int best_level= 0;
4138  int best_score= dc * dc;
4139 
4140  for(i=0; i<coeff_count[0]; i++){
4141  int level= coeff[i][0];
4142  int alevel= FFABS(level);
4143  int unquant_coeff, score, distortion;
4144 
4145  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4146  unquant_coeff= (alevel*qmul + qadd)>>3;
4147  } else{ // MPEG-1
4148  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4149  unquant_coeff = (unquant_coeff - 1) | 1;
4150  }
4151  unquant_coeff = (unquant_coeff + 4) >> 3;
4152  unquant_coeff<<= 3 + 3;
4153 
4154  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4155  level+=64;
4156  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4157  else score= distortion + esc_length*lambda;
4158 
4159  if(score < best_score){
4160  best_score= score;
4161  best_level= level - 64;
4162  }
4163  }
4164  block[0]= best_level;
4165  s->coded_score[n] = best_score - dc*dc;
4166  if(best_level == 0) return -1;
4167  else return last_non_zero;
4168  }
4169 
4170  i= last_i;
4171  av_assert2(last_level);
4172 
/* back-trace the chosen (run,level) chain into the block, in permuted
 * scan order */
4173  block[ perm_scantable[last_non_zero] ]= last_level;
4174  i -= last_run + 1;
4175 
4176  for(; i>start_i; i -= run_tab[i] + 1){
4177  block[ perm_scantable[i-1] ]= level_tab[i];
4178  }
4179 
4180  return last_non_zero;
4181 }
4182 
4183 static int16_t basis[64][64];
4184 
4185 static void build_basis(uint8_t *perm){
4186  int i, j, x, y;
4187  emms_c();
4188  for(i=0; i<8; i++){
4189  for(j=0; j<8; j++){
4190  for(y=0; y<8; y++){
4191  for(x=0; x<8; x++){
4192  double s= 0.25*(1<<BASIS_SHIFT);
4193  int index= 8*i + j;
4194  int perm_index= perm[index];
4195  if(i==0) s*= sqrt(0.5);
4196  if(j==0) s*= sqrt(0.5);
4197  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4198  }
4199  }
4200  }
4201  }
4202 }
4203 
/**
 * Refine an already-quantized 8x8 block so the weighted reconstruction
 * error plus the estimated VLC bit cost is reduced: coefficients are
 * repeatedly nudged by +-1 and each candidate change is kept only if the
 * combined rate/distortion score improves.  The search loops until no
 * single +-1 change helps.
 *
 * @param s       encoder context (scan tables, VLC length tables, DSP fns)
 * @param block   quantized coefficients; modified in place
 * @param weight  per-coefficient weights; rewritten in place to 16..63
 * @param orig    reference samples the reconstruction is compared against
 * @param n       block index; n < 4 selects the luma DC scale
 * @param qscale  quantizer scale used to de-quantize candidate levels
 * @return index (scan order) of the last non-zero coefficient
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    // lazily build the DCT basis table on first use
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    // H.263-style dequant constants: recon = qmul*level +- qadd
    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;  // DC handled separately for intra blocks
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    // rem starts as (rounded DC reconstruction) - original; the basis
    // contributions of all coded coefficients are added below so rem
    // becomes the current reconstruction residual
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    // remap weights into 16..63 and accumulate sum of squares for lambda
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    // rate/distortion trade-off factor, scaled to match the weighted scores
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    // build the run-length table of the current block and fold every coded
    // coefficient's basis contribution into rem
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    // greedy refinement: per pass, find the single +-1 change with the best
    // score; apply it and repeat until no change improves on doing nothing
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            // weighted residual transformed to the DCT domain; used below to
            // reject new coefficients whose sign matches the gradient
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            // intra: also try nudging the DC coefficient
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        // scan every AC position and score raising/lowering it by one
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        // changing the magnitude of an existing coefficient:
                        // bit-cost delta of the new vs. old (run, level) code
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        // introducing a new +-1 coefficient: this splits a run,
                        // so account for the new code and the re-coded successor
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            // coefficient appended past the old end of block:
                            // previous last coefficient must be re-coded as non-last
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    // removing a +-1 coefficient: runs merge, mirror of the
                    // insertion accounting above
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        // removing the final coefficient: the previous one
                        // becomes the new "last" code
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                // add the distortion change this dequant delta would cause
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            // apply the winning +-1 change and update bookkeeping
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                // the change may have zeroed the last coefficient; rescan back
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            // rebuild the run-length table for the modified block
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            // keep the residual in sync with the applied change
            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;  // no improving change found -> converged
        }
    }

    return last_non_zero;
}
4519 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Snapshot the block, then clear every position that holds a coded
     * coefficient so stale values cannot survive the move. */
    memcpy(saved, block, sizeof(saved));
    for (int i = 0; i <= last; i++)
        block[scantable[i]] = 0;

    /* Drop each saved coefficient into its permuted slot. */
    for (int i = 0; i <= last; i++) {
        const int src = scantable[i];
        block[permutation[src]] = saved[src];
    }
}
4555 
4557  int16_t *block, int n,
4558  int qscale, int *overflow)
4559 {
4560  int i, j, level, last_non_zero, q, start_i;
4561  const int *qmat;
4562  const uint8_t *scantable;
4563  int bias;
4564  int max=0;
4565  unsigned int threshold1, threshold2;
4566 
4567  s->fdsp.fdct(block);
4568 
4569  if(s->dct_error_sum)
4570  s->denoise_dct(s, block);
4571 
4572  if (s->mb_intra) {
4573  scantable= s->intra_scantable.scantable;
4574  if (!s->h263_aic) {
4575  if (n < 4)
4576  q = s->y_dc_scale;
4577  else
4578  q = s->c_dc_scale;
4579  q = q << 3;
4580  } else
4581  /* For AIC we skip quant/dequant of INTRADC */
4582  q = 1 << 3;
4583 
4584  /* note: block[0] is assumed to be positive */
4585  block[0] = (block[0] + (q >> 1)) / q;
4586  start_i = 1;
4587  last_non_zero = 0;
4588  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4589  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4590  } else {
4591  scantable= s->inter_scantable.scantable;
4592  start_i = 0;
4593  last_non_zero = -1;
4594  qmat = s->q_inter_matrix[qscale];
4595  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4596  }
4597  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4598  threshold2= (threshold1<<1);
4599  for(i=63;i>=start_i;i--) {
4600  j = scantable[i];
4601  level = block[j] * qmat[j];
4602 
4603  if(((unsigned)(level+threshold1))>threshold2){
4604  last_non_zero = i;
4605  break;
4606  }else{
4607  block[j]=0;
4608  }
4609  }
4610  for(i=start_i; i<=last_non_zero; i++) {
4611  j = scantable[i];
4612  level = block[j] * qmat[j];
4613 
4614 // if( bias+level >= (1<<QMAT_SHIFT)
4615 // || bias-level >= (1<<QMAT_SHIFT)){
4616  if(((unsigned)(level+threshold1))>threshold2){
4617  if(level>0){
4618  level= (bias + level)>>QMAT_SHIFT;
4619  block[j]= level;
4620  }else{
4621  level= (bias - level)>>QMAT_SHIFT;
4622  block[j]= -level;
4623  }
4624  max |=level;
4625  }else{
4626  block[j]=0;
4627  }
4628  }
4629  *overflow= s->max_qcoeff < max; //overflow might have happened
4630 
4631  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4632  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4633  ff_block_permute(block, s->idsp.idct_permutation,
4634  scantable, last_non_zero);
4635 
4636  return last_non_zero;
4637 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1319
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:346
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:30
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:699
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:155
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:235
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:427
encode_picture
static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3555
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegvideoenc.h)
Definition: mpegvideo.h:291
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:48
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:105
level
uint8_t level
Definition: svq3.c:205
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:390
av_clip
#define av_clip
Definition: common.h:100
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:426
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3541
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:158
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:541
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:201
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
r
const char * r
Definition: vf_curves.c:127
acc
int acc
Definition: yuv2rgb.c:553
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:371
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:819
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:425
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:222
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:230
mem_internal.h
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:495
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:288
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1299
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1656
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:143
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:97
thread.h
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2488
matrix
Definition: vc1dsp.c:43
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:55
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:224
mpegvideoenc.h
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2635
mpv_reconstruct_mb_template.c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4183
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:974
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2688
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:834
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1717
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:264
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:391
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:41
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:332
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:471
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:486
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:206
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:234
AVPacket::data
uint8_t * data
Definition: packet.h:520
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:374
AVOption
AVOption.
Definition: opt.h:357
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
data
const char data[16]
Definition: mxf.c:148
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:207
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
prepare_picture
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1108
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:219
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:808
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:832
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:386
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:538
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:58
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:491
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:326
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2669
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2073
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:148
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:962
wmv2enc.h
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1263
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
mpegutils.h
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:28
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:228
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:601
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:575
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:52
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:127
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:36
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:904
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
MpegEncContext::encoding_error
uint64_t encoding_error[MPV_MAX_PLANES]
Definition: mpegvideo.h:256
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1753
skip_check
static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1278
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:84
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:66
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3505
FDCTDSPContext
Definition: fdctdsp.h:28
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:831
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3472
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
frame_start
static void frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1738
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:465
fail
#define fail()
Definition: checkasm.h:185
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:139
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:67
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1008
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:43
perm
perm
Definition: f_perms.c:75
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1231
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:61
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:318
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:269
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:451
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:330
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:45
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2799
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:50
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:889
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:344
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:786
RateControlContext
rate control context.
Definition: ratecontrol.h:60
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:240
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2776
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:97
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_enc.c:1050
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4185
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1067
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:462
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:261
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:863
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1491
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1575
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:224
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1292
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4531
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1527
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:270
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:46
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:855
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2713
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
ff_mpv_alloc_pic_pool
av_cold FFRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1277
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:394
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:51
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:486
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2756
run
uint8_t run
Definition: svq3.c:204
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:288
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:330
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:229
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:47
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:480
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:495
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:253
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:865
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:275
ff_dct_encode_init
av_cold void ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:294
mathops.h
dct_quantize_c
static int dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4556
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:347
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3471
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:894
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:703
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:990
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:963
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:142
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1327
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:282
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1314
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
AVOnce
#define AVOnce
Definition: thread.h:202
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1050
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1563
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:273
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:841
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:279
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3846
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:411
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1327
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:544
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:818
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1402
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1334
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2097
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:236
f
f
Definition: af_crystalizer.c:121
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3478
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:290
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:54
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:521
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1031
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1140
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:1983
shift
static int shift(int a, int b)
Definition: bonk.c:261
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:604
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1065
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:202
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:55
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:331
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:422
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:281
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:40
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:99
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:88
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:60
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:280
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:165
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:519
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:265
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:495
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:526
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:285
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:89
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:37
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:53
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MpegEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:42
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:196
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:138
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:463
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:219
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:300
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:252
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:467
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:461
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:513
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3870
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1081
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2833
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:35
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1340
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:997
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:393
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:275
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:288
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:105
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:833
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:633
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:197
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:905
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2498
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:308
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:247
AVCodecContext::height
int height
Definition: avcodec.h:618
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:508
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:352
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:291
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1379
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2563
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:427
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:52
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:871
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1705
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:205
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:464
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:862
AVCodecContext
main external API structure.
Definition: avcodec.h:445
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:96
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:890
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1353
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1256
AVRational::den
int den
Denominator.
Definition: rational.h:60
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1622
set_bframe_chain_length
static int set_bframe_chain_length(MpegEncContext *s)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1492
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:848
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1681
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:263
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:863
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:965
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
Definition: ituh263enc.c:109
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:37
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:280
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:795
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:547
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:112
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:541
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1306
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
ff_set_cmp
int ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:476
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4204
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2525
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1047
ff_h263_update_mb
void ff_h263_update_mb(MpegEncContext *s)
Definition: ituh263enc.c:692
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:964
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:497
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2039
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:983
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:51
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:424
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:419
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2614
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:347
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:607
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:878
h
h
Definition: vp9dsp_template.c:2038
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:880
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:59
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:424
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:170
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:345
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
ff_refstruct_pool_get
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pixblockdsp.h
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1603
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:975
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:460
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2736
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:694
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:424
intmath.h