/* FFmpeg — snowenc.c (Snow wavelet video encoder) */
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/emms.h"
22 #include "libavutil/intmath.h"
23 #include "libavutil/libm.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/pixdesc.h"
28 #include "avcodec.h"
29 #include "codec_internal.h"
30 #include "encode.h"
31 #include "internal.h" //For AVCodecInternal.recon_frame
32 #include "me_cmp.h"
33 #include "qpeldsp.h"
34 #include "snow_dwt.h"
35 #include "snow.h"
36 
37 #include "rangecoder.h"
38 #include "mathops.h"
39 
40 #include "mpegvideo.h"
41 #include "h263enc.h"
42 
43 #define FF_ME_ITER 3
44 
45 typedef struct SnowEncContext {
49 
50  int lambda;
51  int lambda2;
52  int pass1_rc;
53 
54  int pred;
55  int memc_only;
61 
63  MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MPVEncContext, so this will be removed then (FIXME/XXX)
65 #define ME_CACHE_SIZE 1024
68 
70 
71  uint8_t *emu_edge_buffer;
72 
75 
76 static void init_ref(MotionEstContext *c, const uint8_t *const src[3],
77  uint8_t *const ref[3], uint8_t *const ref2[3],
78  int x, int y, int ref_index)
79 {
80  SnowContext *s = c->avctx->priv_data;
81  const int offset[3] = {
82  y*c-> stride + x,
83  ((y*c->uvstride + x) >> s->chroma_h_shift),
84  ((y*c->uvstride + x) >> s->chroma_h_shift),
85  };
86  for (int i = 0; i < 3; i++) {
87  c->src[0][i] = src [i];
88  c->ref[0][i] = ref [i] + offset[i];
89  }
90  av_assert2(!ref_index);
91 }
92 
93 static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
94 {
95  if (v) {
96  const int a = FFABS(v);
97  const int e = av_log2(a);
98  const int el = FFMIN(e, 10);
99  int i;
100 
101  put_rac(c, state + 0, 0);
102 
103  for (i = 0; i < el; i++)
104  put_rac(c, state + 1 + i, 1); //1..10
105  for(; i < e; i++)
106  put_rac(c, state + 1 + 9, 1); //1..10
107  put_rac(c, state + 1 + FFMIN(i, 9), 0);
108 
109  for (i = e - 1; i >= el; i--)
110  put_rac(c, state + 22 + 9, (a >> i) & 1); //22..31
111  for(; i >= 0; i--)
112  put_rac(c, state + 22 + i, (a >> i) & 1); //22..31
113 
114  if (is_signed)
115  put_rac(c, state + 11 + el, v < 0); //11..21
116  } else {
117  put_rac(c, state + 0, 1);
118  }
119 }
120 
121 static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
122 {
123  int r = log2 >= 0 ? 1<<log2 : 1;
124 
125  av_assert2(v >= 0);
126  av_assert2(log2 >= -4);
127 
128  while (v >= r) {
129  put_rac(c, state + 4 + log2, 1);
130  v -= r;
131  log2++;
132  if (log2 > 0) r += r;
133  }
134  put_rac(c, state + 4 + log2, 0);
135 
136  for (int i = log2 - 1; i >= 0; i--)
137  put_rac(c, state + 31 - i, (v >> i) & 1);
138 }
139 
141 {
142  int ret;
143 
144  frame->width = s->avctx->width + 2 * EDGE_WIDTH;
145  frame->height = s->avctx->height + 2 * EDGE_WIDTH;
146 
147  ret = ff_encode_alloc_frame(s->avctx, frame);
148  if (ret < 0)
149  return ret;
150  for (int i = 0; frame->data[i]; i++) {
151  int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
152  frame->linesize[i] +
153  (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
154  frame->data[i] += offset;
155  }
156  frame->width = s->avctx->width;
157  frame->height = s->avctx->height;
158 
159  return 0;
160 }
161 
163 {
164  SnowEncContext *const enc = avctx->priv_data;
165  SnowContext *const s = &enc->com;
166  MPVEncContext *const mpv = &enc->m.s;
167  int plane_index, ret;
168  int i;
169 
170  if (enc->pred == DWT_97
171  && (avctx->flags & AV_CODEC_FLAG_QSCALE)
172  && avctx->global_quality == 0){
173  av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
174  return AVERROR(EINVAL);
175  }
176 
177  s->spatial_decomposition_type = enc->pred; //FIXME add decorrelator type r transform_type
178 
179  s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
180  s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
181 
182  for(plane_index=0; plane_index<3; plane_index++){
183  s->plane[plane_index].diag_mc= 1;
184  s->plane[plane_index].htaps= 6;
185  s->plane[plane_index].hcoeff[0]= 40;
186  s->plane[plane_index].hcoeff[1]= -10;
187  s->plane[plane_index].hcoeff[2]= 2;
188  s->plane[plane_index].fast_mc= 1;
189  }
190 
191  // Must be before ff_snow_common_init()
192  ff_hpeldsp_init(&s->hdsp, avctx->flags);
193  if ((ret = ff_snow_common_init(avctx)) < 0) {
194  return ret;
195  }
196 
197 #define mcf(dx,dy)\
198  enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
199  enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
200  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
201  enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
202  enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
203  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
204 
205  mcf( 0, 0)
206  mcf( 4, 0)
207  mcf( 8, 0)
208  mcf(12, 0)
209  mcf( 0, 4)
210  mcf( 4, 4)
211  mcf( 8, 4)
212  mcf(12, 4)
213  mcf( 0, 8)
214  mcf( 4, 8)
215  mcf( 8, 8)
216  mcf(12, 8)
217  mcf( 0,12)
218  mcf( 4,12)
219  mcf( 8,12)
220  mcf(12,12)
221 
222  ff_me_cmp_init(&enc->mecc, avctx);
223  ret = ff_me_init(&mpv->me, avctx, &enc->mecc, 0);
224  if (ret < 0)
225  return ret;
226  ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
227 
229 
230  s->version=0;
231 
232  mpv->c.avctx = avctx;
233  enc->m.bit_rate = avctx->bit_rate;
234  enc->m.lmin = avctx->mb_lmin;
235  enc->m.lmax = avctx->mb_lmax;
236  mpv->c.mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
237 
238  mpv->me.temp =
239  mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
240  if (!mpv->me.scratchpad)
241  return AVERROR(ENOMEM);
242 
244 
245  s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
246 
247  if(avctx->flags&AV_CODEC_FLAG_PASS1){
248  if(!avctx->stats_out)
249  avctx->stats_out = av_mallocz(256);
250 
251  if (!avctx->stats_out)
252  return AVERROR(ENOMEM);
253  }
254  if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
255  ret = ff_rate_control_init(&enc->m);
256  if(ret < 0)
257  return ret;
258  }
260 
261  switch(avctx->pix_fmt){
262  case AV_PIX_FMT_YUV444P:
263 // case AV_PIX_FMT_YUV422P:
264  case AV_PIX_FMT_YUV420P:
265 // case AV_PIX_FMT_YUV411P:
266  case AV_PIX_FMT_YUV410P:
267  s->nb_planes = 3;
268  s->colorspace_type= 0;
269  break;
270  case AV_PIX_FMT_GRAY8:
271  s->nb_planes = 1;
272  s->colorspace_type = 1;
273  break;
274 /* case AV_PIX_FMT_RGB32:
275  s->colorspace= 1;
276  break;*/
277  }
278 
279  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
280  &s->chroma_v_shift);
281  if (ret)
282  return ret;
283 
284  s->input_picture = av_frame_alloc();
285  if (!s->input_picture)
286  return AVERROR(ENOMEM);
287 
288  if ((ret = get_encode_buffer(s, s->input_picture)) < 0)
289  return ret;
290 
291  enc->emu_edge_buffer = av_calloc(avctx->width + 128, 2 * (2 * MB_SIZE + HTAPS_MAX - 1));
292  if (!enc->emu_edge_buffer)
293  return AVERROR(ENOMEM);
294 
295  if (enc->motion_est == FF_ME_ITER) {
296  int size= s->b_width * s->b_height << 2*s->block_max_depth;
297  for(i=0; i<s->max_ref_frames; i++){
298  s->ref_mvs[i] = av_calloc(size, sizeof(*s->ref_mvs[i]));
299  s->ref_scores[i] = av_calloc(size, sizeof(*s->ref_scores[i]));
300  if (!s->ref_mvs[i] || !s->ref_scores[i])
301  return AVERROR(ENOMEM);
302  }
303  }
304 
305  return 0;
306 }
307 
// Sum of the w x h pixels starting at pix with row stride line_size.
// Near copy & paste from dsputil, FIXME deduplicate.
static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
{
    int sum = 0;

    for (int i = 0; i < h; i++) {
        const uint8_t *row = pix + i * line_size;
        for (int j = 0; j < w; j++)
            sum += row[j];
    }
    return sum;
}
323 
324 //near copy & paste from dsputil, FIXME
325 static int pix_norm1(const uint8_t * pix, int line_size, int w)
326 {
327  int s, i, j;
328  const uint32_t *sq = ff_square_tab + 256;
329 
330  s = 0;
331  for (i = 0; i < w; i++) {
332  for (j = 0; j < w; j ++) {
333  s += sq[pix[0]];
334  pix ++;
335  }
336  pix += line_size - w;
337  }
338  return s;
339 }
340 
341 static inline int get_penalty_factor(int lambda, int lambda2, int type){
342  switch(type&0xFF){
343  default:
344  case FF_CMP_SAD:
345  return lambda>>FF_LAMBDA_SHIFT;
346  case FF_CMP_DCT:
347  return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
348  case FF_CMP_W53:
349  return (4*lambda)>>(FF_LAMBDA_SHIFT);
350  case FF_CMP_W97:
351  return (2*lambda)>>(FF_LAMBDA_SHIFT);
352  case FF_CMP_SATD:
353  case FF_CMP_DCT264:
354  return (2*lambda)>>FF_LAMBDA_SHIFT;
355  case FF_CMP_RD:
356  case FF_CMP_PSNR:
357  case FF_CMP_SSE:
358  case FF_CMP_NSSE:
359  return lambda2>>FF_LAMBDA_SHIFT;
360  case FF_CMP_BIT:
361  return 1;
362  }
363 }
364 
365 //FIXME copy&paste
366 #define P_LEFT P[1]
367 #define P_TOP P[2]
368 #define P_TOPRIGHT P[3]
369 #define P_MEDIAN P[4]
370 #define P_MV1 P[9]
371 #define FLAG_QPEL 1 //must be 1
372 
/**
 * Recursively choose and encode the coding mode for the quad-tree block at
 * (x, y) on the given level.
 *
 * Three candidates are evaluated by rate-distortion cost:
 *  - inter: motion search over all reference frames, coded into the trial
 *    range coder 'pc',
 *  - intra: flat DC color per plane, coded into the trial range coder 'ic',
 *  - split: recurse into the four children (unless at block_max_depth).
 * The cheapest candidate's trial bitstream and coder state are committed
 * back into s->c / s->block_state.
 *
 * @return the RD score of the chosen candidate
 */
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
{
    SnowContext *const s = &enc->com;
    MotionEstContext *const c = &enc->m.s.me;
    uint8_t p_buffer[1024];                    // trial bitstream for the inter candidate
    uint8_t i_buffer[1024];                    // trial bitstream for the intra candidate
    uint8_t p_state[sizeof(s->block_state)];   // trial coder contexts (inter)
    uint8_t i_state[sizeof(s->block_state)];   // trial coder contexts (intra)
    RangeCoder pc, ic;
    uint8_t *pbbak= s->c.bytestream;
    uint8_t *pbbak_start= s->c.bytestream_start;
    int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
    const int w= s->b_width << s->block_max_depth;
    const int h= s->b_height << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    const int block_w= 1<<(LOG2_MB_SIZE - level);
    int trx= (x+1)<<rem_depth;
    int try= (y+1)<<rem_depth;
    // neighbouring blocks used as context / predictors; null_block at edges
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
    const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int pl = left->color[0];
    int pcb= left->color[1];
    int pcr= left->color[2];
    int pmx, pmy;
    int mx=0, my=0;
    int l,cr,cb;
    const int stride= s->current_picture->linesize[0];
    const int uvstride= s->current_picture->linesize[1];
    const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y* stride)*block_w,
                                             s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
                                             s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
    int P[10][2];          // motion-vector predictor candidates for EPZS
    int16_t last_mv[3][2];
    int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
    const int shift= 1+qpel;
    int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
    int mx_context= av_log2(2*FFABS(left->mx - top->mx));
    int my_context= av_log2(2*FFABS(left->my - top->my));
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int ref, best_ref, ref_score, ref_mx, ref_my;
    int range = MAX_MV >> (1 + qpel);

    av_assert0(sizeof(s->block_state) >= 256);
    if(s->keyframe){
        // keyframes are all-intra with implicit block structure
        set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
        return 0;
    }

// clip predictors / edge ?

    P_LEFT[0]= left->mx;
    P_LEFT[1]= left->my;
    P_TOP [0]= top->mx;
    P_TOP [1]= top->my;
    P_TOPRIGHT[0]= tr->mx;
    P_TOPRIGHT[1]= tr->my;

    // previous iteration's vectors of this block and of the not-yet-updated
    // right/bottom neighbours serve as additional EPZS candidates
    last_mv[0][0]= s->block[index].mx;
    last_mv[0][1]= s->block[index].my;
    last_mv[1][0]= right->mx;
    last_mv[1][1]= right->my;
    last_mv[2][0]= bottom->mx;
    last_mv[2][1]= bottom->my;

    enc->m.s.c.mb_stride = 2;
    enc->m.s.c.mb_x =
    enc->m.s.c.mb_y = 0;
    c->skip= 0;

    av_assert1(c-> stride == stride);
    av_assert1(c->uvstride == uvstride);

    c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
    c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
    c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
    c->current_mv_penalty = c->mv_penalty[enc->m.s.f_code=1] + MAX_DMV;

    // search window relative to the block, then clamped to the MV range
    c->xmin = - x*block_w - 16+3;
    c->ymin = - y*block_w - 16+3;
    c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
    c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;

    c->xmin = FFMAX(c->xmin,-range);
    c->xmax = FFMIN(c->xmax, range);
    c->ymin = FFMAX(c->ymin,-range);
    c->ymax = FFMIN(c->ymax, range);

    // clip the predictors into the search window (in subpel units)
    if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
    if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
    if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
    if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
    if(P_TOPRIGHT[0] < (c->xmin * (1<<shift))) P_TOPRIGHT[0]= (c->xmin * (1<<shift));
    if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
    if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);

    P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
    P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);

    if (!y) {
        c->pred_x= P_LEFT[0];
        c->pred_y= P_LEFT[1];
    } else {
        c->pred_x = P_MEDIAN[0];
        c->pred_y = P_MEDIAN[1];
    }

    // full motion search over every available reference frame
    score= INT_MAX;
    best_ref= 0;
    for(ref=0; ref<s->ref_frames; ref++){
        init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);

        ref_score = ff_epzs_motion_search(&enc->m.s, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
                                          (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);

        av_assert2(ref_mx >= c->xmin);
        av_assert2(ref_mx <= c->xmax);
        av_assert2(ref_my >= c->ymin);
        av_assert2(ref_my <= c->ymax);

        ref_score = c->sub_motion_search(&enc->m.s, &ref_mx, &ref_my, ref_score,
                                         0, 0, level-LOG2_MB_SIZE+4, block_w);
        ref_score = ff_get_mb_score(&enc->m.s, ref_mx, ref_my, 0, 0,
                                    level-LOG2_MB_SIZE+4, block_w, 0);
        // penalize higher reference indices (they cost more bits to code)
        ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
        if(s->ref_mvs[ref]){
            // remember per-reference results for the iterative ME passes
            s->ref_mvs[ref][index][0]= ref_mx;
            s->ref_mvs[ref][index][1]= ref_my;
            s->ref_scores[ref][index]= ref_score;
        }
        if(score > ref_score){
            score= ref_score;
            best_ref= ref;
            mx= ref_mx;
            my= ref_my;
        }
    }
    //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2

  //  subpel search
    base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
    // trial-code the inter candidate into its own buffer/state copy
    pc= s->c;
    pc.bytestream_start=
    pc.bytestream= p_buffer; //FIXME end/start? and at the other stoo
    memcpy(p_state, s->block_state, sizeof(s->block_state));

    if(level!=s->block_max_depth)
        put_rac(&pc, &p_state[4 + s_context], 1);
    put_rac(&pc, &p_state[1 + left->type + top->type], 0);
    if(s->ref_frames > 1)
        put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
    pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
    put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
    put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
    p_len= pc.bytestream - pc.bytestream_start;
    score += (enc->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;

    // intra candidate: flat DC per plane; distortion is the block variance
    block_s= block_w*block_w;
    sum = pix_sum(current_data[0], stride, block_w, block_w);
    l= (sum + block_s/2)/block_s;
    iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;

    if (s->nb_planes > 2) {
        block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
        sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
        cb= (sum + block_s/2)/block_s;
//        iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
        sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
        cr= (sum + block_s/2)/block_s;
//        iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
    }else
        cb = cr = 0;

    // trial-code the intra candidate into its own buffer/state copy
    ic= s->c;
    ic.bytestream_start=
    ic.bytestream= i_buffer; //FIXME end/start? and at the other stoo
    memcpy(i_state, s->block_state, sizeof(s->block_state));
    if(level!=s->block_max_depth)
        put_rac(&ic, &i_state[4 + s_context], 1);
    put_rac(&ic, &i_state[1 + left->type + top->type], 1);
    put_symbol(&ic, &i_state[32], l-pl , 1);
    if (s->nb_planes > 2) {
        put_symbol(&ic, &i_state[64], cb-pcb, 1);
        put_symbol(&ic, &i_state[96], cr-pcr, 1);
    }
    i_len= ic.bytestream - ic.bytestream_start;
    iscore += (enc->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;

    av_assert1(iscore < 255*255*256 + enc->lambda2*10);
    av_assert1(iscore >= 0);
    av_assert1(l>=0 && l<=255);
    av_assert1(pl>=0 && pl<=255);

    if(level==0){
        // accumulate a scene-change heuristic from intra vs inter cost
        int varc= iscore >> 8;
        int vard= score >> 8;
        if (vard <= 64 || vard < varc)
            c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
        else
            c->scene_change_score += enc->m.s.c.qscale;
    }

    if(level!=s->block_max_depth){
        // try splitting into four children; the split flag is coded into
        // the real coder and only kept if the split wins
        put_rac(&s->c, &s->block_state[4 + s_context], 0);
        score2 = encode_q_branch(enc, level+1, 2*x+0, 2*y+0);
        score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+0);
        score2+= encode_q_branch(enc, level+1, 2*x+0, 2*y+1);
        score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+1);
        score2+= enc->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead

        if(score2 < score && score2 < iscore)
            return score2;
    }

    if(iscore < score){
        // commit the intra trial bitstream and coder state
        pred_mv(s, &pmx, &pmy, 0, left, top, tr);
        memcpy(pbbak, i_buffer, i_len);
        s->c= ic;
        s->c.bytestream_start= pbbak_start;
        s->c.bytestream= pbbak + i_len;
        set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
        memcpy(s->block_state, i_state, sizeof(s->block_state));
        return iscore;
    }else{
        // commit the inter trial bitstream and coder state
        memcpy(pbbak, p_buffer, p_len);
        s->c= pc;
        s->c.bytestream_start= pbbak_start;
        s->c.bytestream= pbbak + p_len;
        set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
        memcpy(s->block_state, p_state, sizeof(s->block_state));
        return score;
    }
}
610 
/**
 * Serialize the already-decided block tree at (x, y) into the real range
 * coder s->c.  Unlike encode_q_branch() no search is performed here: the
 * BlockNode array is simply written out, merging 2x2 groups of identical
 * children into a single "no split" flag.
 */
static void encode_q_branch2(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    BlockNode *b= &s->block[index];
    // neighbours provide the coding contexts and predictors; null_block at edges
    const BlockNode *left = x ? &s->block[index-1] : &null_block;
    const BlockNode *top = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int pl = left->color[0];
    int pcb= left->color[1];
    int pcr= left->color[2];
    int pmx, pmy;
    int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
    int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
    int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;

    if(s->keyframe){
        // keyframes: all-intra, block structure implicit, nothing coded
        set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
        return;
    }

    if(level!=s->block_max_depth){
        if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
            put_rac(&s->c, &s->block_state[4 + s_context], 1); // no split
        }else{
            put_rac(&s->c, &s->block_state[4 + s_context], 0); // split into 4
            encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
            encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
            encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
            encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
            return;
        }
    }
    if(b->type & BLOCK_INTRA){
        // intra: code per-plane DC colors as deltas from the left neighbour
        pred_mv(s, &pmx, &pmy, 0, left, top, tr);
        put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
        put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
        if (s->nb_planes > 2) {
            put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
            put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
        }
        set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
    }else{
        // inter: code reference index and motion vector residual
        pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
        put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
        if(s->ref_frames > 1)
            put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
        put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
        put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
        set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
    }
}
666 
/**
 * Compute the best flat intra color (DC) for the block at (mb_x, mb_y) in
 * the given plane, by a weighted least-squares fit: the block is temporarily
 * marked intra with color 0, the OBMC contribution of the neighbours is
 * rendered, and the residual against the source is solved for the DC value
 * that minimizes the OBMC-weighted error.
 *
 * @return the fitted DC value, clipped to 0..255
 */
static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
{
    SnowContext *const s = &enc->com;
    int i, x2, y2;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    IDWTELEM *dst = enc->obmc_scratchpad + plane_index * block_size * block_size * 4; //FIXME change to unsigned
    const int b_stride = s->b_width << s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int index= mb_x + mb_y*b_stride;
    BlockNode *b= &s->block[index];
    BlockNode backup= *b;            // restored before returning
    int ab=0;                        // numerator: obmc-weighted residual sum
    int aa=0;                        // denominator: sum of squared obmc weights

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above

    // temporarily make the block intra/black so add_yblock() renders only
    // the neighbours' overlapped contribution
    b->type|= BLOCK_INTRA;
    b->color[plane_index]= 0;
    memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));

    // accumulate the four overlapping neighbour quadrants
    for(i=0; i<4; i++){
        int mb_x2= mb_x + (i &1) - 1;
        int mb_y2= mb_y + (i>>1) - 1;
        int x= block_w*mb_x2 + block_w/2;
        int y= block_h*mb_y2 + block_h/2;

        add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
                   x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);

        for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
            for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
                int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
                int obmc_v= obmc[index];
                int d;
                // fold the mirrored weights back in at picture borders
                if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
                if(x<0) obmc_v += obmc[index + block_w];
                if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
                if(x+block_w>w) obmc_v += obmc[index - block_w];
                //FIXME precalculate this or simplify it somehow else

                d = -dst[index] + (1<<(FRAC_BITS-1));
                dst[index] = d;
                ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
                aa += obmc_v * obmc_v; //FIXME precalculate this
            }
        }
    }
    *b= backup;

    return av_clip_uint8( ROUNDED_DIV((int64_t)ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
}
726 
/**
 * Roughly estimate the number of bits needed to code the block at (x, y):
 * intra blocks are costed by the color delta against the left neighbour,
 * inter blocks by the motion-vector residual and reference index.  The
 * estimate models put_symbol()'s exp-Golomb-like length (see table below),
 * not the actual adaptive range-coder output.
 *
 * @param w horizontal distance to the top-right context block
 * @return estimated bit count, 0 for out-of-picture coordinates
 */
static inline int get_block_bits(SnowContext *s, int x, int y, int w){
    const int b_stride = s->b_width << s->block_max_depth;
    const int b_height = s->b_height<< s->block_max_depth;
    int index= x + y*b_stride;
    const BlockNode *b = &s->block[index];
    const BlockNode *left = x ? &s->block[index-1] : &null_block;
    const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
    const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
    const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
    int dmx, dmy;
//  int mx_context= av_log2(2*FFABS(left->mx - top->mx));
//  int my_context= av_log2(2*FFABS(left->my - top->my));

    if(x<0 || x>=b_stride || y>=b_height)
        return 0;
/*
1            0      0
01X          1-2    1
001XX        3-6    2-3
0001XXX      7-14   4-7
00001XXXX   15-30   8-15
*/
//FIXME try accurate rate
//FIXME intra and inter predictors if surrounding blocks are not the same type
    if(b->type & BLOCK_INTRA){
        return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
                   + av_log2(2*FFABS(left->color[1] - b->color[1]))
                   + av_log2(2*FFABS(left->color[2] - b->color[2])));
    }else{
        pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
        dmx-= b->mx;
        dmy-= b->my;
        return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
                    + av_log2(2*FFABS(dmy))
                    + av_log2(2*b->ref));
    }
}
764 
/**
 * Compute the rate-distortion cost of the current motion/intra decision for
 * the block at (mb_x, mb_y) in one plane: render the block's OBMC-weighted
 * prediction into current_picture, measure distortion against the input
 * with the configured me_cmp function, and add the estimated bit cost of
 * the affected blocks scaled by the penalty factor.
 *
 * @param obmc_edged OBMC weight table with picture-edge folding pre-applied
 * @return distortion + rate * penalty_factor
 */
static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
                        int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
{
    SnowContext *const s = &enc->com;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst= s->current_picture->data[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    IDWTELEM *pred = enc->obmc_scratchpad + plane_index * block_size * block_size * 4;
    uint8_t *cur = s->scratchbuf;
    uint8_t *tmp = enc->emu_edge_buffer;
    const int b_stride = s->b_width << s->block_max_depth;
    const int b_height = s->b_height<< s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int distortion;
    int rate= 0;
    const int penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
    // the OBMC window is 2x the block size, centered on the block
    int sx= block_w*mb_x - block_w/2;
    int sy= block_h*mb_y - block_h/2;
    int x0= FFMAX(0,-sx);
    int y0= FFMAX(0,-sy);
    int x1= FFMIN(block_w*2, w-sx);
    int y1= FFMIN(block_h*2, h-sy);
    int i,x,y;

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below chckinhg only block_w

    ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);

    // blend this block's prediction with the neighbours' precomputed
    // contribution (pred) using the edge-folded OBMC weights
    for(y=y0; y<y1; y++){
        const uint8_t *obmc1= obmc_edged[y];
        const IDWTELEM *pred1 = pred + y*obmc_stride;
        uint8_t *cur1 = cur + y*ref_stride;
        uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
        for(x=x0; x<x1; x++){
#if FRAC_BITS >= LOG2_OBMC_MAX
            int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
#else
            int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
#endif
            v = (v + pred1[x]) >> FRAC_BITS;
            if(v&(~255)) v= ~(v>>31);  // branchless clip to 0..255
            dst1[x] = v;
        }
    }

    /* copy the regions where obmc[] = (uint8_t)256 */
    // at picture corners the folded weight saturates, so the prediction can
    // be copied directly instead of blended
    if(LOG2_OBMC_MAX == 8
        && (mb_x == 0 || mb_x == b_stride-1)
        && (mb_y == 0 || mb_y == b_height-1)){
        if(mb_x == 0)
            x1 = block_w;
        else
            x0 = block_w;
        if(mb_y == 0)
            y1 = block_h;
        else
            y0 = block_h;
        for(y=y0; y<y1; y++)
            memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
    }

    if(block_w==16){
        /* FIXME rearrange dsputil to fit 32x32 cmp functions */
        /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
        /* FIXME cmps overlap but do not cover the wavelet's whole support.
         * So improving the score of one block is not strictly guaranteed
         * to improve the score of the whole frame, thus iterative motion
         * estimation does not always converge. */
        if(s->avctx->me_cmp == FF_CMP_W97)
            distortion = ff_w97_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
        else if(s->avctx->me_cmp == FF_CMP_W53)
            distortion = ff_w53_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
        else{
            distortion = 0;
            for(i=0; i<4; i++){
                int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
                distortion += enc->m.s.me.me_cmp[0](&enc->m.s, src + off, dst + off, ref_stride, 16);
            }
        }
    }else{
        av_assert2(block_w==8);
        distortion = enc->m.s.me.me_cmp[0](&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
    }

    if(plane_index==0){
        for(i=0; i<4; i++){
/* ..RRr
 * .RXx.
 * rxx..
 */
            rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
        }
        if(mb_x == b_stride-2)
            rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
    }
    return distortion + rate*penalty_factor;
}
868 
/**
 * Compute the rate-distortion cost of a 2x2 group of blocks at (mb_x, mb_y)
 * together with its ring of neighbours: the 3x3 neighbourhood is re-rendered
 * into current_picture via OBMC, out-of-picture regions are patched from the
 * source, and distortion plus estimated bit cost is returned.
 *
 * @return distortion + rate * penalty_factor for the 3x3 block area
 */
static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
{
    SnowContext *const s = &enc->com;
    int i, y2;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst= s->current_picture->data[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst
    // const has only been removed from zero_dst to suppress a warning
    static IDWTELEM zero_dst[4096]; //FIXME
    const int b_stride = s->b_width << s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int distortion= 0;
    int rate= 0;
    const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below

    // re-render each of the 9 blocks in the 3x3 neighbourhood
    for(i=0; i<9; i++){
        int mb_x2= mb_x + (i%3) - 1;
        int mb_y2= mb_y + (i/3) - 1;
        int x= block_w*mb_x2 + block_w/2;
        int y= block_h*mb_y2 + block_h/2;

        add_yblock(s, 0, NULL, zero_dst, dst, obmc,
                   x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);

        //FIXME find a cleaner/simpler way to skip the outside stuff
        // patch regions outside the picture from the source so the cmp
        // below only measures real reconstruction error
        for(y2= y; y2<0; y2++)
            memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
        for(y2= h; y2<y+block_h; y2++)
            memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
        if(x<0){
            for(y2= y; y2<y+block_h; y2++)
                memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
        }
        if(x+block_w > w){
            for(y2= y; y2<y+block_h; y2++)
                memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
        }

        av_assert1(block_w== 8 || block_w==16);
        distortion += enc->m.s.me.me_cmp[block_w==8](&enc->m.s, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
    }

    if(plane_index==0){
        BlockNode *b= &s->block[mb_x+mb_y*b_stride];
        int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);

/* ..RRRr
 * .RXXx.
 * .RXXx.
 * rxxx.
 */
        // a merged 2x2 group is coded as one block; otherwise cost each cell
        if(merged)
            rate = get_block_bits(s, mb_x, mb_y, 2);
        for(i=merged?4:0; i<9; i++){
            static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
            rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
        }
    }
    return distortion + rate*penalty_factor;
}
939 
/**
 * Encode one wavelet subband with the "c0run" coder: each coefficient's
 * significance is range-coded with a context derived from already-coded
 * neighbours (and the parent subband), while positions whose whole context
 * is zero are handled as explicit runs of zeros measured in a first pass.
 *
 * @param src    subband coefficients, w x h with the given stride
 * @param parent next-coarser subband used as additional context, or NULL
 * @return 0 on success, AVERROR(ENOMEM) if the output buffer could overflow
 */
static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
    const int w= b->width;
    const int h= b->height;
    int x, y;

    if(1){
        int run=0;
        int *runs = s->run_buffer;
        int run_index=0;
        int max_index;

        /* First pass: collect the lengths of all zero runs over positions
         * whose neighbour/parent context is entirely zero. */
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v, p=0;
                int /*ll=0, */l=0, lt=0, t=0, rt=0;
                v= src[x + y*stride];

                if(y){
                    t= src[x + (y-1)*stride];
                    if(x){
                        lt= src[x - 1 + (y-1)*stride];
                    }
                    if(x + 1 < w){
                        rt= src[x + 1 + (y-1)*stride];
                    }
                }
                if(x){
                    l= src[x - 1 + y*stride];
                    /*if(x > 1){
                        if(orientation==1) ll= src[y + (x-2)*stride];
                        else ll= src[x - 2 + y*stride];
                    }*/
                }
                if(parent){
                    int px= x>>1;
                    int py= y>>1;
                    if(px<b->parent->width && py<b->parent->height)
                        p= parent[px + py*2*stride];
                }
                if(!(/*ll|*/l|lt|t|rt|p)){
                    /* zero context: a nonzero value terminates the current
                     * run, a zero value extends it */
                    if(v){
                        runs[run_index++]= run;
                        run=0;
                    }else{
                        run++;
                    }
                }
            }
        }
        max_index= run_index;
        runs[run_index++]= run;  /* terminating (possibly empty) run */
        run_index=0;
        run= runs[run_index++];

        put_symbol2(&s->c, b->state[30], max_index, 0);
        if(run_index <= max_index)
            put_symbol2(&s->c, b->state[1], run, 3);

        /* Second pass: emit the coefficients, consuming the pre-measured
         * runs wherever the context is all-zero. */
        for(y=0; y<h; y++){
            /* conservative worst-case output bound for one row */
            if(s->c.bytestream_end - s->c.bytestream < w*40){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return AVERROR(ENOMEM);
            }
            for(x=0; x<w; x++){
                int v, p=0;
                int /*ll=0, */l=0, lt=0, t=0, rt=0;
                v= src[x + y*stride];

                if(y){
                    t= src[x + (y-1)*stride];
                    if(x){
                        lt= src[x - 1 + (y-1)*stride];
                    }
                    if(x + 1 < w){
                        rt= src[x + 1 + (y-1)*stride];
                    }
                }
                if(x){
                    l= src[x - 1 + y*stride];
                    /*if(x > 1){
                        if(orientation==1) ll= src[y + (x-2)*stride];
                        else ll= src[x - 2 + y*stride];
                    }*/
                }
                if(parent){
                    int px= x>>1;
                    int py= y>>1;
                    if(px<b->parent->width && py<b->parent->height)
                        p= parent[px + py*2*stride];
                }
                if(/*ll|*/l|lt|t|rt|p){
                    /* context = log2 of a weighted sum of the neighbours */
                    int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));

                    put_rac(&s->c, &b->state[0][context], !!v);
                }else{
                    if(!run){
                        /* run exhausted: this coefficient must be the
                         * nonzero one that ended it in the first pass */
                        run= runs[run_index++];

                        if(run_index <= max_index)
                            put_symbol2(&s->c, b->state[1], run, 3);
                        av_assert2(v);
                    }else{
                        run--;
                        av_assert2(!v);
                    }
                }
                if(v){
                    int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
                    int l2= 2*FFABS(l) + (l<0);
                    int t2= 2*FFABS(t) + (t<0);

                    /* magnitude-1, then the sign with its own context */
                    put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
                    put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
                }
            }
        }
    }
    return 0;
}
1059 
1060 static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
1061 // encode_subband_qtree(s, b, src, parent, stride, orientation);
1062 // encode_subband_z0run(s, b, src, parent, stride, orientation);
1063  return encode_subband_c0run(s, b, src, parent, stride, orientation);
1064 // encode_subband_dzr(s, b, src, parent, stride, orientation);
1065 }
1066 
1067 static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3],
1068  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1069 {
1070  SnowContext *const s = &enc->com;
1071  const int b_stride= s->b_width << s->block_max_depth;
1072  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1073  BlockNode backup= *block;
1074  int rd;
1075 
1076  av_assert2(mb_x>=0 && mb_y>=0);
1077  av_assert2(mb_x<b_stride);
1078 
1079  block->color[0] = p[0];
1080  block->color[1] = p[1];
1081  block->color[2] = p[2];
1082  block->type |= BLOCK_INTRA;
1083 
1084  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged) + enc->intra_penalty;
1085 
1086 //FIXME chroma
1087  if(rd < *best_rd){
1088  *best_rd= rd;
1089  return 1;
1090  }else{
1091  *block= backup;
1092  return 0;
1093  }
1094 }
1095 
1096 /* special case for int[2] args we discard afterwards,
1097  * fixes compilation problem with gcc 2.95 */
1099  int mb_x, int mb_y, int p0, int p1,
1100  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1101 {
1102  SnowContext *const s = &enc->com;
1103  const int b_stride = s->b_width << s->block_max_depth;
1104  BlockNode *block = &s->block[mb_x + mb_y * b_stride];
1105  BlockNode backup = *block;
1106  unsigned value;
1107  int rd, index;
1108 
1109  av_assert2(mb_x >= 0 && mb_y >= 0);
1110  av_assert2(mb_x < b_stride);
1111 
1112  index = (p0 + 31 * p1) & (ME_CACHE_SIZE-1);
1113  value = enc->me_cache_generation + (p0 >> 10) + p1 * (1 << 6) + (block->ref << 12);
1114  if (enc->me_cache[index] == value)
1115  return 0;
1116  enc->me_cache[index] = value;
1117 
1118  block->mx = p0;
1119  block->my = p1;
1120  block->type &= ~BLOCK_INTRA;
1121 
1122  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged);
1123 
1124 //FIXME chroma
1125  if (rd < *best_rd) {
1126  *best_rd = rd;
1127  return 1;
1128  } else {
1129  *block = backup;
1130  return 0;
1131  }
1132 }
1133 
1134 static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y,
1135  int p0, int p1, int ref, int *best_rd)
1136 {
1137  SnowContext *const s = &enc->com;
1138  const int b_stride= s->b_width << s->block_max_depth;
1139  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1140  BlockNode backup[4];
1141  unsigned value;
1142  int rd, index;
1143 
1144  /* We don't initialize backup[] during variable declaration, because
1145  * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
1146  * 'int16_t'". */
1147  backup[0] = block[0];
1148  backup[1] = block[1];
1149  backup[2] = block[b_stride];
1150  backup[3] = block[b_stride + 1];
1151 
1152  av_assert2(mb_x>=0 && mb_y>=0);
1153  av_assert2(mb_x<b_stride);
1154  av_assert2(((mb_x|mb_y)&1) == 0);
1155 
1156  index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
1157  value = enc->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
1158  if (enc->me_cache[index] == value)
1159  return 0;
1160  enc->me_cache[index] = value;
1161 
1162  block->mx= p0;
1163  block->my= p1;
1164  block->ref= ref;
1165  block->type &= ~BLOCK_INTRA;
1166  block[1]= block[b_stride]= block[b_stride+1]= *block;
1167 
1168  rd = get_4block_rd(enc, mb_x, mb_y, 0);
1169 
1170 //FIXME chroma
1171  if(rd < *best_rd){
1172  *best_rd= rd;
1173  return 1;
1174  }else{
1175  block[0]= backup[0];
1176  block[1]= backup[1];
1177  block[b_stride]= backup[2];
1178  block[b_stride+1]= backup[3];
1179  return 0;
1180  }
1181 }
1182 
/**
 * Iteratively optimize the motion vector field by repeated local
 * rate-distortion search (up to 25 passes over all blocks), followed, when
 * the block tree has depth 1, by a merge pass that tries a single shared
 * motion vector for each 2x2 group of blocks.
 */
static void iterative_me(SnowEncContext *enc)
{
    SnowContext *const s = &enc->com;
    int pass, mb_x, mb_y;
    const int b_width = s->b_width  << s->block_max_depth;
    const int b_height= s->b_height << s->block_max_depth;
    const int b_stride= b_width;
    int color[3];

    /* Dry-run the block encoder once — presumably to prime ref_mvs[] /
     * ref_scores[] used below (encode_q_branch() is defined elsewhere;
     * TODO confirm). The range coder and context states are saved and
     * restored so no bits are actually emitted. */
    {
        RangeCoder r = s->c;
        uint8_t state[sizeof(s->block_state)];
        memcpy(state, s->block_state, sizeof(s->block_state));
        for(mb_y= 0; mb_y<s->b_height; mb_y++)
            for(mb_x= 0; mb_x<s->b_width; mb_x++)
                encode_q_branch(enc, 0, mb_x, mb_y);
        s->c = r;
        memcpy(s->block_state, state, sizeof(s->block_state));
    }

    for(pass=0; pass<25; pass++){
        int change= 0;

        for(mb_y= 0; mb_y<b_height; mb_y++){
            for(mb_x= 0; mb_x<b_width; mb_x++){
                int dia_change, i, j, ref;
                int best_rd= INT_MAX, ref_rd;
                BlockNode backup, ref_b;
                const int index= mb_x + mb_y * b_stride;
                BlockNode *block= &s->block[index];
                /* the eight neighbours, NULL outside the block grid */
                BlockNode *tb =                   mb_y            ? &s->block[index-b_stride  ] : NULL;
                BlockNode *lb = mb_x                              ? &s->block[index         -1] : NULL;
                BlockNode *rb = mb_x+1<b_width                    ? &s->block[index         +1] : NULL;
                BlockNode *bb =                   mb_y+1<b_height ? &s->block[index+b_stride  ] : NULL;
                BlockNode *tlb= mb_x           && mb_y            ? &s->block[index-b_stride-1] : NULL;
                BlockNode *trb= mb_x+1<b_width && mb_y            ? &s->block[index-b_stride+1] : NULL;
                BlockNode *blb= mb_x           && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
                BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
                const int b_w= (MB_SIZE >> s->block_max_depth);
                uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];

                /* blocks unchanged since their last optimization are
                 * skipped in later passes */
                if(pass && (block->type & BLOCK_OPT))
                    continue;
                block->type |= BLOCK_OPT;

                backup= *block;

                if (!enc->me_cache_generation)
                    memset(enc->me_cache, 0, sizeof(enc->me_cache));
                enc->me_cache_generation += 1<<22;

                //FIXME precalculate
                /* Build the OBMC weight window for this block, folding the
                 * weight of off-picture neighbours into the edge rows and
                 * columns. */
                {
                    int x, y;
                    for (y = 0; y < b_w * 2; y++)
                        memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
                    if(mb_x==0)
                        for(y=0; y<b_w*2; y++)
                            memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
                    if(mb_x==b_stride-1)
                        for(y=0; y<b_w*2; y++)
                            memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
                    if(mb_y==0){
                        for(x=0; x<b_w*2; x++)
                            obmc_edged[0][x] += obmc_edged[b_w-1][x];
                        for(y=1; y<b_w; y++)
                            memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
                    }
                    if(mb_y==b_height-1){
                        for(x=0; x<b_w*2; x++)
                            obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
                        for(y=b_w; y<b_w*2-1; y++)
                            memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
                    }
                }

                //skip stuff outside the picture
                if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
                    const uint8_t *src = s->input_picture->data[0];
                    uint8_t *dst= s->current_picture->data[0];
                    const int stride= s->current_picture->linesize[0];
                    const int block_w= MB_SIZE >> s->block_max_depth;
                    const int block_h= MB_SIZE >> s->block_max_depth;
                    const int sx= block_w*mb_x - block_w/2;
                    const int sy= block_h*mb_y - block_h/2;
                    const int w= s->plane[0].width;
                    const int h= s->plane[0].height;
                    int y;

                    /* copy input over reconstruction outside the picture so
                     * that area contributes zero distortion */
                    for(y=sy; y<0; y++)
                        memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
                    for(y=h; y<sy+block_h*2; y++)
                        memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
                    if(sx<0){
                        for(y=sy; y<sy+block_h*2; y++)
                            memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
                    }
                    if(sx+block_w*2 > w){
                        for(y=sy; y<sy+block_h*2; y++)
                            memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
                    }
                }

                // intra(black) = neighbors' contribution to the current block
                for(i=0; i < s->nb_planes; i++)
                    color[i]= get_dc(enc, mb_x, mb_y, i);

                // get previous score (cannot be cached due to OBMC)
                if(pass > 0 && (block->type&BLOCK_INTRA)){
                    int color0[3]= {block->color[0], block->color[1], block->color[2]};
                    check_block_intra(enc, mb_x, mb_y, color0, obmc_edged, &best_rd);
                }else
                    check_block_inter(enc, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);

                ref_b= *block;
                ref_rd= best_rd;
                for(ref=0; ref < s->ref_frames; ref++){
                    int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
                    if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
                        continue;
                    block->ref= ref;
                    best_rd= INT_MAX;

                    /* candidate predictors: per-ref MV, zero MV and the
                     * MVs of the four direct neighbours */
                    check_block_inter(enc, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
                    check_block_inter(enc, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
                    if(tb)
                        check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
                    if(lb)
                        check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
                    if(rb)
                        check_block_inter(enc, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
                    if(bb)
                        check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);

                    /* fullpel ME */
                    //FIXME avoid subpel interpolation / round to nearest integer
                    /* diamond search with step 4 (fullpel in 1/4-pel units),
                     * repeated until no candidate improves */
                    do{
                        int newx = block->mx;
                        int newy = block->my;
                        int dia_size = enc->iterative_dia_size ? enc->iterative_dia_size : FFMAX(s->avctx->dia_size, 1);
                        dia_change=0;
                        for(i=0; i < dia_size; i++){
                            for(j=0; j<i; j++){
                                dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
                                dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
                                dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
                                dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
                            }
                        }
                    }while(dia_change);
                    /* subpel ME */
                    do{
                        static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
                        dia_change=0;
                        for(i=0; i<8; i++)
                            dia_change |= check_block_inter(enc, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
                    }while(dia_change);
                    //FIXME or try the standard 2 pass qpel or similar

                    mvr[0][0]= block->mx;
                    mvr[0][1]= block->my;
                    if(ref_rd > best_rd){
                        ref_rd= best_rd;
                        ref_b= *block;
                    }
                }
                best_rd= ref_rd;
                *block= ref_b;
                check_block_intra(enc, mb_x, mb_y, color, obmc_edged, &best_rd);
                //FIXME RD style color selection
                if(!same_block(block, &backup)){
                    /* this block changed: its neighbours must be
                     * re-optimized in the next pass */
                    if(tb ) tb ->type &= ~BLOCK_OPT;
                    if(lb ) lb ->type &= ~BLOCK_OPT;
                    if(rb ) rb ->type &= ~BLOCK_OPT;
                    if(bb ) bb ->type &= ~BLOCK_OPT;
                    if(tlb) tlb->type &= ~BLOCK_OPT;
                    if(trb) trb->type &= ~BLOCK_OPT;
                    if(blb) blb->type &= ~BLOCK_OPT;
                    if(brb) brb->type &= ~BLOCK_OPT;
                    change ++;
                }
            }
        }
        av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
        if(!change)
            break;
    }

    /* 4MV merge pass: try to replace each non-uniform 2x2 group of small
     * blocks with a single shared motion vector */
    if(s->block_max_depth == 1){
        int change= 0;
        for(mb_y= 0; mb_y<b_height; mb_y+=2){
            for(mb_x= 0; mb_x<b_width; mb_x+=2){
                int i;
                int best_rd, init_rd;
                const int index= mb_x + mb_y * b_stride;
                BlockNode *b[4];

                b[0]= &s->block[index];
                b[1]= b[0]+1;
                b[2]= b[0]+b_stride;
                b[3]= b[2]+1;
                if(same_block(b[0], b[1]) &&
                   same_block(b[0], b[2]) &&
                   same_block(b[0], b[3]))
                    continue;

                if (!enc->me_cache_generation)
                    memset(enc->me_cache, 0, sizeof(enc->me_cache));
                enc->me_cache_generation += 1<<22;

                init_rd = best_rd = get_4block_rd(enc, mb_x, mb_y, 0);

                //FIXME more multiref search?
                /* first candidate: rounded average of the four MVs */
                check_4block_inter(enc, mb_x, mb_y,
                                   (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
                                   (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);

                for(i=0; i<4; i++)
                    if(!(b[i]->type&BLOCK_INTRA))
                        check_4block_inter(enc, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);

                if(init_rd != best_rd)
                    change++;
            }
        }
        /* NOTE(review): this looks like a debug statistic but is logged at
         * ERROR level (cf. the AV_LOG_DEBUG message above) — verify intent */
        av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
    }
}
1411 
1412 static void encode_blocks(SnowEncContext *enc, int search)
1413 {
1414  SnowContext *const s = &enc->com;
1415  int x, y;
1416  int w= s->b_width;
1417  int h= s->b_height;
1418 
1419  if (enc->motion_est == FF_ME_ITER && !s->keyframe && search)
1420  iterative_me(enc);
1421 
1422  for(y=0; y<h; y++){
1423  if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
1424  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1425  return;
1426  }
1427  for(x=0; x<w; x++){
1428  if (enc->motion_est == FF_ME_ITER || !search)
1429  encode_q_branch2(s, 0, x, y);
1430  else
1431  encode_q_branch (enc, 0, x, y);
1432  }
1433  }
1434 }
1435 
/**
 * Quantize a subband of DWT coefficients from src into dst using a
 * dead-zone quantizer.
 *
 * @param bias nonzero selects plain truncation, zero selects rounding with
 *             a 3/8*qmul bias (note the inversion below)
 */
static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
    const int w= b->width;
    const int h= b->height;
    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
    int x,y, thres1, thres2;

    /* lossless mode: pass coefficients through untouched */
    if(s->qlog == LOSSLESS_QLOG){
        for(y=0; y<h; y++)
            for(x=0; x<w; x++)
                dst[x + y*stride]= src[x + y*stride];
        return;
    }

    /* the parameter is repurposed: bias!=0 -> bias of 0 (truncation),
     * bias==0 -> bias of 3/8*qmul (rounding) */
    bias= bias ? 0 : (3*qmul)>>3;
    thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
    thres2= 2*thres1;

    if(!bias){
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int i= src[x + y*stride];

                /* single unsigned compare implements the dead-zone test
                 * |i| > thres1 (negative i wraps above thres2) */
                if((unsigned)(i+thres1) > thres2){
                    if(i>=0){
                        i<<= QEXPSHIFT;
                        i/= qmul; //FIXME optimize
                        dst[x + y*stride]= i;
                    }else{
                        /* negate first so the division truncates toward
                         * zero symmetrically */
                        i= -i;
                        i<<= QEXPSHIFT;
                        i/= qmul; //FIXME optimize
                        dst[x + y*stride]= -i;
                    }
                }else
                    dst[x + y*stride]= 0;
            }
        }
    }else{
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int i= src[x + y*stride];

                if((unsigned)(i+thres1) > thres2){
                    if(i>=0){
                        i<<= QEXPSHIFT;
                        i= (i + bias) / qmul; //FIXME optimize
                        dst[x + y*stride]= i;
                    }else{
                        i= -i;
                        i<<= QEXPSHIFT;
                        i= (i + bias) / qmul; //FIXME optimize
                        dst[x + y*stride]= -i;
                    }
                }else
                    dst[x + y*stride]= 0;
            }
        }
    }
}
1496 
1498  const int w= b->width;
1499  const int h= b->height;
1500  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1501  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1502  const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
1503  int x,y;
1504 
1505  if(s->qlog == LOSSLESS_QLOG) return;
1506 
1507  for(y=0; y<h; y++){
1508  for(x=0; x<w; x++){
1509  int i= src[x + y*stride];
1510  if(i<0){
1511  src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
1512  }else if(i>0){
1513  src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
1514  }
1515  }
1516  }
1517 }
1518 
1519 static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1520  const int w= b->width;
1521  const int h= b->height;
1522  int x,y;
1523 
1524  for(y=h-1; y>=0; y--){
1525  for(x=w-1; x>=0; x--){
1526  int i= x + y*stride;
1527 
1528  if(x){
1529  if(use_median){
1530  if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1531  else src[i] -= src[i - 1];
1532  }else{
1533  if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1534  else src[i] -= src[i - 1];
1535  }
1536  }else{
1537  if(y) src[i] -= src[i - stride];
1538  }
1539  }
1540  }
1541 }
1542 
1543 static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1544  const int w= b->width;
1545  const int h= b->height;
1546  int x,y;
1547 
1548  for(y=0; y<h; y++){
1549  for(x=0; x<w; x++){
1550  int i= x + y*stride;
1551 
1552  if(x){
1553  if(use_median){
1554  if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1555  else src[i] += src[i - 1];
1556  }else{
1557  if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1558  else src[i] += src[i - 1];
1559  }
1560  }else{
1561  if(y) src[i] += src[i - stride];
1562  }
1563  }
1564  }
1565 }
1566 
1568  int plane_index, level, orientation;
1569 
1570  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1571  for(level=0; level<s->spatial_decomposition_count; level++){
1572  for(orientation=level ? 1:0; orientation<4; orientation++){
1573  if(orientation==2) continue;
1574  put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1575  }
1576  }
1577  }
1578 }
1579 
1581  int plane_index, i;
1582  uint8_t kstate[32];
1583 
1584  memset(kstate, MID_STATE, sizeof(kstate));
1585 
1586  put_rac(&s->c, kstate, s->keyframe);
1587  if(s->keyframe || s->always_reset){
1589  s->last_spatial_decomposition_type=
1590  s->last_qlog=
1591  s->last_qbias=
1592  s->last_mv_scale=
1593  s->last_block_max_depth= 0;
1594  for(plane_index=0; plane_index<2; plane_index++){
1595  Plane *p= &s->plane[plane_index];
1596  p->last_htaps=0;
1597  p->last_diag_mc=0;
1598  memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
1599  }
1600  }
1601  if(s->keyframe){
1602  put_symbol(&s->c, s->header_state, s->version, 0);
1603  put_rac(&s->c, s->header_state, s->always_reset);
1604  put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1605  put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1606  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1607  put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
1608  if (s->nb_planes > 2) {
1609  put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
1610  put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
1611  }
1612  put_rac(&s->c, s->header_state, s->spatial_scalability);
1613 // put_rac(&s->c, s->header_state, s->rate_scalability);
1614  put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
1615 
1616  encode_qlogs(s);
1617  }
1618 
1619  if(!s->keyframe){
1620  int update_mc=0;
1621  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1622  Plane *p= &s->plane[plane_index];
1623  update_mc |= p->last_htaps != p->htaps;
1624  update_mc |= p->last_diag_mc != p->diag_mc;
1625  update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1626  }
1627  put_rac(&s->c, s->header_state, update_mc);
1628  if(update_mc){
1629  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1630  Plane *p= &s->plane[plane_index];
1631  put_rac(&s->c, s->header_state, p->diag_mc);
1632  put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
1633  for(i= p->htaps/2; i; i--)
1634  put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
1635  }
1636  }
1637  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1638  put_rac(&s->c, s->header_state, 1);
1639  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1640  encode_qlogs(s);
1641  }else
1642  put_rac(&s->c, s->header_state, 0);
1643  }
1644 
1645  put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1646  put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
1647  put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1648  put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
1649  put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1650 
1651 }
1652 
1654  int plane_index;
1655 
1656  if(!s->keyframe){
1657  for(plane_index=0; plane_index<2; plane_index++){
1658  Plane *p= &s->plane[plane_index];
1659  p->last_diag_mc= p->diag_mc;
1660  p->last_htaps = p->htaps;
1661  memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1662  }
1663  }
1664 
1665  s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1666  s->last_qlog = s->qlog;
1667  s->last_qbias = s->qbias;
1668  s->last_mv_scale = s->mv_scale;
1669  s->last_block_max_depth = s->block_max_depth;
1670  s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1671 }
1672 
1673 static int qscale2qlog(int qscale){
1674  return lrint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
1675  + 61*QROOT/8; ///< 64 > 60
1676 }
1677 
1679 {
1680  SnowContext *const s = &enc->com;
1681  /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
1682  * FIXME we know exact mv bits at this point,
1683  * but ratecontrol isn't set up to include them. */
1684  uint32_t coef_sum= 0;
1685  int level, orientation, delta_qlog;
1686 
1687  for(level=0; level<s->spatial_decomposition_count; level++){
1688  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1689  SubBand *b= &s->plane[0].band[level][orientation];
1690  IDWTELEM *buf= b->ibuf;
1691  const int w= b->width;
1692  const int h= b->height;
1693  const int stride= b->stride;
1694  const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
1695  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1696  const int qdiv= (1<<16)/qmul;
1697  int x, y;
1698  //FIXME this is ugly
1699  for(y=0; y<h; y++)
1700  for(x=0; x<w; x++)
1701  buf[x+y*stride]= b->buf[x+y*stride];
1702  if(orientation==0)
1703  decorrelate(s, b, buf, stride, 1, 0);
1704  for(y=0; y<h; y++)
1705  for(x=0; x<w; x++)
1706  coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
1707  }
1708  }
1709  emms_c();
1710 
1711  /* ugly, ratecontrol just takes a sqrt again */
1712  av_assert0(coef_sum < INT_MAX);
1713  coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
1714 
1715  if(pict->pict_type == AV_PICTURE_TYPE_I){
1716  enc->m.mb_var_sum = coef_sum;
1717  enc->m.mc_mb_var_sum = 0;
1718  }else{
1719  enc->m.mc_mb_var_sum = coef_sum;
1720  enc->m.mb_var_sum = 0;
1721  }
1722 
1723  pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
1724  if (pict->quality < 0)
1725  return INT_MIN;
1726  enc->lambda= pict->quality * 3/2;
1727  delta_qlog= qscale2qlog(pict->quality) - s->qlog;
1728  s->qlog+= delta_qlog;
1729  return delta_qlog;
1730 }
1731 
1733  int width = p->width;
1734  int height= p->height;
1735  int level, orientation, x, y;
1736 
1737  for(level=0; level<s->spatial_decomposition_count; level++){
1738  int64_t error=0;
1739  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1740  SubBand *b= &p->band[level][orientation];
1741  IDWTELEM *ibuf= b->ibuf;
1742 
1743  memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1744  ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
1745  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
1746  for(y=0; y<height; y++){
1747  for(x=0; x<width; x++){
1748  int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
1749  error += d*d;
1750  }
1751  }
1752  if (orientation == 2)
1753  error /= 2;
1754  b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
1755  if (orientation != 1)
1756  error = 0;
1757  }
1758  p->band[level][1].qlog = p->band[level][2].qlog;
1759  }
1760 }
1761 
1763  const AVFrame *pict, int *got_packet)
1764 {
1765  SnowEncContext *const enc = avctx->priv_data;
1766  SnowContext *const s = &enc->com;
1767  MPVEncContext *const mpv = &enc->m.s;
1768  RangeCoder * const c= &s->c;
1769  AVCodecInternal *avci = avctx->internal;
1770  AVFrame *pic;
1771  const int width= s->avctx->width;
1772  const int height= s->avctx->height;
1773  int level, orientation, plane_index, i, y, ret;
1774  uint8_t rc_header_bak[sizeof(s->header_state)];
1775  uint8_t rc_block_bak[sizeof(s->block_state)];
1776 
1777  if ((ret = ff_alloc_packet(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
1778  return ret;
1779 
1781  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1782 
1783  for(i=0; i < s->nb_planes; i++){
1784  int hshift= i ? s->chroma_h_shift : 0;
1785  int vshift= i ? s->chroma_v_shift : 0;
1786  for(y=0; y<AV_CEIL_RSHIFT(height, vshift); y++)
1787  memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1788  &pict->data[i][y * pict->linesize[i]],
1789  AV_CEIL_RSHIFT(width, hshift));
1790  enc->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
1791  AV_CEIL_RSHIFT(width, hshift), AV_CEIL_RSHIFT(height, vshift),
1792  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1793  EDGE_TOP | EDGE_BOTTOM);
1794 
1795  }
1796  pic = s->input_picture;
1797  pic->pict_type = pict->pict_type;
1798  pic->quality = pict->quality;
1799 
1800  mpv->picture_number = avctx->frame_num;
1801  if(avctx->flags&AV_CODEC_FLAG_PASS2){
1802  mpv->c.pict_type = pic->pict_type = enc->m.rc_context.entry[avctx->frame_num].new_pict_type;
1803  s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
1804  if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
1805  pic->quality = ff_rate_estimate_qscale(&enc->m, 0);
1806  if (pic->quality < 0)
1807  return -1;
1808  }
1809  }else{
1810  s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
1811  mpv->c.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1812  }
1813 
1814  if (enc->pass1_rc && avctx->frame_num == 0)
1815  pic->quality = 2*FF_QP2LAMBDA;
1816  if (pic->quality) {
1817  s->qlog = qscale2qlog(pic->quality);
1818  enc->lambda = pic->quality * 3/2;
1819  }
1820  if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
1821  s->qlog= LOSSLESS_QLOG;
1822  enc->lambda = 0;
1823  }//else keep previous frame's qlog until after motion estimation
1824 
1825  if (s->current_picture->data[0]) {
1826  int w = s->avctx->width;
1827  int h = s->avctx->height;
1828 
1829  enc->mpvencdsp.draw_edges(s->current_picture->data[0],
1830  s->current_picture->linesize[0], w , h ,
1832  if (s->current_picture->data[2]) {
1833  enc->mpvencdsp.draw_edges(s->current_picture->data[1],
1834  s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1835  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1836  enc->mpvencdsp.draw_edges(s->current_picture->data[2],
1837  s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1838  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1839  }
1840  }
1841 
1843  ret = get_encode_buffer(s, s->current_picture);
1844  if (ret < 0)
1845  return ret;
1846 
1847  mpv->c.cur_pic.ptr = &enc->cur_pic;
1848  mpv->c.cur_pic.ptr->f = s->current_picture;
1849  mpv->c.cur_pic.ptr->f->pts = pict->pts;
1850  if(pic->pict_type == AV_PICTURE_TYPE_P){
1851  int block_width = (width +15)>>4;
1852  int block_height= (height+15)>>4;
1853  int stride= s->current_picture->linesize[0];
1854 
1855  av_assert0(s->current_picture->data[0]);
1856  av_assert0(s->last_picture[0]->data[0]);
1857 
1858  mpv->c.avctx = s->avctx;
1859  mpv->c.last_pic.ptr = &enc->last_pic;
1860  mpv->c.last_pic.ptr->f = s->last_picture[0];
1861  mpv-> new_pic = s->input_picture;
1862  mpv->c.linesize = stride;
1863  mpv->c.uvlinesize = s->current_picture->linesize[1];
1864  mpv->c.width = width;
1865  mpv->c.height = height;
1866  mpv->c.mb_width = block_width;
1867  mpv->c.mb_height = block_height;
1868  mpv->c.mb_stride = mpv->c.mb_width + 1;
1869  mpv->c.b8_stride = 2 * mpv->c.mb_width + 1;
1870  mpv->f_code = 1;
1871  mpv->c.pict_type = pic->pict_type;
1872  mpv->me.motion_est = enc->motion_est;
1873  mpv->me.dia_size = avctx->dia_size;
1874  mpv->c.quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
1875  mpv->c.out_format = FMT_H263;
1876  mpv->me.unrestricted_mv = 1;
1877 
1878  mpv->lambda = enc->lambda;
1879  mpv->c.qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
1880  enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
1881 
1882  mpv->c.qdsp = enc->qdsp; //move
1883  mpv->c.hdsp = s->hdsp;
1884  ff_me_init_pic(mpv);
1885  s->hdsp = mpv->c.hdsp;
1886  }
1887 
1888  if (enc->pass1_rc) {
1889  memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1890  memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1891  }
1892 
1893 redo_frame:
1894 
1895  s->spatial_decomposition_count= 5;
1896 
1897  while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1898  || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1899  s->spatial_decomposition_count--;
1900 
1901  if (s->spatial_decomposition_count <= 0) {
1902  av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
1903  return AVERROR(EINVAL);
1904  }
1905 
1906  mpv->c.pict_type = pic->pict_type;
1907  s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
1908 
1910 
1911  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1912  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1913  calculate_visual_weight(s, &s->plane[plane_index]);
1914  }
1915  }
1916 
1917  encode_header(s);
1918  mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1919  encode_blocks(enc, 1);
1920  mpv->mv_bits = 8 * (s->c.bytestream - s->c.bytestream_start) - mpv->misc_bits;
1921 
1922  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1923  Plane *p= &s->plane[plane_index];
1924  int w= p->width;
1925  int h= p->height;
1926  int x, y;
1927 // int bits= put_bits_count(&s->c.pb);
1928 
1929  if (!enc->memc_only) {
1930  //FIXME optimize
1931  if(pict->data[plane_index]) //FIXME gray hack
1932  for(y=0; y<h; y++){
1933  for(x=0; x<w; x++){
1934  s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
1935  }
1936  }
1937  predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
1938 
1939  if( plane_index==0
1940  && pic->pict_type == AV_PICTURE_TYPE_P
1941  && !(avctx->flags&AV_CODEC_FLAG_PASS2)
1942  && mpv->me.scene_change_score > enc->scenechange_threshold) {
1944  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1946  s->keyframe=1;
1947  s->current_picture->flags |= AV_FRAME_FLAG_KEY;
1948  goto redo_frame;
1949  }
1950 
1951  if(s->qlog == LOSSLESS_QLOG){
1952  for(y=0; y<h; y++){
1953  for(x=0; x<w; x++){
1954  s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
1955  }
1956  }
1957  }else{
1958  for(y=0; y<h; y++){
1959  for(x=0; x<w; x++){
1960  s->spatial_dwt_buffer[y*w + x]= s->spatial_idwt_buffer[y*w + x] * (1 << ENCODER_EXTRA_BITS);
1961  }
1962  }
1963  }
1964 
1965  ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1966 
1967  if (enc->pass1_rc && plane_index==0) {
1968  int delta_qlog = ratecontrol_1pass(enc, pic);
1969  if (delta_qlog <= INT_MIN)
1970  return -1;
1971  if(delta_qlog){
1972  //reordering qlog in the bitstream would eliminate this reset
1974  memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1975  memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1976  encode_header(s);
1977  encode_blocks(enc, 0);
1978  }
1979  }
1980 
1981  for(level=0; level<s->spatial_decomposition_count; level++){
1982  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1983  SubBand *b= &p->band[level][orientation];
1984 
1985  quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
1986  if(orientation==0)
1987  decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
1988  if (!enc->no_bitstream)
1989  encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
1990  av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
1991  if(orientation==0)
1992  correlate(s, b, b->ibuf, b->stride, 1, 0);
1993  }
1994  }
1995 
1996  for(level=0; level<s->spatial_decomposition_count; level++){
1997  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1998  SubBand *b= &p->band[level][orientation];
1999 
2000  dequantize(s, b, b->ibuf, b->stride);
2001  }
2002  }
2003 
2004  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
2005  if(s->qlog == LOSSLESS_QLOG){
2006  for(y=0; y<h; y++){
2007  for(x=0; x<w; x++){
2008  s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2009  }
2010  }
2011  }
2012  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2013  }else{
2014  //ME/MC only
2015  if(pic->pict_type == AV_PICTURE_TYPE_I){
2016  for(y=0; y<h; y++){
2017  for(x=0; x<w; x++){
2018  s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2019  pict->data[plane_index][y*pict->linesize[plane_index] + x];
2020  }
2021  }
2022  }else{
2023  memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2024  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2025  }
2026  }
2027  if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
2028  int64_t error= 0;
2029 
2030  if(pict->data[plane_index]) //FIXME gray hack
2031  for(y=0; y<h; y++){
2032  for(x=0; x<w; x++){
2033  int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2034  error += d*d;
2035  }
2036  }
2037  s->avctx->error[plane_index] += error;
2038  enc->encoding_error[plane_index] = error;
2039  }
2040 
2041  }
2042  emms_c();
2043 
2045 
2046  av_frame_unref(s->last_picture[s->max_ref_frames - 1]);
2047 
2048  s->current_picture->pict_type = pic->pict_type;
2049  s->current_picture->quality = pic->quality;
2050  enc->m.frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2051  mpv->p_tex_bits = enc->m.frame_bits - mpv->misc_bits - mpv->mv_bits;
2052  enc->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2054  enc->cur_pic.coded_picture_number = avctx->frame_num;
2055  enc->cur_pic.f->quality = pic->quality;
2056  if (enc->pass1_rc) {
2057  ret = ff_rate_estimate_qscale(&enc->m, 0);
2058  if (ret < 0)
2059  return ret;
2060  }
2061  if(avctx->flags&AV_CODEC_FLAG_PASS1)
2062  ff_write_pass1_stats(&enc->m);
2063  enc->m.last_pict_type = mpv->c.pict_type;
2064 
2065  ff_encode_add_stats_side_data(pkt, s->current_picture->quality,
2066  enc->encoding_error,
2067  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? SNOW_MAX_PLANES : 0,
2068  s->current_picture->pict_type);
2069  if (s->avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
2070  av_frame_replace(avci->recon_frame, s->current_picture);
2071  }
2072 
2073  pkt->size = ff_rac_terminate(c, 0);
2074  if (s->current_picture->flags & AV_FRAME_FLAG_KEY)
2076  *got_packet = 1;
2077 
2078  return 0;
2079 }
2080 
/*
 * encode_end(): encoder teardown — frees everything encode_init allocated.
 * NOTE(review): the signature line (orig. 2081, `static av_cold int
 * encode_end(AVCodecContext *avctx)` per the symbol index) and orig. lines
 * 2086-2087 are missing from this extraction; upstream FFmpeg calls
 * ff_snow_common_end() there — confirm against the real source.
 */
2082 {
2083  SnowEncContext *const enc = avctx->priv_data;
2084  SnowContext *const s = &enc->com;
2085 
 /* Release the cached input frame reference. av_frame_free(NULL-safe). */
2088  av_frame_free(&s->input_picture);
2089 
 /* Per-reference-frame motion-vector and score arrays. */
2090  for (int i = 0; i < MAX_REF_FRAMES; i++) {
2091  av_freep(&s->ref_mvs[i]);
2092  av_freep(&s->ref_scores[i]);
2093  }
2094 
 /* me.temp aliases me.scratchpad (see encode_init); NULL it first so only
  * the owning pointer is passed to av_freep — avoids a double free. */
2095  enc->m.s.me.temp = NULL;
2096  av_freep(&enc->m.s.me.scratchpad);
2097  av_freep(&enc->emu_edge_buffer);
2098 
 /* Two-pass rate-control stats buffer allocated by the encoder core. */
2099  av_freep(&avctx->stats_out);
2100 
2101  return 0;
2102 }
2103 
2104 #define OFFSET(x) offsetof(SnowEncContext, x)
2105 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/*
 * Private AVOptions of the Snow encoder. Offsets resolve into
 * SnowEncContext (see OFFSET/VE just above); named constants belong to
 * the .unit groups "motion_est" and "pred".
 */
2106 static const AVOption options[] = {
 /* Motion-estimation algorithm selector plus its named values. */
2107  {"motion_est", "motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_ITER, VE, .unit = "motion_est" },
2108  { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, VE, .unit = "motion_est" },
2109  { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, VE, .unit = "motion_est" },
2110  { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, VE, .unit = "motion_est" },
 /* FF_ME_ITER is the snowenc-local iterative ME mode (#define FF_ME_ITER 3). */
2111  { "iter", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ITER }, 0, 0, VE, .unit = "motion_est" },
 /* Debug/analysis switches used by encode_frame(). */
2112  { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2113  { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2114  { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2115  { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
 /* Compared against me.scene_change_score in encode_frame() to force a keyframe. */
2116  { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
 /* Wavelet choice; range is [DWT_97, DWT_53] and the constants below mirror it. */
2117  { "pred", "Spatial decomposition type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, DWT_97, DWT_53, VE, .unit = "pred" },
2118  { "dwt97", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2119  { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
 /* Rate-control expression, stored directly in the embedded MPV rate-control context. */
2120  { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2121  "defined in the section 'Expression Evaluation', the following functions are available: "
2122  "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2123  "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",
2124  OFFSET(m.rc_context.rc_eq), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
 /* Sentinel terminating the option list. */
2125  { NULL },
2126 };
2127 
/* AVClass wiring the private options[] table above into the encoder context
 * so avopt introspection and command-line parsing can find them. */
2128 static const AVClass snowenc_class = {
2129  .class_name = "snow encoder",
2130  .item_name = av_default_item_name,
2131  .option = options,
2132  .version = LIBAVUTIL_VERSION_INT,
2133 };
2134 
/*
 * Public FFCodec registration for the Snow encoder.
 * NOTE(review): this extraction is incomplete — the opening declaration line
 * (orig. 2135, `const FFCodec ff_snow_encoder = {` per the symbol index) and
 * orig. lines 2141-2142, 2145, 2147-2148 (capability flags continuation,
 * encode callback, pixel-format list) are missing; verify against the real
 * snowenc.c before relying on this fragment.
 */
2136  .p.name = "snow",
2137  CODEC_LONG_NAME("Snow"),
2138  .p.type = AVMEDIA_TYPE_VIDEO,
2139  .p.id = AV_CODEC_ID_SNOW,
 /* Capability list continues on missing lines (orig. 2141-2142). */
2140  .p.capabilities = AV_CODEC_CAP_DR1 |
2143  .priv_data_size = sizeof(SnowEncContext),
2144  .init = encode_init,
2146  .close = encode_end,
2149  .color_ranges = AVCOL_RANGE_MPEG,
2150  .p.priv_class = &snowenc_class,
 /* init may fail partway; close is safe to call on a partially-set-up context. */
2151  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2152 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:392
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:254
encode_subband
static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation)
Definition: snowenc.c:1060
MPVEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideoenc.h:137
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1519
MpegEncContext::hdsp
HpelDSPContext hdsp
Definition: mpegvideo.h:159
set_blocks
static void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type)
Definition: snow.h:405
P_LEFT
#define P_LEFT
Definition: snowenc.c:366
level
uint8_t level
Definition: svq3.c:208
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:191
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
QEXPSHIFT
#define QEXPSHIFT
Definition: snow.h:432
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
SnowEncContext::lambda
int lambda
Definition: snowenc.c:50
libm.h
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MID_STATE
#define MID_STATE
Definition: snow.h:39
color
Definition: vf_paletteuse.c:513
ratecontrol_1pass
static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
Definition: snowenc.c:1678
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:247
FF_ME_EPZS
#define FF_ME_EPZS
Definition: motion_est.h:43
inverse
inverse
Definition: af_crystalizer.c:122
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: snowenc.c:2081
SnowEncContext::scenechange_threshold
int scenechange_threshold
Definition: snowenc.c:60
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
LOG2_MB_SIZE
#define LOG2_MB_SIZE
Definition: snow.h:72
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MotionEstContext
Motion estimation context.
Definition: motion_est.h:49
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:255
int64_t
long long int64_t
Definition: coverity.c:34
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
AV_CODEC_CAP_ENCODER_RECON_FRAME
#define AV_CODEC_CAP_ENCODER_RECON_FRAME
The encoder is able to output reconstructed frame data, i.e.
Definition: codec.h:159
QBIAS_SHIFT
#define QBIAS_SHIFT
Definition: snow.h:160
h263enc.h
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
MPVEncContext::mv_bits
int mv_bits
Definition: mpegvideoenc.h:133
DWT_97
#define DWT_97
Definition: snow_dwt.h:70
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:39
update_last_header_values
static void update_last_header_values(SnowContext *s)
Definition: snowenc.c:1653
MpegEncContext::pict_type
enum AVPictureType pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:154
internal.h
iterative_me
static void iterative_me(SnowEncContext *enc)
Definition: snowenc.c:1183
AVPacket::data
uint8_t * data
Definition: packet.h:588
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
SnowEncContext::qdsp
QpelDSPContext qdsp
Definition: snowenc.c:47
DWT_53
#define DWT_53
Definition: snow_dwt.h:71
get_penalty_factor
static int get_penalty_factor(int lambda, int lambda2, int type)
Definition: snowenc.c:341
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
encode_subband_c0run
static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation)
Definition: snowenc.c:940
rangecoder.h
FFCodec
Definition: codec_internal.h:127
MpegEncContext::b8_stride
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:98
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
SnowContext
Definition: snow.h:113
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: snowenc.c:1762
QSHIFT
#define QSHIFT
Definition: snow.h:42
MAX_REF_FRAMES
#define MAX_REF_FRAMES
Definition: snow.h:46
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:269
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
FF_INPUT_BUFFER_MIN_SIZE
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
Definition: encode.h:33
ff_snow_common_end
av_cold void ff_snow_common_end(SnowContext *s)
Definition: snow.c:630
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
ff_spatial_dwt
void ff_spatial_dwt(DWTELEM *buffer, DWTELEM *temp, int width, int height, int stride, int type, int decomposition_count)
Definition: snow_dwt.c:320
BlockNode::type
uint8_t type
Bitfield of BLOCK_*.
Definition: snow.h:55
px
#define px
Definition: ops_tmpl_float.c:35
check_4block_inter
static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y, int p0, int p1, int ref, int *best_rd)
Definition: snowenc.c:1134
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
ff_spatial_idwt
void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height, int stride, int type, int decomposition_count)
Definition: snow_dwt.c:732
SnowEncContext::me_cache_generation
unsigned me_cache_generation
Definition: snowenc.c:67
encode_blocks
static void encode_blocks(SnowEncContext *enc, int search)
Definition: snowenc.c:1412
ff_init_range_encoder
av_cold void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size)
Definition: rangecoder.c:42
LOG2_OBMC_MAX
#define LOG2_OBMC_MAX
Definition: snow.h:48
BlockNode
Definition: snow.h:50
AVCodecContext::refs
int refs
number of reference frames
Definition: avcodec.h:697
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:102
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
check_block_intra
static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3], uint8_t(*obmc_edged)[MB_SIZE *2], int *best_rd)
Definition: snowenc.c:1067
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
OFFSET
#define OFFSET(x)
Definition: snowenc.c:2104
ff_snow_pred_block
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride, int sx, int sy, int b_w, int b_h, const BlockNode *block, int plane_index, int w, int h)
Definition: snow.c:372
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
get_4block_rd
static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
Definition: snowenc.c:869
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:359
FF_CMP_SSE
#define FF_CMP_SSE
Definition: avcodec.h:878
ff_sqrt
#define ff_sqrt
Definition: mathops.h:220
SnowEncContext
Definition: snowenc.c:45
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
ff_snow_common_init_after_header
int ff_snow_common_init_after_header(AVCodecContext *avctx)
Definition: snow.c:538
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:106
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MpegEncContext::qdsp
QpelDSPContext qdsp
Definition: mpegvideo.h:161
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:132
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
encode_q_branch
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
Definition: snowenc.c:373
FF_CMP_BIT
#define FF_CMP_BIT
Definition: avcodec.h:882
emms_c
#define emms_c()
Definition: emms.h:63
SnowEncContext::mecc
MECmpContext mecc
Definition: snowenc.c:62
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1225
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:270
BLOCK_OPT
#define BLOCK_OPT
Block needs no checks in this round of iterative motion estiation.
Definition: snow.h:58
LOSSLESS_QLOG
#define LOSSLESS_QLOG
Definition: snow.h:44
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:264
calculate_visual_weight
static void calculate_visual_weight(SnowContext *s, Plane *p)
Definition: snowenc.c:1732
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
MpegEncContext::mb_num
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:100
P
#define P
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
add_yblock
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index)
Definition: snow.h:222
pix_norm1
static int pix_norm1(const uint8_t *pix, int line_size, int w)
Definition: snowenc.c:325
ff_snow_common_init
av_cold int ff_snow_common_init(AVCodecContext *avctx)
Definition: snow.c:482
get_encode_buffer
static int get_encode_buffer(SnowContext *s, AVFrame *frame)
Definition: snowenc.c:140
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SnowEncContext::encoding_error
uint64_t encoding_error[SNOW_MAX_PLANES]
Definition: snowenc.c:69
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
MotionEstContext::dia_size
int dia_size
Definition: motion_est.h:71
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
MECmpContext
Definition: me_cmp.h:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:152
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:236
run
uint8_t run
Definition: svq3.c:207
SnowEncContext::me_cache
unsigned me_cache[ME_CACHE_SIZE]
Definition: snowenc.c:66
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
MpegvideoEncDSPContext::draw_edges
void(* draw_edges)(uint8_t *buf, ptrdiff_t wrap, int width, int height, int w, int h, int sides)
Definition: mpegvideoencdsp.h:46
snow.h
state
static struct @553 state
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:908
BlockNode::my
int16_t my
Motion vector component Y, see mv_scale.
Definition: snow.h:52
get_block_rd
static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index, uint8_t(*obmc_edged)[MB_SIZE *2])
Definition: snowenc.c:765
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
VE
#define VE
Definition: snowenc.c:2105
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:489
ff_rac_terminate
int ff_rac_terminate(RangeCoder *c, int version)
Terminates the range coder.
Definition: rangecoder.c:109
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
mathops.h
options
Definition: swscale.c:43
SnowEncContext::obmc_scratchpad
IDWTELEM obmc_scratchpad[MB_SIZE *MB_SIZE *12 *2]
Definition: snowenc.c:73
qpeldsp.h
abs
#define abs(x)
Definition: cuda_runtime.h:35
correlate
static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1543
QROOT
#define QROOT
Definition: snow.h:43
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
MpegEncContext::mb_width
int mb_width
Definition: mpegvideo.h:96
MPVMainEncContext
Definition: mpegvideoenc.h:202
ff_h263_get_mv_penalty
const uint8_t(* ff_h263_get_mv_penalty(void))[MAX_DMV *2+1]
Definition: ituh263enc.c:148
FF_ME_XONE
#define FF_ME_XONE
Definition: motion_est.h:44
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
init_ref
static void init_ref(MotionEstContext *c, const uint8_t *const src[3], uint8_t *const ref[3], uint8_t *const ref2[3], int x, int y, int ref_index)
Definition: snowenc.c:76
MB_SIZE
#define MB_SIZE
Definition: cinepakenc.c:54
put_symbol
static void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
Definition: snowenc.c:93
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:230
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1320
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:262
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:120
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
AVPacket::size
int size
Definition: packet.h:589
SNOW_MAX_PLANES
#define SNOW_MAX_PLANES
Definition: snow.h:37
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1013
height
#define height
Definition: dsp.h:89
encode_header
static void encode_header(SnowContext *s)
Definition: snowenc.c:1580
codec_internal.h
FF_CMP_PSNR
#define FF_CMP_PSNR
Definition: avcodec.h:881
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
SnowEncContext::pass1_rc
int pass1_rc
Definition: snowenc.c:52
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:97
FF_CMP_W53
#define FF_CMP_W53
Definition: avcodec.h:888
size
int size
Definition: twinvq_data.h:10344
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
MotionEstContext::mv_penalty
const uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:100
pix_sum
static int pix_sum(const uint8_t *pix, int line_size, int w, int h)
Definition: snowenc.c:309
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:256
SnowEncContext::motion_est
int motion_est
Definition: snowenc.c:58
ff_snow_encoder
const FFCodec ff_snow_encoder
Definition: snowenc.c:2135
SubBand
Definition: cfhd.h:116
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2594
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
FF_CMP_SATD
#define FF_CMP_SATD
Definition: avcodec.h:879
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
SnowEncContext::intra_penalty
int intra_penalty
Definition: snowenc.c:57
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
snow_dwt.h
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
AVCodecInternal
Definition: internal.h:49
FF_CMP_SAD
#define FF_CMP_SAD
Definition: avcodec.h:877
encode_q_branch2
static void encode_q_branch2(SnowContext *s, int level, int x, int y)
Definition: snowenc.c:611
SnowEncContext::iterative_dia_size
int iterative_dia_size
Definition: snowenc.c:59
ff_quant3bA
const int8_t ff_quant3bA[256]
Definition: snowdata.h:104
DWTELEM
int DWTELEM
Definition: dirac_dwt.h:26
emms.h
ff_obmc_tab
const uint8_t *const ff_obmc_tab[4]
Definition: snowdata.h:123
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
ENCODER_EXTRA_BITS
#define ENCODER_EXTRA_BITS
Definition: snow.h:74
AV_CODEC_FLAG_RECON_FRAME
#define AV_CODEC_FLAG_RECON_FRAME
Request the encoder to output reconstructed frames, i.e. frames that would be produced by decoding th...
Definition: avcodec.h:244
log.h
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1392
MPVEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideoenc.h:80
FF_CMP_RD
#define FF_CMP_RD
Definition: avcodec.h:883
get_block_bits
static int get_block_bits(SnowContext *s, int x, int y, int w)
Definition: snowenc.c:727
ff_get_mb_score
int ff_get_mb_score(MPVEncContext *s, int mx, int my, int src_index, int ref_index, int size, int h, int add_rate)
Definition: motion_est_template.c:192
ff_w53_32_c
int ff_w53_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
Definition: snow_dwt.c:833
BLOCK_INTRA
#define BLOCK_INTRA
Intra block, inter otherwise.
Definition: snow.h:57
MotionEstContext::motion_est
int motion_est
ME algorithm.
Definition: motion_est.h:51
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
qscale2qlog
static int qscale2qlog(int qscale)
Definition: snowenc.c:1673
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:496
av_always_inline
#define av_always_inline
Definition: attributes.h:63
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
AVCodecContext::dia_size
int dia_size
ME diamond size & shape.
Definition: avcodec.h:900
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:887
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodecContext::mb_lmin
int mb_lmin
minimum MB Lagrange multiplier
Definition: avcodec.h:986
ff_qexp
const uint8_t ff_qexp[QROOT]
Definition: snowdata.h:128
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:37
predict_plane
static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add)
Definition: snow.h:398
SnowEncContext::no_bitstream
int no_bitstream
Definition: snowenc.c:56
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
ME_CACHE_SIZE
#define ME_CACHE_SIZE
Definition: snowenc.c:65
SnowEncContext::com
SnowContext com
Definition: snowenc.c:46
FF_ME_ITER
#define FF_ME_ITER
Definition: snowenc.c:43
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
get_dc
static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
Definition: snowenc.c:667
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
SnowEncContext::m
MPVMainEncContext m
Definition: snowenc.c:63
log2
#define log2(x)
Definition: libm.h:406
avcodec.h
ff_w97_32_c
int ff_w97_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
Definition: snow_dwt.c:838
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
mid_pred
#define mid_pred
Definition: mathops.h:115
ret
ret
Definition: filter_design.txt:187
SnowEncContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: snowenc.c:48
pred
static const float pred[4]
Definition: siprdata.h:259
search
static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax, int ymin, int ymax, int *best_x, int *best_y, float best_score)
Definition: vf_find_rect.c:152
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: snowenc.c:162
options
static const AVOption options[]
Definition: snowenc.c:2106
AVCodecInternal::recon_frame
AVFrame * recon_frame
When the AV_CODEC_FLAG_RECON_FRAME flag is used.
Definition: internal.h:114
square
static int square(int x)
Definition: roqvideoenc.c:196
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
put_rac
#define put_rac(C, S, B)
ff_snow_reset_contexts
void ff_snow_reset_contexts(SnowContext *s)
Definition: snow.c:150
me_cmp.h
encode_qlogs
static void encode_qlogs(SnowContext *s)
Definition: snowenc.c:1567
av_frame_replace
int av_frame_replace(AVFrame *dst, const AVFrame *src)
Ensure the destination frame refers to the same data described by the source frame,...
Definition: frame.c:376
QpelDSPContext
quarterpel DSP context
Definition: qpeldsp.h:72
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AV_CODEC_ID_SNOW
@ AV_CODEC_ID_SNOW
Definition: codec_id.h:267
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
SnowEncContext::cur_pic
MPVPicture cur_pic
Definition: snowenc.c:64
SnowEncContext::last_pic
MPVPicture last_pic
Definition: snowenc.c:64
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:236
FRAC_BITS
#define FRAC_BITS
Definition: g729postfilter.c:36
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
FF_CMP_DCT
#define FF_CMP_DCT
Definition: avcodec.h:880
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:84
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
get_rac_count
static int get_rac_count(RangeCoder *c)
Definition: rangecoder.h:79
AVCodecContext::mb_lmax
int mb_lmax
maximum MB Lagrange multiplier
Definition: avcodec.h:993
put_symbol2
static void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
Definition: snowenc.c:121
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
Plane
Definition: cfhd.h:125
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
BlockNode::level
uint8_t level
Definition: snow.h:60
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
same_block
static av_always_inline int same_block(BlockNode *a, BlockNode *b)
Definition: snow.h:212
mem.h
BlockNode::mx
int16_t mx
Motion vector component X, see mv_scale.
Definition: snow.h:51
w
uint8_t w
Definition: llvidencdsp.c:39
ff_epzs_motion_search
int ff_epzs_motion_search(MPVEncContext *s, int *mx_ptr, int *my_ptr, int P[10][2], int src_index, int ref_index, const int16_t(*last_mv)[2], int ref_mv_scale, int size, int h)
Definition: motion_est_template.c:977
mcf
#define mcf(dx, dy)
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:248
ff_snow_frames_prepare
int ff_snow_frames_prepare(SnowContext *s)
Definition: snow.c:599
FF_CMP_DCT264
#define FF_CMP_DCT264
Definition: avcodec.h:891
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:191
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
SnowEncContext::emu_edge_buffer
uint8_t * emu_edge_buffer
Definition: snowenc.c:71
quantize
static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias)
Definition: snowenc.c:1436
SnowEncContext::memc_only
int memc_only
Definition: snowenc.c:55
dequantize
static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride)
Definition: snowenc.c:1497
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
HTAPS_MAX
#define HTAPS_MAX
Definition: snow.h:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
null_block
static const BlockNode null_block
Definition: snow.h:63
MotionEstContext::scene_change_score
int scene_change_score
Definition: motion_est.h:86
MPVEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideoenc.h:135
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
IDWTELEM
short IDWTELEM
Definition: dirac_dwt.h:27
h
h
Definition: vp9dsp_template.c:2070
RangeCoder
Definition: mss3.c:63
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:85
stride
#define stride
Definition: h264pred_template.c:536
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
snowenc_class
static const AVClass snowenc_class
Definition: snowenc.c:2128
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
SnowEncContext::pred
int pred
Definition: snowenc.c:54
P_TOP
#define P_TOP
Definition: snowenc.c:367
check_block_inter
static av_always_inline int check_block_inter(SnowEncContext *enc, int mb_x, int mb_y, int p0, int p1, uint8_t(*obmc_edged)[MB_SIZE *2], int *best_rd)
Definition: snowenc.c:1098
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
ff_snow_alloc_blocks
int ff_snow_alloc_blocks(SnowContext *s)
Definition: snow.c:164
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
BlockNode::ref
uint8_t ref
Reference frame index.
Definition: snow.h:53
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:203
P_TOPRIGHT
#define P_TOPRIGHT
Definition: snowenc.c:368
MpegEncContext::width
int width
Definition: mpegvideo.h:84
src
#define src
Definition: vp8dsp.c:248
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:101
MPVEncContext::picture_number
int picture_number
Definition: mpegvideoenc.h:130
MotionEstContext::me_cmp
me_cmp_func me_cmp[6]
Definition: motion_est.h:89
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:96
P_MEDIAN
#define P_MEDIAN
Definition: snowenc.c:369
FF_ME_ZERO
#define FF_ME_ZERO
Definition: motion_est.h:42
SnowEncContext::lambda2
int lambda2
Definition: snowenc.c:51
FF_CMP_W97
#define FF_CMP_W97
Definition: avcodec.h:889
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
MotionEstContext::unrestricted_mv
int unrestricted_mv
mv can point outside of the coded picture
Definition: motion_est.h:72
intmath.h