FFmpeg
snowenc.c
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/emms.h"
22 #include "libavutil/intmath.h"
23 #include "libavutil/libm.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/pixdesc.h"
28 #include "avcodec.h"
29 #include "codec_internal.h"
30 #include "encode.h"
31 #include "internal.h" //For AVCodecInternal.recon_frame
32 #include "me_cmp.h"
33 #include "packet_internal.h"
34 #include "qpeldsp.h"
35 #include "snow_dwt.h"
36 #include "snow.h"
37 
38 #include "rangecoder.h"
39 #include "mathops.h"
40 
41 #include "mpegvideo.h"
42 #include "h263enc.h"
43 
44 #define FF_ME_ITER 3
45 
46 typedef struct SnowEncContext {
    /* members on lines collapsed by this listing (com, qdsp, mpvencdsp, mecc,
     * intra_penalty, motion_est, iterative_dia_size, me_cache*) are restored
     * from their use later in this file; their exact order is approximate */
47  SnowContext com;
48  QpelDSPContext qdsp;
49  MpegvideoEncDSPContext mpvencdsp;
50 
51  int lambda;
52  int lambda2;
53  int pass1_rc;
54 
55  int pred;
56  int memc_only;
57  int intra_penalty;
58  int motion_est;
59  int iterative_dia_size;
62 
63  MECmpContext mecc;
64  MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
66 #define ME_CACHE_SIZE 1024
67  unsigned me_cache[ME_CACHE_SIZE];
68  unsigned me_cache_generation;
69 
71 } SnowEncContext;
72 
73 static void init_ref(MotionEstContext *c, const uint8_t *const src[3],
74  uint8_t *const ref[3], uint8_t *const ref2[3],
75  int x, int y, int ref_index)
76 {
77  SnowContext *s = c->avctx->priv_data;
78  const int offset[3] = {
79  y*c-> stride + x,
80  ((y*c->uvstride + x) >> s->chroma_h_shift),
81  ((y*c->uvstride + x) >> s->chroma_h_shift),
82  };
83  for (int i = 0; i < 3; i++) {
84  c->src[0][i] = src [i];
85  c->ref[0][i] = ref [i] + offset[i];
86  }
87  av_assert2(!ref_index);
88 }
89 
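/* Writes one signed value with the range coder: a "non-zero" flag, the exponent
 * of |v| as unary with per-position contexts (capped at 10), the bits of |v|
 * below the leading one, and finally the sign when is_signed is set.
 * e.g. v=5 (binary 101): zero-flag 0, exponent bits 1,1 and a stop 0, the two
 * low bits 0,1, then the sign bit. The decoder reads the same layout back. */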
90 static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
91 {
92  if (v) {
93  const int a = FFABS(v);
94  const int e = av_log2(a);
95  const int el = FFMIN(e, 10);
96  int i;
97 
98  put_rac(c, state + 0, 0);
99 
100  for (i = 0; i < el; i++)
101  put_rac(c, state + 1 + i, 1); //1..10
102  for(; i < e; i++)
103  put_rac(c, state + 1 + 9, 1); //1..10
104  put_rac(c, state + 1 + FFMIN(i, 9), 0);
105 
106  for (i = e - 1; i >= el; i--)
107  put_rac(c, state + 22 + 9, (a >> i) & 1); //22..31
108  for(; i >= 0; i--)
109  put_rac(c, state + 22 + i, (a >> i) & 1); //22..31
110 
111  if (is_signed)
112  put_rac(c, state + 11 + el, v < 0); //11..21
113  } else {
114  put_rac(c, state + 0, 1);
115  }
116 }
117 
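/* Non-negative variant: emits "1" bits while v still exceeds the current range r
 * (doubling r once log2 goes positive), then a "0" stop bit, then the log2 low
 * bits of the remainder, each with its own context. */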
118 static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
119 {
120  int r = log2 >= 0 ? 1<<log2 : 1;
121 
122  av_assert2(v >= 0);
123  av_assert2(log2 >= -4);
124 
125  while (v >= r) {
126  put_rac(c, state + 4 + log2, 1);
127  v -= r;
128  log2++;
129  if (log2 > 0) r += r;
130  }
131  put_rac(c, state + 4 + log2, 0);
132 
133  for (int i = log2 - 1; i >= 0; i--)
134  put_rac(c, state + 31 - i, (v >> i) & 1);
135 }
136 
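/* Allocates the internal input frame with an EDGE_WIDTH border on all sides and
 * advances the plane pointers past that border, so motion estimation may read
 * slightly outside the visible picture without extra checks. */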
137 static int get_encode_buffer(SnowContext *s, AVFrame *frame)
138 {
139  int ret;
140 
141  frame->width = s->avctx->width + 2 * EDGE_WIDTH;
142  frame->height = s->avctx->height + 2 * EDGE_WIDTH;
143 
144  ret = ff_encode_alloc_frame(s->avctx, frame);
145  if (ret < 0)
146  return ret;
147  for (int i = 0; frame->data[i]; i++) {
148  int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
149  frame->linesize[i] +
150  (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
151  frame->data[i] += offset;
152  }
153  frame->width = s->avctx->width;
154  frame->height = s->avctx->height;
155 
156  return 0;
157 }
158 
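/* Encoder init: rejects the 9/7 wavelet in lossless mode, sets per-plane
 * half-pel filter defaults, fills the qpel MC tables from the h264qpel ones,
 * sets up me_cmp and the borrowed MpegEncContext used only for motion
 * estimation / rate control, and allocates the per-reference MV buffers used
 * by iterative motion estimation. */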
159 static av_cold int encode_init(AVCodecContext *avctx)
160 {
161  SnowEncContext *const enc = avctx->priv_data;
162  SnowContext *const s = &enc->com;
163  MpegEncContext *const mpv = &enc->m;
164  int plane_index, ret;
165  int i;
166 
167  if (enc->pred == DWT_97
168  && (avctx->flags & AV_CODEC_FLAG_QSCALE)
169  && avctx->global_quality == 0){
170  av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
171  return AVERROR(EINVAL);
172  }
173 
174  s->spatial_decomposition_type = enc->pred; //FIXME add decorrelator type or transform_type
175 
176  s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
177  s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
178 
179  for(plane_index=0; plane_index<3; plane_index++){
180  s->plane[plane_index].diag_mc= 1;
181  s->plane[plane_index].htaps= 6;
182  s->plane[plane_index].hcoeff[0]= 40;
183  s->plane[plane_index].hcoeff[1]= -10;
184  s->plane[plane_index].hcoeff[2]= 2;
185  s->plane[plane_index].fast_mc= 1;
186  }
187 
188  // Must be before ff_snow_common_init()
189  ff_hpeldsp_init(&s->hdsp, avctx->flags);
190  if ((ret = ff_snow_common_init(avctx)) < 0) {
191  return ret;
192  }
193 
194 #define mcf(dx,dy)\
195  enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
196  enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
197  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
198  enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
199  enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
200  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
201 
202  mcf( 0, 0)
203  mcf( 4, 0)
204  mcf( 8, 0)
205  mcf(12, 0)
206  mcf( 0, 4)
207  mcf( 4, 4)
208  mcf( 8, 4)
209  mcf(12, 4)
210  mcf( 0, 8)
211  mcf( 4, 8)
212  mcf( 8, 8)
213  mcf(12, 8)
214  mcf( 0,12)
215  mcf( 4,12)
216  mcf( 8,12)
217  mcf(12,12)
218 
219  ff_me_cmp_init(&enc->mecc, avctx);
220  ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
221 
223 
224  s->version=0;
225 
226  mpv->avctx = avctx;
227  mpv->bit_rate= avctx->bit_rate;
228  mpv->lmin = avctx->mb_lmin;
229  mpv->lmax = avctx->mb_lmax;
230  mpv->mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
231 
232  mpv->me.temp =
233  mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
234  mpv->sc.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
235  mpv->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*mpv->me.map));
236  if (!mpv->me.scratchpad || !mpv->me.map || !mpv->sc.obmc_scratchpad)
237  return AVERROR(ENOMEM);
238  mpv->me.score_map = mpv->me.map + ME_MAP_SIZE;
239 
240  ff_h263_encode_init(mpv); //mv_penalty
241 
242  s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
243 
244  if(avctx->flags&AV_CODEC_FLAG_PASS1){
245  if(!avctx->stats_out)
246  avctx->stats_out = av_mallocz(256);
247 
248  if (!avctx->stats_out)
249  return AVERROR(ENOMEM);
250  }
251  if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
252  ret = ff_rate_control_init(mpv);
253  if(ret < 0)
254  return ret;
255  }
257 
258  switch(avctx->pix_fmt){
259  case AV_PIX_FMT_YUV444P:
260 // case AV_PIX_FMT_YUV422P:
261  case AV_PIX_FMT_YUV420P:
262 // case AV_PIX_FMT_YUV411P:
263  case AV_PIX_FMT_YUV410P:
264  s->nb_planes = 3;
265  s->colorspace_type= 0;
266  break;
267  case AV_PIX_FMT_GRAY8:
268  s->nb_planes = 1;
269  s->colorspace_type = 1;
270  break;
271 /* case AV_PIX_FMT_RGB32:
272  s->colorspace= 1;
273  break;*/
274  }
275 
276  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
277  &s->chroma_v_shift);
278  if (ret)
279  return ret;
280 
281  ret = ff_set_cmp(&enc->mecc, enc->mecc.me_cmp, s->avctx->me_cmp);
282  ret |= ff_set_cmp(&enc->mecc, enc->mecc.me_sub_cmp, s->avctx->me_sub_cmp);
283  if (ret < 0)
284  return AVERROR(EINVAL);
285 
286  s->input_picture = av_frame_alloc();
287  if (!s->input_picture)
288  return AVERROR(ENOMEM);
289 
290  if ((ret = get_encode_buffer(s, s->input_picture)) < 0)
291  return ret;
292 
293  if (enc->motion_est == FF_ME_ITER) {
294  int size= s->b_width * s->b_height << 2*s->block_max_depth;
295  for(i=0; i<s->max_ref_frames; i++){
296  s->ref_mvs[i] = av_calloc(size, sizeof(*s->ref_mvs[i]));
297  s->ref_scores[i] = av_calloc(size, sizeof(*s->ref_scores[i]));
298  if (!s->ref_mvs[i] || !s->ref_scores[i])
299  return AVERROR(ENOMEM);
300  }
301  }
302 
303  return 0;
304 }
305 
306 //near copy & paste from dsputil, FIXME
307 static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
308 {
309  int s, i, j;
310 
311  s = 0;
312  for (i = 0; i < h; i++) {
313  for (j = 0; j < w; j++) {
314  s += pix[0];
315  pix ++;
316  }
317  pix += line_size - w;
318  }
319  return s;
320 }
321 
322 //near copy & paste from dsputil, FIXME
323 static int pix_norm1(const uint8_t * pix, int line_size, int w)
324 {
325  int s, i, j;
326  const uint32_t *sq = ff_square_tab + 256;
327 
328  s = 0;
329  for (i = 0; i < w; i++) {
330  for (j = 0; j < w; j ++) {
331  s += sq[pix[0]];
332  pix ++;
333  }
334  pix += line_size - w;
335  }
336  return s;
337 }
338 
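/* Scales lambda into the motion-estimation penalty factor for the selected
 * comparison function, roughly compensating for how large that metric's scores
 * are relative to SAD. */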
339 static inline int get_penalty_factor(int lambda, int lambda2, int type){
340  switch(type&0xFF){
341  default:
342  case FF_CMP_SAD:
343  return lambda>>FF_LAMBDA_SHIFT;
344  case FF_CMP_DCT:
345  return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
346  case FF_CMP_W53:
347  return (4*lambda)>>(FF_LAMBDA_SHIFT);
348  case FF_CMP_W97:
349  return (2*lambda)>>(FF_LAMBDA_SHIFT);
350  case FF_CMP_SATD:
351  case FF_CMP_DCT264:
352  return (2*lambda)>>FF_LAMBDA_SHIFT;
353  case FF_CMP_RD:
354  case FF_CMP_PSNR:
355  case FF_CMP_SSE:
356  case FF_CMP_NSSE:
357  return lambda2>>FF_LAMBDA_SHIFT;
358  case FF_CMP_BIT:
359  return 1;
360  }
361 }
362 
363 //FIXME copy&paste
364 #define P_LEFT P[1]
365 #define P_TOP P[2]
366 #define P_TOPRIGHT P[3]
367 #define P_MEDIAN P[4]
368 #define P_MV1 P[9]
369 #define FLAG_QPEL 1 //must be 1
370 
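/* First-pass recursive quadtree decision for one block: runs EPZS + subpel
 * motion search against each reference frame, range-codes the best inter
 * candidate and a flat-colour intra candidate into scratch coders to measure
 * their rate, recurses into the four children, and keeps whichever of
 * split / inter / intra has the lowest rate-distortion score, committing that
 * candidate's bytes and block state. Also accumulates the scene change score. */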
371 static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
372 {
373  SnowContext *const s = &enc->com;
374  MotionEstContext *const c = &enc->m.me;
375  uint8_t p_buffer[1024];
376  uint8_t i_buffer[1024];
377  uint8_t p_state[sizeof(s->block_state)];
378  uint8_t i_state[sizeof(s->block_state)];
379  RangeCoder pc, ic;
380  uint8_t *pbbak= s->c.bytestream;
381  uint8_t *pbbak_start= s->c.bytestream_start;
382  int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
383  const int w= s->b_width << s->block_max_depth;
384  const int h= s->b_height << s->block_max_depth;
385  const int rem_depth= s->block_max_depth - level;
386  const int index= (x + y*w) << rem_depth;
387  const int block_w= 1<<(LOG2_MB_SIZE - level);
388  int trx= (x+1)<<rem_depth;
389  int try= (y+1)<<rem_depth;
390  const BlockNode *left = x ? &s->block[index-1] : &null_block;
391  const BlockNode *top = y ? &s->block[index-w] : &null_block;
392  const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
393  const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
394  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
395  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
396  int pl = left->color[0];
397  int pcb= left->color[1];
398  int pcr= left->color[2];
399  int pmx, pmy;
400  int mx=0, my=0;
401  int l,cr,cb;
402  const int stride= s->current_picture->linesize[0];
403  const int uvstride= s->current_picture->linesize[1];
404  const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y* stride)*block_w,
405  s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
406  s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
407  int P[10][2];
408  int16_t last_mv[3][2];
409  int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
410  const int shift= 1+qpel;
411  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
412  int mx_context= av_log2(2*FFABS(left->mx - top->mx));
413  int my_context= av_log2(2*FFABS(left->my - top->my));
414  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
415  int ref, best_ref, ref_score, ref_mx, ref_my;
416 
417  av_assert0(sizeof(s->block_state) >= 256);
418  if(s->keyframe){
419  set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
420  return 0;
421  }
422 
423 // clip predictors / edge ?
424 
425  P_LEFT[0]= left->mx;
426  P_LEFT[1]= left->my;
427  P_TOP [0]= top->mx;
428  P_TOP [1]= top->my;
429  P_TOPRIGHT[0]= tr->mx;
430  P_TOPRIGHT[1]= tr->my;
431 
432  last_mv[0][0]= s->block[index].mx;
433  last_mv[0][1]= s->block[index].my;
434  last_mv[1][0]= right->mx;
435  last_mv[1][1]= right->my;
436  last_mv[2][0]= bottom->mx;
437  last_mv[2][1]= bottom->my;
438 
439  enc->m.mb_stride = 2;
440  enc->m.mb_x =
441  enc->m.mb_y = 0;
442  c->skip= 0;
443 
444  av_assert1(c-> stride == stride);
445  av_assert1(c->uvstride == uvstride);
446 
447  c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
448  c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
449  c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
450  c->current_mv_penalty = c->mv_penalty[enc->m.f_code=1] + MAX_DMV;
451 
452  c->xmin = - x*block_w - 16+3;
453  c->ymin = - y*block_w - 16+3;
454  c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
455  c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
456 
457  if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
458  if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
459  if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
460  if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
461  if(P_TOPRIGHT[0] < (c->xmin * (1<<shift))) P_TOPRIGHT[0]= (c->xmin * (1<<shift));
462  if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
463  if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
464 
465  P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
466  P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
467 
468  if (!y) {
469  c->pred_x= P_LEFT[0];
470  c->pred_y= P_LEFT[1];
471  } else {
472  c->pred_x = P_MEDIAN[0];
473  c->pred_y = P_MEDIAN[1];
474  }
475 
476  score= INT_MAX;
477  best_ref= 0;
478  for(ref=0; ref<s->ref_frames; ref++){
479  init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
480 
481  ref_score= ff_epzs_motion_search(&enc->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
482  (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
483 
484  av_assert2(ref_mx >= c->xmin);
485  av_assert2(ref_mx <= c->xmax);
486  av_assert2(ref_my >= c->ymin);
487  av_assert2(ref_my <= c->ymax);
488 
489  ref_score= c->sub_motion_search(&enc->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
490  ref_score= ff_get_mb_score(&enc->m, ref_mx, ref_my, 0, 0, level-LOG2_MB_SIZE+4, block_w, 0);
491  ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
492  if(s->ref_mvs[ref]){
493  s->ref_mvs[ref][index][0]= ref_mx;
494  s->ref_mvs[ref][index][1]= ref_my;
495  s->ref_scores[ref][index]= ref_score;
496  }
497  if(score > ref_score){
498  score= ref_score;
499  best_ref= ref;
500  mx= ref_mx;
501  my= ref_my;
502  }
503  }
504  //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2
505 
506  // subpel search
507  base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
508  pc= s->c;
509  pc.bytestream_start=
510  pc.bytestream= p_buffer; //FIXME end/start? and at the others too
511  memcpy(p_state, s->block_state, sizeof(s->block_state));
512 
513  if(level!=s->block_max_depth)
514  put_rac(&pc, &p_state[4 + s_context], 1);
515  put_rac(&pc, &p_state[1 + left->type + top->type], 0);
516  if(s->ref_frames > 1)
517  put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
518  pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
519  put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
520  put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
521  p_len= pc.bytestream - pc.bytestream_start;
522  score += (enc->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;
523 
524  block_s= block_w*block_w;
525  sum = pix_sum(current_data[0], stride, block_w, block_w);
526  l= (sum + block_s/2)/block_s;
527  iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
528 
529  if (s->nb_planes > 2) {
530  block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
531  sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
532  cb= (sum + block_s/2)/block_s;
533  // iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
534  sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
535  cr= (sum + block_s/2)/block_s;
536  // iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
537  }else
538  cb = cr = 0;
539 
540  ic= s->c;
541  ic.bytestream_start=
542  ic.bytestream= i_buffer; //FIXME end/start? and at the others too
543  memcpy(i_state, s->block_state, sizeof(s->block_state));
544  if(level!=s->block_max_depth)
545  put_rac(&ic, &i_state[4 + s_context], 1);
546  put_rac(&ic, &i_state[1 + left->type + top->type], 1);
547  put_symbol(&ic, &i_state[32], l-pl , 1);
548  if (s->nb_planes > 2) {
549  put_symbol(&ic, &i_state[64], cb-pcb, 1);
550  put_symbol(&ic, &i_state[96], cr-pcr, 1);
551  }
552  i_len= ic.bytestream - ic.bytestream_start;
553  iscore += (enc->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;
554 
555  av_assert1(iscore < 255*255*256 + enc->lambda2*10);
556  av_assert1(iscore >= 0);
557  av_assert1(l>=0 && l<=255);
558  av_assert1(pl>=0 && pl<=255);
559 
560  if(level==0){
561  int varc= iscore >> 8;
562  int vard= score >> 8;
563  if (vard <= 64 || vard < varc)
564  c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
565  else
566  c->scene_change_score += enc->m.qscale;
567  }
568 
569  if(level!=s->block_max_depth){
570  put_rac(&s->c, &s->block_state[4 + s_context], 0);
571  score2 = encode_q_branch(enc, level+1, 2*x+0, 2*y+0);
572  score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+0);
573  score2+= encode_q_branch(enc, level+1, 2*x+0, 2*y+1);
574  score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+1);
575  score2+= enc->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead
576 
577  if(score2 < score && score2 < iscore)
578  return score2;
579  }
580 
581  if(iscore < score){
582  pred_mv(s, &pmx, &pmy, 0, left, top, tr);
583  memcpy(pbbak, i_buffer, i_len);
584  s->c= ic;
585  s->c.bytestream_start= pbbak_start;
586  s->c.bytestream= pbbak + i_len;
587  set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
588  memcpy(s->block_state, i_state, sizeof(s->block_state));
589  return iscore;
590  }else{
591  memcpy(pbbak, p_buffer, p_len);
592  s->c= pc;
593  s->c.bytestream_start= pbbak_start;
594  s->c.bytestream= pbbak + p_len;
595  set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
596  memcpy(s->block_state, p_state, sizeof(s->block_state));
597  return score;
598  }
599 }
600 
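/* Second pass: the block tree is already decided (e.g. by iterative_me()), so
 * this only writes it out -- merge flags, intra colours or MV residuals --
 * without any new search. */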
601 static void encode_q_branch2(SnowContext *s, int level, int x, int y){
602  const int w= s->b_width << s->block_max_depth;
603  const int rem_depth= s->block_max_depth - level;
604  const int index= (x + y*w) << rem_depth;
605  int trx= (x+1)<<rem_depth;
606  BlockNode *b= &s->block[index];
607  const BlockNode *left = x ? &s->block[index-1] : &null_block;
608  const BlockNode *top = y ? &s->block[index-w] : &null_block;
609  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
610  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
611  int pl = left->color[0];
612  int pcb= left->color[1];
613  int pcr= left->color[2];
614  int pmx, pmy;
615  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
616  int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
617  int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
618  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
619 
620  if(s->keyframe){
621  set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
622  return;
623  }
624 
625  if(level!=s->block_max_depth){
626  if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
627  put_rac(&s->c, &s->block_state[4 + s_context], 1);
628  }else{
629  put_rac(&s->c, &s->block_state[4 + s_context], 0);
630  encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
631  encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
632  encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
633  encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
634  return;
635  }
636  }
637  if(b->type & BLOCK_INTRA){
638  pred_mv(s, &pmx, &pmy, 0, left, top, tr);
639  put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
640  put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
641  if (s->nb_planes > 2) {
642  put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
643  put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
644  }
645  set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
646  }else{
647  pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
648  put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
649  if(s->ref_frames > 1)
650  put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
651  put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
652  put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
653  set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
654  }
655 }
656 
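/* Finds the best flat intra value for one block/plane: accumulates the
 * OBMC-weighted prediction of the neighbouring blocks into a scratch buffer,
 * then solves (in a weighted least-squares sense) for the DC value that best
 * matches the source, returning it clipped to 8 bits. */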
657 static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
658 {
659  SnowContext *const s = &enc->com;
660  int i, x2, y2;
661  Plane *p= &s->plane[plane_index];
662  const int block_size = MB_SIZE >> s->block_max_depth;
663  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
664  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
665  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
666  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
667  const int ref_stride= s->current_picture->linesize[plane_index];
668  const uint8_t *src = s->input_picture->data[plane_index];
669  IDWTELEM *dst= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
670  const int b_stride = s->b_width << s->block_max_depth;
671  const int w= p->width;
672  const int h= p->height;
673  int index= mb_x + mb_y*b_stride;
674  BlockNode *b= &s->block[index];
675  BlockNode backup= *b;
676  int ab=0;
677  int aa=0;
678 
679  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above
680 
681  b->type|= BLOCK_INTRA;
682  b->color[plane_index]= 0;
683  memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
684 
685  for(i=0; i<4; i++){
686  int mb_x2= mb_x + (i &1) - 1;
687  int mb_y2= mb_y + (i>>1) - 1;
688  int x= block_w*mb_x2 + block_w/2;
689  int y= block_h*mb_y2 + block_h/2;
690 
691  add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
692  x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
693 
694  for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
695  for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
696  int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
697  int obmc_v= obmc[index];
698  int d;
699  if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
700  if(x<0) obmc_v += obmc[index + block_w];
701  if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
702  if(x+block_w>w) obmc_v += obmc[index - block_w];
703  //FIXME precalculate this or simplify it somehow else
704 
705  d = -dst[index] + (1<<(FRAC_BITS-1));
706  dst[index] = d;
707  ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
708  aa += obmc_v * obmc_v; //FIXME precalculate this
709  }
710  }
711  }
712  *b= backup;
713 
714  return av_clip_uint8( ROUNDED_DIV(ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
715 }
716 
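/* Rough bit-cost estimate for one block's side information, using log2-based
 * guesses for the intra colour deltas or the MV residual and reference index. */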
717 static inline int get_block_bits(SnowContext *s, int x, int y, int w){
718  const int b_stride = s->b_width << s->block_max_depth;
719  const int b_height = s->b_height<< s->block_max_depth;
720  int index= x + y*b_stride;
721  const BlockNode *b = &s->block[index];
722  const BlockNode *left = x ? &s->block[index-1] : &null_block;
723  const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
724  const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
725  const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
726  int dmx, dmy;
727 // int mx_context= av_log2(2*FFABS(left->mx - top->mx));
728 // int my_context= av_log2(2*FFABS(left->my - top->my));
729 
730  if(x<0 || x>=b_stride || y>=b_height)
731  return 0;
732 /*
733 1 0 0
734 01X 1-2 1
735 001XX 3-6 2-3
736 0001XXX 7-14 4-7
737 00001XXXX 15-30 8-15
738 */
739 //FIXME try accurate rate
740 //FIXME intra and inter predictors if surrounding blocks are not the same type
741  if(b->type & BLOCK_INTRA){
742  return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
743  + av_log2(2*FFABS(left->color[1] - b->color[1]))
744  + av_log2(2*FFABS(left->color[2] - b->color[2])));
745  }else{
746  pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
747  dmx-= b->mx;
748  dmy-= b->my;
749  return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
750  + av_log2(2*FFABS(dmy))
751  + av_log2(2*b->ref));
752  }
753 }
754 
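/* Rate-distortion score of a single block: renders the block (including its
 * OBMC overlap into the neighbours) into the current picture, measures the
 * distortion with the configured me_cmp function (the wavelet cmps for 16x16
 * blocks when requested), and adds the estimated side-information bits scaled
 * by the penalty factor. */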
755 static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
756  int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
757 {
758  SnowContext *const s = &enc->com;
759  Plane *p= &s->plane[plane_index];
760  const int block_size = MB_SIZE >> s->block_max_depth;
761  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
762  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
763  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
764  const int ref_stride= s->current_picture->linesize[plane_index];
765  uint8_t *dst= s->current_picture->data[plane_index];
766  const uint8_t *src = s->input_picture->data[plane_index];
767  IDWTELEM *pred= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
768  uint8_t *cur = s->scratchbuf;
769  uint8_t *tmp = s->emu_edge_buffer;
770  const int b_stride = s->b_width << s->block_max_depth;
771  const int b_height = s->b_height<< s->block_max_depth;
772  const int w= p->width;
773  const int h= p->height;
774  int distortion;
775  int rate= 0;
776  const int penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
777  int sx= block_w*mb_x - block_w/2;
778  int sy= block_h*mb_y - block_h/2;
779  int x0= FFMAX(0,-sx);
780  int y0= FFMAX(0,-sy);
781  int x1= FFMIN(block_w*2, w-sx);
782  int y1= FFMIN(block_h*2, h-sy);
783  int i,x,y;
784 
785  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below, checking only block_w
786 
787  ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
788 
789  for(y=y0; y<y1; y++){
790  const uint8_t *obmc1= obmc_edged[y];
791  const IDWTELEM *pred1 = pred + y*obmc_stride;
792  uint8_t *cur1 = cur + y*ref_stride;
793  uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
794  for(x=x0; x<x1; x++){
795 #if FRAC_BITS >= LOG2_OBMC_MAX
796  int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
797 #else
798  int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
799 #endif
800  v = (v + pred1[x]) >> FRAC_BITS;
801  if(v&(~255)) v= ~(v>>31);
802  dst1[x] = v;
803  }
804  }
805 
806  /* copy the regions where obmc[] = (uint8_t)256 */
807  if(LOG2_OBMC_MAX == 8
808  && (mb_x == 0 || mb_x == b_stride-1)
809  && (mb_y == 0 || mb_y == b_height-1)){
810  if(mb_x == 0)
811  x1 = block_w;
812  else
813  x0 = block_w;
814  if(mb_y == 0)
815  y1 = block_h;
816  else
817  y0 = block_h;
818  for(y=y0; y<y1; y++)
819  memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
820  }
821 
822  if(block_w==16){
823  /* FIXME rearrange dsputil to fit 32x32 cmp functions */
824  /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
825  /* FIXME cmps overlap but do not cover the wavelet's whole support.
826  * So improving the score of one block is not strictly guaranteed
827  * to improve the score of the whole frame, thus iterative motion
828  * estimation does not always converge. */
829  if(s->avctx->me_cmp == FF_CMP_W97)
830  distortion = ff_w97_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
831  else if(s->avctx->me_cmp == FF_CMP_W53)
832  distortion = ff_w53_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
833  else{
834  distortion = 0;
835  for(i=0; i<4; i++){
836  int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
837  distortion += enc->mecc.me_cmp[0](&enc->m, src + off, dst + off, ref_stride, 16);
838  }
839  }
840  }else{
841  av_assert2(block_w==8);
842  distortion = enc->mecc.me_cmp[0](&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
843  }
844 
845  if(plane_index==0){
846  for(i=0; i<4; i++){
847 /* ..RRr
848  * .RXx.
849  * rxx..
850  */
851  rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
852  }
853  if(mb_x == b_stride-2)
854  rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
855  }
856  return distortion + rate*penalty_factor;
857 }
858 
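/* Like get_block_rd(), but for a 2x2 group: all nine blocks overlapping the
 * group are re-rendered with full OBMC and distortion plus rate are summed
 * over the whole area. */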
859 static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
860 {
861  SnowContext *const s = &enc->com;
862  int i, y2;
863  Plane *p= &s->plane[plane_index];
864  const int block_size = MB_SIZE >> s->block_max_depth;
865  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
866  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
867  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
868  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
869  const int ref_stride= s->current_picture->linesize[plane_index];
870  uint8_t *dst= s->current_picture->data[plane_index];
871  const uint8_t *src = s->input_picture->data[plane_index];
872  //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst)
873  // const has only been removed from zero_dst to suppress a warning
874  static IDWTELEM zero_dst[4096]; //FIXME
875  const int b_stride = s->b_width << s->block_max_depth;
876  const int w= p->width;
877  const int h= p->height;
878  int distortion= 0;
879  int rate= 0;
880  const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
881 
882  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below
883 
884  for(i=0; i<9; i++){
885  int mb_x2= mb_x + (i%3) - 1;
886  int mb_y2= mb_y + (i/3) - 1;
887  int x= block_w*mb_x2 + block_w/2;
888  int y= block_h*mb_y2 + block_h/2;
889 
890  add_yblock(s, 0, NULL, zero_dst, dst, obmc,
891  x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
892 
893  //FIXME find a cleaner/simpler way to skip the outside stuff
894  for(y2= y; y2<0; y2++)
895  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
896  for(y2= h; y2<y+block_h; y2++)
897  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
898  if(x<0){
899  for(y2= y; y2<y+block_h; y2++)
900  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
901  }
902  if(x+block_w > w){
903  for(y2= y; y2<y+block_h; y2++)
904  memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
905  }
906 
907  av_assert1(block_w== 8 || block_w==16);
908  distortion += enc->mecc.me_cmp[block_w==8](&enc->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
909  }
910 
911  if(plane_index==0){
912  BlockNode *b= &s->block[mb_x+mb_y*b_stride];
913  int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);
914 
915 /* ..RRRr
916  * .RXXx.
917  * .RXXx.
918  * rxxx.
919  */
920  if(merged)
921  rate = get_block_bits(s, mb_x, mb_y, 2);
922  for(i=merged?4:0; i<9; i++){
923  static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
924  rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
925  }
926  }
927  return distortion + rate*penalty_factor;
928 }
929 
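/* Entropy-codes one wavelet subband. Coefficients whose causal neighbourhood
 * (left, top-left, top, top-right, parent) is entirely zero are covered by
 * explicit zero-run lengths; the rest get a significance bit, a magnitude via
 * put_symbol2() and a sign bit, with contexts derived from the neighbours. */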
930 static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
931  const int w= b->width;
932  const int h= b->height;
933  int x, y;
934 
935  if(1){
936  int run=0;
937  int *runs = s->run_buffer;
938  int run_index=0;
939  int max_index;
940 
941  for(y=0; y<h; y++){
942  for(x=0; x<w; x++){
943  int v, p=0;
944  int /*ll=0, */l=0, lt=0, t=0, rt=0;
945  v= src[x + y*stride];
946 
947  if(y){
948  t= src[x + (y-1)*stride];
949  if(x){
950  lt= src[x - 1 + (y-1)*stride];
951  }
952  if(x + 1 < w){
953  rt= src[x + 1 + (y-1)*stride];
954  }
955  }
956  if(x){
957  l= src[x - 1 + y*stride];
958  /*if(x > 1){
959  if(orientation==1) ll= src[y + (x-2)*stride];
960  else ll= src[x - 2 + y*stride];
961  }*/
962  }
963  if(parent){
964  int px= x>>1;
965  int py= y>>1;
966  if(px<b->parent->width && py<b->parent->height)
967  p= parent[px + py*2*stride];
968  }
969  if(!(/*ll|*/l|lt|t|rt|p)){
970  if(v){
971  runs[run_index++]= run;
972  run=0;
973  }else{
974  run++;
975  }
976  }
977  }
978  }
979  max_index= run_index;
980  runs[run_index++]= run;
981  run_index=0;
982  run= runs[run_index++];
983 
984  put_symbol2(&s->c, b->state[30], max_index, 0);
985  if(run_index <= max_index)
986  put_symbol2(&s->c, b->state[1], run, 3);
987 
988  for(y=0; y<h; y++){
989  if(s->c.bytestream_end - s->c.bytestream < w*40){
990  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
991  return AVERROR(ENOMEM);
992  }
993  for(x=0; x<w; x++){
994  int v, p=0;
995  int /*ll=0, */l=0, lt=0, t=0, rt=0;
996  v= src[x + y*stride];
997 
998  if(y){
999  t= src[x + (y-1)*stride];
1000  if(x){
1001  lt= src[x - 1 + (y-1)*stride];
1002  }
1003  if(x + 1 < w){
1004  rt= src[x + 1 + (y-1)*stride];
1005  }
1006  }
1007  if(x){
1008  l= src[x - 1 + y*stride];
1009  /*if(x > 1){
1010  if(orientation==1) ll= src[y + (x-2)*stride];
1011  else ll= src[x - 2 + y*stride];
1012  }*/
1013  }
1014  if(parent){
1015  int px= x>>1;
1016  int py= y>>1;
1017  if(px<b->parent->width && py<b->parent->height)
1018  p= parent[px + py*2*stride];
1019  }
1020  if(/*ll|*/l|lt|t|rt|p){
1021  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1022 
1023  put_rac(&s->c, &b->state[0][context], !!v);
1024  }else{
1025  if(!run){
1026  run= runs[run_index++];
1027 
1028  if(run_index <= max_index)
1029  put_symbol2(&s->c, b->state[1], run, 3);
1030  av_assert2(v);
1031  }else{
1032  run--;
1033  av_assert2(!v);
1034  }
1035  }
1036  if(v){
1037  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1038  int l2= 2*FFABS(l) + (l<0);
1039  int t2= 2*FFABS(t) + (t<0);
1040 
1041  put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
1042  put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
1043  }
1044  }
1045  }
1046  }
1047  return 0;
1048 }
1049 
1050 static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
1051 // encode_subband_qtree(s, b, src, parent, stride, orientation);
1052 // encode_subband_z0run(s, b, src, parent, stride, orientation);
1053  return encode_subband_c0run(s, b, src, parent, stride, orientation);
1054 // encode_subband_dzr(s, b, src, parent, stride, orientation);
1055 }
1056 
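/* The check_block_*() helpers tentatively overwrite a block (or a 2x2 group)
 * with a candidate -- an intra colour or a motion vector -- re-evaluate the RD
 * score, and either keep the change and update *best_rd or restore the backup.
 * The inter variants also use a small hashed cache so identical candidates are
 * not re-scored within one generation. */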
1057 static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3],
1058  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1059 {
1060  SnowContext *const s = &enc->com;
1061  const int b_stride= s->b_width << s->block_max_depth;
1062  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1063  BlockNode backup= *block;
1064  int rd;
1065 
1066  av_assert2(mb_x>=0 && mb_y>=0);
1067  av_assert2(mb_x<b_stride);
1068 
1069  block->color[0] = p[0];
1070  block->color[1] = p[1];
1071  block->color[2] = p[2];
1072  block->type |= BLOCK_INTRA;
1073 
1074  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged) + enc->intra_penalty;
1075 
1076 //FIXME chroma
1077  if(rd < *best_rd){
1078  *best_rd= rd;
1079  return 1;
1080  }else{
1081  *block= backup;
1082  return 0;
1083  }
1084 }
1085 
1086 /* special case for int[2] args we discard afterwards,
1087  * fixes compilation problem with gcc 2.95 */
1088 static av_always_inline int check_block_inter(SnowEncContext *enc,
1089  int mb_x, int mb_y, int p0, int p1,
1090  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1091 {
1092  SnowContext *const s = &enc->com;
1093  const int b_stride = s->b_width << s->block_max_depth;
1094  BlockNode *block = &s->block[mb_x + mb_y * b_stride];
1095  BlockNode backup = *block;
1096  unsigned value;
1097  int rd, index;
1098 
1099  av_assert2(mb_x >= 0 && mb_y >= 0);
1100  av_assert2(mb_x < b_stride);
1101 
1102  index = (p0 + 31 * p1) & (ME_CACHE_SIZE-1);
1103  value = enc->me_cache_generation + (p0 >> 10) + p1 * (1 << 6) + (block->ref << 12);
1104  if (enc->me_cache[index] == value)
1105  return 0;
1106  enc->me_cache[index] = value;
1107 
1108  block->mx = p0;
1109  block->my = p1;
1110  block->type &= ~BLOCK_INTRA;
1111 
1112  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged);
1113 
1114 //FIXME chroma
1115  if (rd < *best_rd) {
1116  *best_rd = rd;
1117  return 1;
1118  } else {
1119  *block = backup;
1120  return 0;
1121  }
1122 }
1123 
1124 static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y,
1125  int p0, int p1, int ref, int *best_rd)
1126 {
1127  SnowContext *const s = &enc->com;
1128  const int b_stride= s->b_width << s->block_max_depth;
1129  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1130  BlockNode backup[4];
1131  unsigned value;
1132  int rd, index;
1133 
1134  /* We don't initialize backup[] during variable declaration, because
1135  * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
1136  * 'int16_t'". */
1137  backup[0] = block[0];
1138  backup[1] = block[1];
1139  backup[2] = block[b_stride];
1140  backup[3] = block[b_stride + 1];
1141 
1142  av_assert2(mb_x>=0 && mb_y>=0);
1143  av_assert2(mb_x<b_stride);
1144  av_assert2(((mb_x|mb_y)&1) == 0);
1145 
1146  index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
1147  value = enc->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
1148  if (enc->me_cache[index] == value)
1149  return 0;
1150  enc->me_cache[index] = value;
1151 
1152  block->mx= p0;
1153  block->my= p1;
1154  block->ref= ref;
1155  block->type &= ~BLOCK_INTRA;
1156  block[1]= block[b_stride]= block[b_stride+1]= *block;
1157 
1158  rd = get_4block_rd(enc, mb_x, mb_y, 0);
1159 
1160 //FIXME chroma
1161  if(rd < *best_rd){
1162  *best_rd= rd;
1163  return 1;
1164  }else{
1165  block[0]= backup[0];
1166  block[1]= backup[1];
1167  block[b_stride]= backup[2];
1168  block[b_stride+1]= backup[3];
1169  return 0;
1170  }
1171 }
1172 
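/* Iterative motion estimation: an initial encode_q_branch() pass seeds the
 * per-reference MVs, then each block is refined for up to 25 passes by trying
 * cached and neighbouring vectors, a fullpel diamond search and a +/-1 subpel
 * refinement, keeping the best RD score; neighbours of any changed block are
 * marked for re-evaluation. A final pass tries merging 2x2 groups when 4MV
 * (block_max_depth == 1) is enabled. */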
1173 static void iterative_me(SnowEncContext *enc)
1174 {
1175  SnowContext *const s = &enc->com;
1176  int pass, mb_x, mb_y;
1177  const int b_width = s->b_width << s->block_max_depth;
1178  const int b_height= s->b_height << s->block_max_depth;
1179  const int b_stride= b_width;
1180  int color[3];
1181 
1182  {
1183  RangeCoder r = s->c;
1184  uint8_t state[sizeof(s->block_state)];
1185  memcpy(state, s->block_state, sizeof(s->block_state));
1186  for(mb_y= 0; mb_y<s->b_height; mb_y++)
1187  for(mb_x= 0; mb_x<s->b_width; mb_x++)
1188  encode_q_branch(enc, 0, mb_x, mb_y);
1189  s->c = r;
1190  memcpy(s->block_state, state, sizeof(s->block_state));
1191  }
1192 
1193  for(pass=0; pass<25; pass++){
1194  int change= 0;
1195 
1196  for(mb_y= 0; mb_y<b_height; mb_y++){
1197  for(mb_x= 0; mb_x<b_width; mb_x++){
1198  int dia_change, i, j, ref;
1199  int best_rd= INT_MAX, ref_rd;
1200  BlockNode backup, ref_b;
1201  const int index= mb_x + mb_y * b_stride;
1202  BlockNode *block= &s->block[index];
1203  BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
1204  BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
1205  BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
1206  BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
1207  BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
1208  BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
1209  BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
1210  BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
1211  const int b_w= (MB_SIZE >> s->block_max_depth);
1212  uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
1213 
1214  if(pass && (block->type & BLOCK_OPT))
1215  continue;
1216  block->type |= BLOCK_OPT;
1217 
1218  backup= *block;
1219 
1220  if (!enc->me_cache_generation)
1221  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1222  enc->me_cache_generation += 1<<22;
1223 
1224  //FIXME precalculate
1225  {
1226  int x, y;
1227  for (y = 0; y < b_w * 2; y++)
1228  memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1229  if(mb_x==0)
1230  for(y=0; y<b_w*2; y++)
1231  memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1232  if(mb_x==b_stride-1)
1233  for(y=0; y<b_w*2; y++)
1234  memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1235  if(mb_y==0){
1236  for(x=0; x<b_w*2; x++)
1237  obmc_edged[0][x] += obmc_edged[b_w-1][x];
1238  for(y=1; y<b_w; y++)
1239  memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1240  }
1241  if(mb_y==b_height-1){
1242  for(x=0; x<b_w*2; x++)
1243  obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1244  for(y=b_w; y<b_w*2-1; y++)
1245  memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
1246  }
1247  }
1248 
1249  //skip stuff outside the picture
1250  if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1251  const uint8_t *src = s->input_picture->data[0];
1252  uint8_t *dst= s->current_picture->data[0];
1253  const int stride= s->current_picture->linesize[0];
1254  const int block_w= MB_SIZE >> s->block_max_depth;
1255  const int block_h= MB_SIZE >> s->block_max_depth;
1256  const int sx= block_w*mb_x - block_w/2;
1257  const int sy= block_h*mb_y - block_h/2;
1258  const int w= s->plane[0].width;
1259  const int h= s->plane[0].height;
1260  int y;
1261 
1262  for(y=sy; y<0; y++)
1263  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1264  for(y=h; y<sy+block_h*2; y++)
1265  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1266  if(sx<0){
1267  for(y=sy; y<sy+block_h*2; y++)
1268  memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
1269  }
1270  if(sx+block_w*2 > w){
1271  for(y=sy; y<sy+block_h*2; y++)
1272  memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
1273  }
1274  }
1275 
1276  // intra(black) = neighbors' contribution to the current block
1277  for(i=0; i < s->nb_planes; i++)
1278  color[i]= get_dc(enc, mb_x, mb_y, i);
1279 
1280  // get previous score (cannot be cached due to OBMC)
1281  if(pass > 0 && (block->type&BLOCK_INTRA)){
1282  int color0[3]= {block->color[0], block->color[1], block->color[2]};
1283  check_block_intra(enc, mb_x, mb_y, color0, obmc_edged, &best_rd);
1284  }else
1285  check_block_inter(enc, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
1286 
1287  ref_b= *block;
1288  ref_rd= best_rd;
1289  for(ref=0; ref < s->ref_frames; ref++){
1290  int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1291  if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
1292  continue;
1293  block->ref= ref;
1294  best_rd= INT_MAX;
1295 
1296  check_block_inter(enc, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
1297  check_block_inter(enc, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
1298  if(tb)
1299  check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1300  if(lb)
1301  check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1302  if(rb)
1303  check_block_inter(enc, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
1304  if(bb)
1305  check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1306 
1307  /* fullpel ME */
1308  //FIXME avoid subpel interpolation / round to nearest integer
1309  do{
1310  int newx = block->mx;
1311  int newy = block->my;
1312  int dia_size = enc->iterative_dia_size ? enc->iterative_dia_size : FFMAX(s->avctx->dia_size, 1);
1313  dia_change=0;
1314  for(i=0; i < dia_size; i++){
1315  for(j=0; j<i; j++){
1316  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1317  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1318  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1319  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
1320  }
1321  }
1322  }while(dia_change);
1323  /* subpel ME */
1324  do{
1325  static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1326  dia_change=0;
1327  for(i=0; i<8; i++)
1328  dia_change |= check_block_inter(enc, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
1329  }while(dia_change);
1330  //FIXME or try the standard 2 pass qpel or similar
1331 
1332  mvr[0][0]= block->mx;
1333  mvr[0][1]= block->my;
1334  if(ref_rd > best_rd){
1335  ref_rd= best_rd;
1336  ref_b= *block;
1337  }
1338  }
1339  best_rd= ref_rd;
1340  *block= ref_b;
1341  check_block_intra(enc, mb_x, mb_y, color, obmc_edged, &best_rd);
1342  //FIXME RD style color selection
1343  if(!same_block(block, &backup)){
1344  if(tb ) tb ->type &= ~BLOCK_OPT;
1345  if(lb ) lb ->type &= ~BLOCK_OPT;
1346  if(rb ) rb ->type &= ~BLOCK_OPT;
1347  if(bb ) bb ->type &= ~BLOCK_OPT;
1348  if(tlb) tlb->type &= ~BLOCK_OPT;
1349  if(trb) trb->type &= ~BLOCK_OPT;
1350  if(blb) blb->type &= ~BLOCK_OPT;
1351  if(brb) brb->type &= ~BLOCK_OPT;
1352  change ++;
1353  }
1354  }
1355  }
1356  av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
1357  if(!change)
1358  break;
1359  }
1360 
1361  if(s->block_max_depth == 1){
1362  int change= 0;
1363  for(mb_y= 0; mb_y<b_height; mb_y+=2){
1364  for(mb_x= 0; mb_x<b_width; mb_x+=2){
1365  int i;
1366  int best_rd, init_rd;
1367  const int index= mb_x + mb_y * b_stride;
1368  BlockNode *b[4];
1369 
1370  b[0]= &s->block[index];
1371  b[1]= b[0]+1;
1372  b[2]= b[0]+b_stride;
1373  b[3]= b[2]+1;
1374  if(same_block(b[0], b[1]) &&
1375  same_block(b[0], b[2]) &&
1376  same_block(b[0], b[3]))
1377  continue;
1378 
1379  if (!enc->me_cache_generation)
1380  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1381  enc->me_cache_generation += 1<<22;
1382 
1383  init_rd = best_rd = get_4block_rd(enc, mb_x, mb_y, 0);
1384 
1385  //FIXME more multiref search?
1386  check_4block_inter(enc, mb_x, mb_y,
1387  (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1388  (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
1389 
1390  for(i=0; i<4; i++)
1391  if(!(b[i]->type&BLOCK_INTRA))
1392  check_4block_inter(enc, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
1393 
1394  if(init_rd != best_rd)
1395  change++;
1396  }
1397  }
1398  av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
1399  }
1400 }
1401 
1402 static void encode_blocks(SnowEncContext *enc, int search)
1403 {
1404  SnowContext *const s = &enc->com;
1405  int x, y;
1406  int w= s->b_width;
1407  int h= s->b_height;
1408 
1409  if (enc->motion_est == FF_ME_ITER && !s->keyframe && search)
1410  iterative_me(enc);
1411 
1412  for(y=0; y<h; y++){
1413  if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
1414  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1415  return;
1416  }
1417  for(x=0; x<w; x++){
1418  if (enc->motion_est == FF_ME_ITER || !search)
1419  encode_q_branch2(s, 0, x, y);
1420  else
1421  encode_q_branch (enc, 0, x, y);
1422  }
1423  }
1424 }
1425 
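/* Dead-zone quantizer for one subband: coefficients within +/-thres1 become
 * zero, the rest are divided by qmul (with an optional bias); lossless mode
 * copies the coefficients through unchanged. */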
1426 static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
1427  const int w= b->width;
1428  const int h= b->height;
1429  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1430  const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
1431  int x,y, thres1, thres2;
1432 
1433  if(s->qlog == LOSSLESS_QLOG){
1434  for(y=0; y<h; y++)
1435  for(x=0; x<w; x++)
1436  dst[x + y*stride]= src[x + y*stride];
1437  return;
1438  }
1439 
1440  bias= bias ? 0 : (3*qmul)>>3;
1441  thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
1442  thres2= 2*thres1;
1443 
1444  if(!bias){
1445  for(y=0; y<h; y++){
1446  for(x=0; x<w; x++){
1447  int i= src[x + y*stride];
1448 
1449  if((unsigned)(i+thres1) > thres2){
1450  if(i>=0){
1451  i<<= QEXPSHIFT;
1452  i/= qmul; //FIXME optimize
1453  dst[x + y*stride]= i;
1454  }else{
1455  i= -i;
1456  i<<= QEXPSHIFT;
1457  i/= qmul; //FIXME optimize
1458  dst[x + y*stride]= -i;
1459  }
1460  }else
1461  dst[x + y*stride]= 0;
1462  }
1463  }
1464  }else{
1465  for(y=0; y<h; y++){
1466  for(x=0; x<w; x++){
1467  int i= src[x + y*stride];
1468 
1469  if((unsigned)(i+thres1) > thres2){
1470  if(i>=0){
1471  i<<= QEXPSHIFT;
1472  i= (i + bias) / qmul; //FIXME optimize
1473  dst[x + y*stride]= i;
1474  }else{
1475  i= -i;
1476  i<<= QEXPSHIFT;
1477  i= (i + bias) / qmul; //FIXME optimize
1478  dst[x + y*stride]= -i;
1479  }
1480  }else
1481  dst[x + y*stride]= 0;
1482  }
1483  }
1484  }
1485 }
1486 
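/* Inverse of quantize(): rescales the coefficients in place by qmul with the
 * qbias rounding offset, again skipping lossless mode. */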
1487 static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride){
1488  const int w= b->width;
1489  const int h= b->height;
1490  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1491  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1492  const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
1493  int x,y;
1494 
1495  if(s->qlog == LOSSLESS_QLOG) return;
1496 
1497  for(y=0; y<h; y++){
1498  for(x=0; x<w; x++){
1499  int i= src[x + y*stride];
1500  if(i<0){
1501  src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
1502  }else if(i>0){
1503  src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
1504  }
1505  }
1506  }
1507 }
1508 
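/* decorrelate() subtracts a simple spatial prediction (left/top, median or
 * gradient) from the DC subband so it codes more cheaply; correlate() below is
 * the exact inverse and restores the original samples. */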
1509 static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1510  const int w= b->width;
1511  const int h= b->height;
1512  int x,y;
1513 
1514  for(y=h-1; y>=0; y--){
1515  for(x=w-1; x>=0; x--){
1516  int i= x + y*stride;
1517 
1518  if(x){
1519  if(use_median){
1520  if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1521  else src[i] -= src[i - 1];
1522  }else{
1523  if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1524  else src[i] -= src[i - 1];
1525  }
1526  }else{
1527  if(y) src[i] -= src[i - stride];
1528  }
1529  }
1530  }
1531 }
1532 
1533 static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1534  const int w= b->width;
1535  const int h= b->height;
1536  int x,y;
1537 
1538  for(y=0; y<h; y++){
1539  for(x=0; x<w; x++){
1540  int i= x + y*stride;
1541 
1542  if(x){
1543  if(use_median){
1544  if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1545  else src[i] += src[i - 1];
1546  }else{
1547  if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1548  else src[i] += src[i - 1];
1549  }
1550  }else{
1551  if(y) src[i] += src[i - stride];
1552  }
1553  }
1554  }
1555 }
1556 
1557 static void encode_qlogs(SnowContext *s){
1558  int plane_index, level, orientation;
1559 
1560  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1561  for(level=0; level<s->spatial_decomposition_count; level++){
1562  for(orientation=level ? 1:0; orientation<4; orientation++){
1563  if(orientation==2) continue;
1564  put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1565  }
1566  }
1567  }
1568 }
1569 
1570 static void encode_header(SnowContext *s){
1571  int plane_index, i;
1572  uint8_t kstate[32];
1573 
1574  memset(kstate, MID_STATE, sizeof(kstate));
1575 
1576  put_rac(&s->c, kstate, s->keyframe);
1577  if(s->keyframe || s->always_reset){
1579  s->last_spatial_decomposition_type=
1580  s->last_qlog=
1581  s->last_qbias=
1582  s->last_mv_scale=
1583  s->last_block_max_depth= 0;
1584  for(plane_index=0; plane_index<2; plane_index++){
1585  Plane *p= &s->plane[plane_index];
1586  p->last_htaps=0;
1587  p->last_diag_mc=0;
1588  memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
1589  }
1590  }
1591  if(s->keyframe){
1592  put_symbol(&s->c, s->header_state, s->version, 0);
1593  put_rac(&s->c, s->header_state, s->always_reset);
1594  put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1595  put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1596  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1597  put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
1598  if (s->nb_planes > 2) {
1599  put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
1600  put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
1601  }
1602  put_rac(&s->c, s->header_state, s->spatial_scalability);
1603 // put_rac(&s->c, s->header_state, s->rate_scalability);
1604  put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
1605 
1606  encode_qlogs(s);
1607  }
1608 
1609  if(!s->keyframe){
1610  int update_mc=0;
1611  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1612  Plane *p= &s->plane[plane_index];
1613  update_mc |= p->last_htaps != p->htaps;
1614  update_mc |= p->last_diag_mc != p->diag_mc;
1615  update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1616  }
1617  put_rac(&s->c, s->header_state, update_mc);
1618  if(update_mc){
1619  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1620  Plane *p= &s->plane[plane_index];
1621  put_rac(&s->c, s->header_state, p->diag_mc);
1622  put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
1623  for(i= p->htaps/2; i; i--)
1624  put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
1625  }
1626  }
1627  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1628  put_rac(&s->c, s->header_state, 1);
1629  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1630  encode_qlogs(s);
1631  }else
1632  put_rac(&s->c, s->header_state, 0);
1633  }
1634 
1635  put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1636  put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
1637  put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1638  put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
1639  put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1640 
1641 }
1642 
1643 static void update_last_header_values(SnowContext *s){
1644  int plane_index;
1645 
1646  if(!s->keyframe){
1647  for(plane_index=0; plane_index<2; plane_index++){
1648  Plane *p= &s->plane[plane_index];
1649  p->last_diag_mc= p->diag_mc;
1650  p->last_htaps = p->htaps;
1651  memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1652  }
1653  }
1654 
1655  s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1656  s->last_qlog = s->qlog;
1657  s->last_qbias = s->qbias;
1658  s->last_mv_scale = s->mv_scale;
1659  s->last_block_max_depth = s->block_max_depth;
1660  s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1661 }
1662 
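/* Maps an MPEG-style qscale onto Snow's logarithmic qlog scale: one QROOT step
 * per doubling of qscale, offset by 61*QROOT/8. Assuming QROOT is 8 (as
 * defined in snow.h), qscale == FF_QP2LAMBDA gives qlog 61 and each doubling
 * of qscale adds 8. */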
1663 static int qscale2qlog(int qscale){
1664  return lrint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
1665  + 61*QROOT/8; ///< 64 > 60
1666 }
1667 
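/* One-pass rate control: approximates frame complexity by summing the roughly
 * quantized magnitudes of the luma wavelet coefficients, feeds that into the
 * mpegvideo rate controller as the (mc_)mb_var sums, and converts the returned
 * qscale into a qlog delta for this frame. */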
1668 static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
1669 {
1670  SnowContext *const s = &enc->com;
1671  /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
1672  * FIXME we know exact mv bits at this point,
1673  * but ratecontrol isn't set up to include them. */
1674  uint32_t coef_sum= 0;
1675  int level, orientation, delta_qlog;
1676 
1677  for(level=0; level<s->spatial_decomposition_count; level++){
1678  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1679  SubBand *b= &s->plane[0].band[level][orientation];
1680  IDWTELEM *buf= b->ibuf;
1681  const int w= b->width;
1682  const int h= b->height;
1683  const int stride= b->stride;
1684  const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
1685  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1686  const int qdiv= (1<<16)/qmul;
1687  int x, y;
1688  //FIXME this is ugly
1689  for(y=0; y<h; y++)
1690  for(x=0; x<w; x++)
1691  buf[x+y*stride]= b->buf[x+y*stride];
1692  if(orientation==0)
1693  decorrelate(s, b, buf, stride, 1, 0);
1694  for(y=0; y<h; y++)
1695  for(x=0; x<w; x++)
1696  coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
1697  }
1698  }
1699 
1700  /* ugly, ratecontrol just takes a sqrt again */
1701  av_assert0(coef_sum < INT_MAX);
1702  coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
1703 
1704  if(pict->pict_type == AV_PICTURE_TYPE_I){
1705  enc->m.mb_var_sum = coef_sum;
1706  enc->m.mc_mb_var_sum = 0;
1707  }else{
1708  enc->m.mc_mb_var_sum = coef_sum;
1709  enc->m.mb_var_sum = 0;
1710  }
1711 
1712  pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
1713  if (pict->quality < 0)
1714  return INT_MIN;
1715  enc->lambda= pict->quality * 3/2;
1716  delta_qlog= qscale2qlog(pict->quality) - s->qlog;
1717  s->qlog+= delta_qlog;
1718  return delta_qlog;
1719 }
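/* Rough reading of the pass-1 rate control above: for each subband the
 * coefficients are scaled by qdiv = 2^16/qmul, i.e. divided by the band's
 * quantizer step, and their absolute values are summed.  Because
 * ff_rate_estimate_qscale() internally takes a square root of its
 * complexity input, coef_sum is squared first:
 *
 *     complexity ~= (sum(|coeff| / qmul))^2 >> 16
 *
 * The result is fed in as mb_var_sum for intra frames and mc_mb_var_sum for
 * inter frames, and the returned qscale is converted back into a qlog delta.
 */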
1720 
1721 static void calculate_visual_weight(SnowContext *s, Plane *p){
1722  int width = p->width;
1723  int height= p->height;
1724  int level, orientation, x, y;
1725 
1726  for(level=0; level<s->spatial_decomposition_count; level++){
1727  int64_t error=0;
1728  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1729  SubBand *b= &p->band[level][orientation];
1730  IDWTELEM *ibuf= b->ibuf;
1731 
1732  memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1733  ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
1734  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
1735  for(y=0; y<height; y++){
1736  for(x=0; x<width; x++){
1737  int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
1738  error += d*d;
1739  }
1740  }
1741  if (orientation == 2)
1742  error /= 2;
1743  b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
1744  if (orientation != 1)
1745  error = 0;
1746  }
1747  p->band[level][1].qlog = p->band[level][2].qlog;
1748  }
1749 }
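/* What the loop above measures: a single impulse of amplitude 256*16 is
 * placed in the middle of each subband, the inverse DWT reconstructs it,
 * and the squared reconstruction energy indicates how visible quantization
 * noise in that band is.  The band quantizer is then set as
 *
 *     b->qlog = round(QROOT * log2(352256.0 / sqrt(error)))
 *
 * so bands whose basis functions carry more energy get finer quantization.
 * The two highpass orientations of each level (HL and LH) accumulate their
 * error together and end up sharing the same qlog (line 1747).
 */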
1750 
1751 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1752  const AVFrame *pict, int *got_packet)
1753 {
1754  SnowEncContext *const enc = avctx->priv_data;
1755  SnowContext *const s = &enc->com;
1756  MpegEncContext *const mpv = &enc->m;
1757  RangeCoder * const c= &s->c;
1758  AVCodecInternal *avci = avctx->internal;
1759  AVFrame *pic;
1760  const int width= s->avctx->width;
1761  const int height= s->avctx->height;
1762  int level, orientation, plane_index, i, y, ret;
1763  uint8_t rc_header_bak[sizeof(s->header_state)];
1764  uint8_t rc_block_bak[sizeof(s->block_state)];
1765 
1766  if ((ret = ff_alloc_packet(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
1767  return ret;
1768 
1769  ff_init_range_encoder(c, pkt->data, pkt->size);
1770  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1771 
1772  for(i=0; i < s->nb_planes; i++){
1773  int hshift= i ? s->chroma_h_shift : 0;
1774  int vshift= i ? s->chroma_v_shift : 0;
1775  for(y=0; y<AV_CEIL_RSHIFT(height, vshift); y++)
1776  memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1777  &pict->data[i][y * pict->linesize[i]],
1778  AV_CEIL_RSHIFT(width, hshift));
1779  enc->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
1780  AV_CEIL_RSHIFT(width, hshift), AV_CEIL_RSHIFT(height, vshift),
1781  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1782  EDGE_TOP | EDGE_BOTTOM);
1783 
1784  }
1785  emms_c();
1786  pic = s->input_picture;
1787  pic->pict_type = pict->pict_type;
1788  pic->quality = pict->quality;
1789 
1790  mpv->picture_number = avctx->frame_num;
1791  if(avctx->flags&AV_CODEC_FLAG_PASS2){
1792  mpv->pict_type = pic->pict_type = mpv->rc_context.entry[avctx->frame_num].new_pict_type;
1793  s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
1794  if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
1795  pic->quality = ff_rate_estimate_qscale(mpv, 0);
1796  if (pic->quality < 0)
1797  return -1;
1798  }
1799  }else{
1800  s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
1801  mpv->pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1802  }
1803 
1804  if (enc->pass1_rc && avctx->frame_num == 0)
1805  pic->quality = 2*FF_QP2LAMBDA;
1806  if (pic->quality) {
1807  s->qlog = qscale2qlog(pic->quality);
1808  enc->lambda = pic->quality * 3/2;
1809  }
1810  if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
1811  s->qlog= LOSSLESS_QLOG;
1812  enc->lambda = 0;
1813  }//else keep previous frame's qlog until after motion estimation
1814 
1815  if (s->current_picture->data[0]) {
1816  int w = s->avctx->width;
1817  int h = s->avctx->height;
1818 
1819  enc->mpvencdsp.draw_edges(s->current_picture->data[0],
1820  s->current_picture->linesize[0], w , h ,
1821  EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM);
1822  if (s->current_picture->data[2]) {
1823  enc->mpvencdsp.draw_edges(s->current_picture->data[1],
1824  s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1825  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1826  enc->mpvencdsp.draw_edges(s->current_picture->data[2],
1827  s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1828  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1829  }
1830  emms_c();
1831  }
1832 
1833  ff_snow_frames_prepare(s);
1834  ret = get_encode_buffer(s, s->current_picture);
1835  if (ret < 0)
1836  return ret;
1837 
1838  mpv->cur_pic.ptr = &enc->cur_pic;
1839  mpv->cur_pic.ptr->f = s->current_picture;
1840  mpv->cur_pic.ptr->f->pts = pict->pts;
1841  if(pic->pict_type == AV_PICTURE_TYPE_P){
1842  int block_width = (width +15)>>4;
1843  int block_height= (height+15)>>4;
1844  int stride= s->current_picture->linesize[0];
1845 
1846  av_assert0(s->current_picture->data[0]);
1847  av_assert0(s->last_picture[0]->data[0]);
1848 
1849  mpv->avctx = s->avctx;
1850  mpv->last_pic.ptr = &enc->last_pic;
1851  mpv->last_pic.ptr->f = s->last_picture[0];
1852  mpv-> new_pic = s->input_picture;
1853  mpv->linesize = stride;
1854  mpv->uvlinesize = s->current_picture->linesize[1];
1855  mpv->width = width;
1856  mpv->height = height;
1857  mpv->mb_width = block_width;
1858  mpv->mb_height = block_height;
1859  mpv->mb_stride = mpv->mb_width + 1;
1860  mpv->b8_stride = 2 * mpv->mb_width + 1;
1861  mpv->f_code = 1;
1862  mpv->pict_type = pic->pict_type;
1863  mpv->motion_est = enc->motion_est;
1864  mpv->me.scene_change_score = 0;
1865  mpv->me.dia_size = avctx->dia_size;
1866  mpv->quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
1867  mpv->out_format = FMT_H263;
1868  mpv->unrestricted_mv = 1;
1869 
1870  mpv->lambda = enc->lambda;
1871  mpv->qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
1872  enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
1873 
1874  mpv->mecc = enc->mecc; //move
1875  mpv->qdsp = enc->qdsp; //move
1876  mpv->hdsp = s->hdsp;
1877  ff_init_me(&enc->m);
1878  s->hdsp = mpv->hdsp;
1879  enc->mecc = mpv->mecc;
1880  }
1881 
1882  if (enc->pass1_rc) {
1883  memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1884  memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1885  }
1886 
1887 redo_frame:
1888 
1889  s->spatial_decomposition_count= 5;
1890 
1891  while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1892  || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1893  s->spatial_decomposition_count--;
1894 
1895  if (s->spatial_decomposition_count <= 0) {
1896  av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
1897  return AVERROR(EINVAL);
1898  }
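/* Illustrative note: the decomposition depth starts at 5 and is reduced
 * until even the subsampled chroma planes keep at least one pixel at the
 * coarsest level, i.e. width >> (chroma_h_shift + count) and
 * height >> (chroma_v_shift + count) must both stay nonzero.  For example a
 * 176x144 4:2:0 input (shift 1) supports count 5 (176>>6 = 2, 144>>6 = 2),
 * while a 32x32 4:2:0 input drops to count 4 (32>>6 = 0, but 32>>5 = 1).
 */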
1899 
1900  mpv->pict_type = pic->pict_type;
1901  s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
1902 
1903  ff_snow_common_init_after_header(avctx);
1904 
1905  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1906  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1907  calculate_visual_weight(s, &s->plane[plane_index]);
1908  }
1909  }
1910 
1911  encode_header(s);
1912  mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1913  encode_blocks(enc, 1);
1914  mpv->mv_bits = 8 * (s->c.bytestream - s->c.bytestream_start) - mpv->misc_bits;
1915 
1916  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1917  Plane *p= &s->plane[plane_index];
1918  int w= p->width;
1919  int h= p->height;
1920  int x, y;
1921 // int bits= put_bits_count(&s->c.pb);
1922 
1923  if (!enc->memc_only) {
1924  //FIXME optimize
1925  if(pict->data[plane_index]) //FIXME gray hack
1926  for(y=0; y<h; y++){
1927  for(x=0; x<w; x++){
1928  s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
1929  }
1930  }
1931  predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
1932 
1933  if( plane_index==0
1934  && pic->pict_type == AV_PICTURE_TYPE_P
1935  && !(avctx->flags&AV_CODEC_FLAG_PASS2)
1936  && mpv->me.scene_change_score > enc->scenechange_threshold) {
1937  ff_init_range_encoder(c, pkt->data, pkt->size);
1938  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1939  pic->pict_type= AV_PICTURE_TYPE_I;
1940  s->keyframe=1;
1941  s->current_picture->flags |= AV_FRAME_FLAG_KEY;
1942  goto redo_frame;
1943  }
1944 
1945  if(s->qlog == LOSSLESS_QLOG){
1946  for(y=0; y<h; y++){
1947  for(x=0; x<w; x++){
1948  s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
1949  }
1950  }
1951  }else{
1952  for(y=0; y<h; y++){
1953  for(x=0; x<w; x++){
1954  s->spatial_dwt_buffer[y*w + x]= s->spatial_idwt_buffer[y*w + x] * (1 << ENCODER_EXTRA_BITS);
1955  }
1956  }
1957  }
1958 
1959  ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1960 
1961  if (enc->pass1_rc && plane_index==0) {
1962  int delta_qlog = ratecontrol_1pass(enc, pic);
1963  if (delta_qlog <= INT_MIN)
1964  return -1;
1965  if(delta_qlog){
1966  //reordering qlog in the bitstream would eliminate this reset
1967  ff_init_range_encoder(c, pkt->data, pkt->size);
1968  memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1969  memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1970  encode_header(s);
1971  encode_blocks(enc, 0);
1972  }
1973  }
1974 
1975  for(level=0; level<s->spatial_decomposition_count; level++){
1976  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1977  SubBand *b= &p->band[level][orientation];
1978 
1979  quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
1980  if(orientation==0)
1981  decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
1982  if (!enc->no_bitstream)
1983  encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
1984  av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
1985  if(orientation==0)
1986  correlate(s, b, b->ibuf, b->stride, 1, 0);
1987  }
1988  }
1989 
1990  for(level=0; level<s->spatial_decomposition_count; level++){
1991  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1992  SubBand *b= &p->band[level][orientation];
1993 
1994  dequantize(s, b, b->ibuf, b->stride);
1995  }
1996  }
1997 
1998  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1999  if(s->qlog == LOSSLESS_QLOG){
2000  for(y=0; y<h; y++){
2001  for(x=0; x<w; x++){
2002  s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2003  }
2004  }
2005  }
2006  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2007  }else{
2008  //ME/MC only
2009  if(pic->pict_type == AV_PICTURE_TYPE_I){
2010  for(y=0; y<h; y++){
2011  for(x=0; x<w; x++){
2012  s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2013  pict->data[plane_index][y*pict->linesize[plane_index] + x];
2014  }
2015  }
2016  }else{
2017  memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2018  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2019  }
2020  }
2021  if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
2022  int64_t error= 0;
2023 
2024  if(pict->data[plane_index]) //FIXME gray hack
2025  for(y=0; y<h; y++){
2026  for(x=0; x<w; x++){
2027  int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2028  error += d*d;
2029  }
2030  }
2031  s->avctx->error[plane_index] += error;
2032  enc->encoding_error[plane_index] = error;
2033  }
2034 
2035  }
2036  emms_c();
2037 
2038  update_last_header_values(s);
2039 
2040  ff_snow_release_buffer(avctx);
2041 
2042  s->current_picture->pict_type = pic->pict_type;
2043  s->current_picture->quality = pic->quality;
2044  mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2045  mpv->p_tex_bits = mpv->frame_bits - mpv->misc_bits - mpv->mv_bits;
2046  mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2047  enc->cur_pic.display_picture_number =
2048  enc->cur_pic.coded_picture_number = avctx->frame_num;
2049  enc->cur_pic.f->quality = pic->quality;
2050  if (enc->pass1_rc)
2051  if (ff_rate_estimate_qscale(mpv, 0) < 0)
2052  return -1;
2053  if(avctx->flags&AV_CODEC_FLAG_PASS1)
2054  ff_write_pass1_stats(mpv);
2055  mpv->last_pict_type = mpv->pict_type;
2056 
2057  emms_c();
2058 
2059  ff_side_data_set_encoder_stats(pkt, s->current_picture->quality,
2060  enc->encoding_error,
2061  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? SNOW_MAX_PLANES : 0,
2062  s->current_picture->pict_type);
2063  if (s->avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
2064  av_frame_replace(avci->recon_frame, s->current_picture);
2065  }
2066 
2067  pkt->size = ff_rac_terminate(c, 0);
2068  if (s->current_picture->flags & AV_FRAME_FLAG_KEY)
2069  pkt->flags |= AV_PKT_FLAG_KEY;
2070  *got_packet = 1;
2071 
2072  return 0;
2073 }
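/* Summary of the per-frame flow above (a rough reading, for orientation):
 * copy and edge-pad the input, pick the frame type (2-pass stats, GOP
 * position or scene-change score), run MpegEncContext-based motion
 * estimation for P frames, write the header and block tree, then per plane:
 * subtract the OBMC prediction, forward DWT, quantize and range-code each
 * subband, dequantize, inverse DWT and add the prediction back to form the
 * reconstruction used as the next reference.  Finally the rate-control
 * counters are updated and the range coder is terminated into the packet.
 */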
2074 
2075 static av_cold int encode_end(AVCodecContext *avctx)
2076 {
2077  SnowEncContext *const enc = avctx->priv_data;
2078  SnowContext *const s = &enc->com;
2079 
2080  ff_snow_common_end(s);
2081  ff_rate_control_uninit(&enc->m.rc_context);
2082  av_frame_free(&s->input_picture);
2083 
2084  for (int i = 0; i < MAX_REF_FRAMES; i++) {
2085  av_freep(&s->ref_mvs[i]);
2086  av_freep(&s->ref_scores[i]);
2087  }
2088 
2089  enc->m.me.temp = NULL;
2090  av_freep(&enc->m.me.scratchpad);
2091  av_freep(&enc->m.me.map);
2092  av_freep(&enc->m.sc.obmc_scratchpad);
2093 
2094  av_freep(&avctx->stats_out);
2095 
2096  return 0;
2097 }
2098 
2099 #define OFFSET(x) offsetof(SnowEncContext, x)
2100 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2101 static const AVOption options[] = {
2102  {"motion_est", "motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_ITER, VE, .unit = "motion_est" },
2103  { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, VE, .unit = "motion_est" },
2104  { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, VE, .unit = "motion_est" },
2105  { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, VE, .unit = "motion_est" },
2106  { "iter", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ITER }, 0, 0, VE, .unit = "motion_est" },
2107  { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2108  { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2109  { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2110  { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2111  { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
2112  { "pred", "Spatial decomposition type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, DWT_97, DWT_53, VE, .unit = "pred" },
2113  { "dwt97", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2114  { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2115  { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2116  "defined in the section 'Expression Evaluation', the following functions are available: "
2117  "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2118  "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",
2119  OFFSET(m.rc_eq), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
2120  { NULL },
2121 };
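/* The private options above are exposed through the AVOption system, so
 * they can be set per stream on the command line.  A hypothetical
 * invocation (illustrative only; file names and container are made up):
 *
 *     ffmpeg -i in.y4m -c:v snow -motion_est iter -pred dwt53 -qscale:v 4 out.nut
 *
 * -motion_est, -pred, -memc_only etc. map onto the entries of this table;
 * -qscale:v drives the fixed-quantizer path checked via AV_CODEC_FLAG_QSCALE
 * in encode_frame().
 */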
2122 
2123 static const AVClass snowenc_class = {
2124  .class_name = "snow encoder",
2125  .item_name = av_default_item_name,
2126  .option = options,
2127  .version = LIBAVUTIL_VERSION_INT,
2128 };
2129 
2130 const FFCodec ff_snow_encoder = {
2131  .p.name = "snow",
2132  CODEC_LONG_NAME("Snow"),
2133  .p.type = AVMEDIA_TYPE_VIDEO,
2134  .p.id = AV_CODEC_ID_SNOW,
2135  .p.capabilities = AV_CODEC_CAP_DR1 |
2136  AV_CODEC_CAP_ENCODER_RECON_FRAME |
2137  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
2138  .priv_data_size = sizeof(SnowEncContext),
2139  .init = encode_init,
2140  FF_CODEC_ENCODE_CB(encode_frame),
2141  .close = encode_end,
2142  .p.pix_fmts = (const enum AVPixelFormat[]){
2143  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV444P,
2144  AV_PIX_FMT_GRAY8,
2145  AV_PIX_FMT_NONE
2146  },
2147  .p.priv_class = &snowenc_class,
2148  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2149 };