FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/attributes.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/thread.h"
34 
35 #include "avcodec.h"
36 #include "decode.h"
37 #include "error_resilience.h"
38 #include "mpegutils.h"
39 #include "mpegvideo.h"
40 #include "mpegvideodec.h"
41 #include "golomb.h"
42 #include "mathops.h"
43 #include "mpeg_er.h"
44 #include "qpeldsp.h"
45 #include "rectangle.h"
46 #include "thread.h"
47 #include "threadprogress.h"
48 
49 #include "rv34vlc.h"
50 #include "rv34data.h"
51 #include "rv34.h"
52 
static inline void ZERO8x2(void* dst, int stride)
{
    /* Clear an 8-byte-wide, 2-row-tall area (two adjacent 4-byte columns),
     * used to zero out a 2x2 group of motion vectors. */
    uint8_t *base = (uint8_t *)dst;

    fill_rectangle(base,     1, 2, stride, 0, 4);
    fill_rectangle(base + 4, 1, 2, stride, 0, 4);
}
58 
59 /** translation of RV30/40 macroblock types to lavc ones */
60 static const int rv34_mb_type_to_lavc[12] = {
73 };
74 
75 
77 
78 static int rv34_decode_mv(RV34DecContext *r, int block_type);
79 
80 /**
81  * @name RV30/40 VLC generating functions
82  * @{
83  */
84 
85 static VLCElem table_data[117592];
86 
87 /**
88  * Generate VLC from codeword lengths.
89  * @param bits codeword lengths (zeroes are accepted)
90  * @param size length of input data
91  * @param vlc output VLC
92  * @param insyms symbols for input codes (NULL for default ones)
93  * @param num VLC table number (for static initialization)
94  */
95 static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc,
96  const uint8_t *syms, int mod_three_bits_offset, int *offset)
97 {
 /* Build a canonical Huffman code from the per-symbol lengths in bits[],
  * then initialize the VLC in the shared static table_data[] pool,
  * advancing *offset by the space consumed. */
98  int counts[17] = {0}, codes[17];
99  int maxbits;
100 
101  av_assert1(size > 0);
102 
 /* Histogram the codeword lengths. */
103  for (int i = 0; i < size; i++)
104  counts[bits[i]]++;
105 
106  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
107  * So we reset it here. The code assigned to this element is 0x00. */
108  codes[0] = counts[0] = 0;
 /* Derive the first canonical code of each length; remember the longest
  * length actually used.
  * NOTE(review): maxbits stays uninitialized if every length is zero;
  * the static rv34 tables appear to guarantee at least one nonzero
  * length — confirm before reusing this with other inputs. */
109  for (int i = 0; i < 16; i++) {
110  codes[i+1] = (codes[i] + counts[i]) << 1;
111  if (counts[i])
112  maxbits = i;
113  }
114 
115  uint16_t symbols[MAX_VLC_SIZE];
116  uint16_t cw[MAX_VLC_SIZE];
117  const void *symp = syms;
118  int symbol_size;
119 
 /* Three symbol modes: >0 packs modulo-three-coded symbols with the low
  * index bits; ==0 uses modulo_three_table directly; <0 (with syms==NULL)
  * falls back to the default 0..size-1 symbols. */
120  if (mod_three_bits_offset > 0) {
121  symp = symbols;
122  symbol_size = 2;
123 
124  for (int i = 0, mask = (1 << mod_three_bits_offset) - 1; i < size; ++i) {
125  cw[i] = codes[bits[i]]++;
126  symbols[i] = (modulo_three_table[i >> mod_three_bits_offset] << mod_three_bits_offset) | (i & mask);
127  }
128  } else {
129  if (!mod_three_bits_offset)
130  symp = modulo_three_table;
131 
132  symbol_size = !!symp;
133  for (int i = 0; i < size; ++i)
134  cw[i] = codes[bits[i]]++;
135  }
136 
 /* Carve the table out of the static pool and account for its size. */
137  vlc->table = &table_data[*offset];
139  ff_vlc_init_sparse(vlc, FFMIN(maxbits, 9), size,
140  bits, 1, 1,
141  cw, 2, 2,
142  symp, symbol_size, symbol_size, VLC_INIT_STATIC_OVERLONG);
143  *offset += vlc->table_size;
144 }
145 
146 static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp,
147  int mod_three_bits_offset, int *offset)
148 {
149  VLC vlc = { 0 };
150  rv34_gen_vlc_ext(bits, size, &vlc, NULL, mod_three_bits_offset, offset);
151  *vlcp = vlc.table;
152 }
153 
154 /**
155  * Initialize all tables.
156  */
157 static av_cold void rv34_init_tables(void)
158 {
159  int i, j, k, offset = 0;
160 
 /* Build all intra VLC sets; offset tracks how much of the shared
  * static table_data[] pool has been consumed so far. */
161  for(i = 0; i < NUM_INTRA_TABLES; i++){
162  for(j = 0; j < 2; j++){
164  &intra_vlcs[i].cbppattern[j], 4, &offset);
166  &intra_vlcs[i].second_pattern[j], 0, &offset);
168  &intra_vlcs[i].third_pattern[j], 0, &offset);
169  for(k = 0; k < 4; k++){
171  &intra_vlcs[i].cbp[j][k], rv34_cbp_code, -1, &offset);
172  }
173  }
174  for(j = 0; j < 4; j++){
176  &intra_vlcs[i].first_pattern[j], 3, &offset);
177  }
179  &intra_vlcs[i].coefficient, -1, &offset);
180  }
181 
 /* Then the inter VLC sets, continuing in the same pool. */
182  for(i = 0; i < NUM_INTER_TABLES; i++){
184  &inter_vlcs[i].cbppattern[0], 4, &offset);
185  for(j = 0; j < 4; j++){
187  &inter_vlcs[i].cbp[0][j], rv34_cbp_code, -1, &offset);
188  }
189  for(j = 0; j < 2; j++){
191  &inter_vlcs[i].first_pattern[j], 3, &offset);
193  &inter_vlcs[i].second_pattern[j], 0, &offset);
195  &inter_vlcs[i].third_pattern[j], 0, &offset);
196  }
198  &inter_vlcs[i].coefficient, -1, &offset);
199  }
200 }
201 
202 /** @} */ // vlc group
203 
204 /**
205  * @name RV30/40 4x4 block decoding functions
206  * @{
207  */
208 
209 /**
210  * Decode coded block pattern.
211  */
212 static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
213 {
 /* Read the coded block pattern: the VLC yields a 4-bit luma presence
  * pattern (low nibble) plus 2-bit-per-block chroma descriptors. */
214  int pattern, code, cbp=0;
215  int ones;
216  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
217  static const int shifts[4] = { 0, 2, 8, 10 };
218  const int *curshift = shifts;
219  int i, t, mask;
220 
221  code = get_vlc2(gb, vlc->cbppattern[table], 9, 2);
222  pattern = code & 0xF;
223  code >>= 4;
224 
225  ones = rv34_count_ones[pattern];
226 
 /* For each 8x8 luma block flagged present, read its 4x4 sub-pattern;
  * the VLC choice depends on how many blocks are present (ones). */
227  for(mask = 8; mask; mask >>= 1, curshift++){
228  if(pattern & mask)
229  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
230  }
231 
 /* Chroma: per block, t==1 reads one bit to pick U or V, t==2 marks both. */
232  for(i = 0; i < 4; i++){
233  t = (code >> (6 - 2*i)) & 3;
234  if(t == 1)
235  cbp |= cbp_masks[get_bits1(gb)] << i;
236  if(t == 2)
237  cbp |= cbp_masks[2] << i;
238  }
239  return cbp;
240 }
241 
242 /**
243  * Get one coefficient value from the bitstream and store it.
244  */
245 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb,
246  const VLCElem *vlc, int q)
247 {
248  if(coef){
249  if(coef == esc){
250  coef = get_vlc2(gb, vlc, 9, 2);
251  if(coef > 23){
252  coef -= 23;
253  coef = 22 + ((1 << coef) | get_bits(gb, coef));
254  }
255  coef += esc;
256  }
257  if(get_bits1(gb))
258  coef = -coef;
259  *dst = (coef*q + 8) >> 4;
260  }
261 }
262 
263 /**
264  * Decode 2x2 subblock of coefficients.
265  */
266 static inline void decode_subblock(int16_t *dst, int flags, const int is_block2,
267  GetBitContext *gb, const VLCElem *vlc, int q)
268 {
269  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
270  if(is_block2){
271  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
272  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
273  }else{
274  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
275  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
276  }
277  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
278 }
279 
280 /**
281  * Decode a single coefficient.
282  */
283 static inline void decode_subblock1(int16_t *dst, int flags, GetBitContext *gb,
284  const VLCElem *vlc, int q)
285 {
286  int coeff = flags >> 6;
287  decode_coeff(dst, coeff, 3, gb, vlc, q);
288 }
289 
290 static inline void decode_subblock3(int16_t *dst, int flags, GetBitContext *gb,
291  const VLCElem *vlc,
292  int q_dc, int q_ac1, int q_ac2)
293 {
294  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
295  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
296  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
297  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
298 }
299 
300 /**
301  * Decode coefficients for 4x4 block.
302  *
303  * This is done by filling 2x2 subblocks with decoded coefficients
304  * in this order (the same for subblocks and subblock coefficients):
305  * o--o
306  * /
307  * /
308  * o--o
309  */
310 
311 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc,
312  int fc, int sc, int q_dc, int q_ac1, int q_ac2)
313 {
 /* Decode one 4x4 block into dst. Returns 1 if any AC coefficients were
  * decoded, 0 if the block has only its DC coefficient. */
314  int flags = get_vlc2(gb, rvlc->first_pattern[fc], 9, 2);
315 
 /* Low 3 bits select which of the remaining 2x2 subblocks are coded. */
316  int pattern = flags & 0x7;
317 
318  flags >>= 3;
319 
320  if (flags & 0x3F) {
321  decode_subblock3(dst, flags, gb, rvlc->coefficient, q_dc, q_ac1, q_ac2);
322  } else {
 /* Only the DC of the first subblock is present. */
323  decode_subblock1(dst, flags, gb, rvlc->coefficient, q_dc);
324  if (!pattern)
325  return 0;
326  }
327 
328  if(pattern & 4){
329  flags = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
330  decode_subblock(dst + 4*0+2, flags, 0, gb, rvlc->coefficient, q_ac2);
331  }
332  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
333  flags = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
334  decode_subblock(dst + 4*2+0, flags, 1, gb, rvlc->coefficient, q_ac2);
335  }
336  if(pattern & 1){
337  flags = get_vlc2(gb, rvlc->third_pattern[sc], 9, 2);
338  decode_subblock(dst + 4*2+2, flags, 0, gb, rvlc->coefficient, q_ac2);
339  }
340  return 1;
341 }
342 
343 /**
344  * @name RV30/40 bitstream parsing
345  * @{
346  */
347 
348 /**
349  * Decode starting slice position.
350  * @todo Maybe replace with ff_h263_decode_mba() ?
351  */
353 {
354  int i;
355  for(i = 0; i < 5; i++)
356  if(rv34_mb_max_sizes[i] >= mb_size - 1)
357  break;
358  return get_bits(gb, rv34_mb_bits_sizes[i]);
359 }
360 
361 /**
362  * Select VLC set for decoding from current quantizer, modifier and frame type.
363  */
364 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
365 {
366  if(mod == 2 && quant < 19) quant += 10;
367  else if(mod && quant < 26) quant += 5;
368  av_assert2(quant >= 0 && quant < 32);
371 }
372 
373 /**
374  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
375  */
376 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
377 {
378  MpegEncContext *s = &r->s;
379  GetBitContext *const gb = &r->gb;
380  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
381  int t;
382 
 /* One bit selects 16x16 intra (single mode for the whole MB) versus
  * per-4x4-block intra prediction modes. */
383  r->is16 = get_bits1(gb);
384  if(r->is16){
385  s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA16x16;
386  r->block_type = RV34_MB_TYPE_INTRA16x16;
 /* Single 2-bit 16x16 mode, replicated over the whole 4x4 grid. */
387  t = get_bits(gb, 2);
388  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
389  r->luma_vlc = 2;
390  }else{
391  if(!r->rv30){
392  if(!get_bits1(gb))
393  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
394  }
395  s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA;
396  r->block_type = RV34_MB_TYPE_INTRA;
397  if(r->decode_intra_types(r, gb, intra_types) < 0)
398  return -1;
399  r->luma_vlc = 1;
400  }
401 
402  r->chroma_vlc = 0;
403  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
404 
405  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
406 }
407 
408 /**
409  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
410  */
411 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
412 {
413  MpegEncContext *s = &r->s;
414  GetBitContext *const gb = &r->gb;
415  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
416  int i, t;
417 
418  r->block_type = r->decode_mb_info(r);
419  if(r->block_type == -1)
420  return -1;
421  s->cur_pic.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
422  r->mb_type[mb_pos] = r->block_type;
 /* Skipped MBs inherit the natural type for the picture: 16x16 inter
  * in P-frames, direct mode in B-frames. */
423  if(r->block_type == RV34_MB_SKIP){
424  if(s->pict_type == AV_PICTURE_TYPE_P)
425  r->mb_type[mb_pos] = RV34_MB_P_16x16;
426  if(s->pict_type == AV_PICTURE_TYPE_B)
427  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
428  }
429  r->is16 = !!IS_INTRA16x16(s->cur_pic.mb_type[mb_pos]);
 /* Motion vectors are decoded (and MC performed) before the residual. */
430  if (rv34_decode_mv(r, r->block_type) < 0)
431  return -1;
432  if(r->block_type == RV34_MB_SKIP){
433  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
434  return 0;
435  }
436  r->chroma_vlc = 1;
437  r->luma_vlc = 0;
438 
439  if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) {
440  if(r->is16){
441  t = get_bits(gb, 2);
442  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
443  r->luma_vlc = 2;
444  }else{
445  if(r->decode_intra_types(r, gb, intra_types) < 0)
446  return -1;
447  r->luma_vlc = 1;
448  }
449  r->chroma_vlc = 0;
450  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
451  }else{
 /* Inter MB: clear the intra mode grid and switch to inter VLC sets. */
452  for(i = 0; i < 16; i++)
453  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
454  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
455  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
456  r->is16 = 1;
457  r->chroma_vlc = 1;
458  r->luma_vlc = 2;
459  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
460  }
461  }
462 
463  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
464 }
465 
466 /** @} */ //bitstream functions
467 
468 /**
469  * @name motion vector related code (prediction, reconstruction, motion compensation)
470  * @{
471  */
472 
473 /** macroblock partition width in 8x8 blocks */
474 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
475 
476 /** macroblock partition height in 8x8 blocks */
477 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
478 
479 /** availability index for subblocks */
480 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
481 
482 /**
483  * motion vector prediction
484  *
485  * Motion prediction performed for the block by using median prediction of
486  * motion vectors from the left, top and right top blocks but in corner cases
487  * some other vectors may be used instead.
488  */
489 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
490 {
491  MpegEncContext *s = &r->s;
492  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
493  int A[2] = {0}, B[2], C[2];
494  int i, j;
495  int mx, my;
496  int* avail = r->avail_cache + avail_indexes[subblock_no];
497  int c_off = part_sizes_w[block_type];
498  int16_t (*motion_val)[2] = s->cur_pic.motion_val[0];
499 
500  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
 /* For the bottom-right subblock the top-right neighbour does not
  * exist; use the top-left one instead. */
501  if(subblock_no == 3)
502  c_off = -1;
503 
 /* A = left, B = top, C = top-right (or a fallback) neighbour MV.
  * A stays zero when the left neighbour is unavailable. */
504  if(avail[-1]){
505  A[0] = motion_val[mv_pos-1][0];
506  A[1] = motion_val[mv_pos-1][1];
507  }
508  if(avail[-4]){
509  B[0] = motion_val[mv_pos-s->b8_stride][0];
510  B[1] = motion_val[mv_pos-s->b8_stride][1];
511  }else{
512  B[0] = A[0];
513  B[1] = A[1];
514  }
515  if(!avail[c_off-4]){
516  if(avail[-4] && (avail[-1] || r->rv30)){
517  C[0] = motion_val[mv_pos-s->b8_stride-1][0];
518  C[1] = motion_val[mv_pos-s->b8_stride-1][1];
519  }else{
520  C[0] = A[0];
521  C[1] = A[1];
522  }
523  }else{
524  C[0] = motion_val[mv_pos-s->b8_stride+c_off][0];
525  C[1] = motion_val[mv_pos-s->b8_stride+c_off][1];
526  }
 /* Median prediction plus the decoded MV difference. */
527  mx = mid_pred(A[0], B[0], C[0]);
528  my = mid_pred(A[1], B[1], C[1]);
529  mx += r->dmv[dmv_no][0];
530  my += r->dmv[dmv_no][1];
 /* Propagate the reconstructed vector over the whole partition. */
531  for(j = 0; j < part_sizes_h[block_type]; j++){
532  for(i = 0; i < part_sizes_w[block_type]; i++){
533  motion_val[mv_pos + i + j*s->b8_stride][0] = mx;
534  motion_val[mv_pos + i + j*s->b8_stride][1] = my;
535  }
536  }
537 }
538 
539 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
540 
541 /**
542  * Calculate motion vector component that should be added for direct blocks.
543  */
544 static int calc_add_mv(RV34DecContext *r, int dir, int val)
545 {
546  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
547 
548  return (int)(val * (SUINT)mul + 0x2000) >> 14;
549 }
550 
551 /**
552  * Predict motion vector for B-frame macroblock.
553  */
/* Median of three values; identical result to mid_pred(). */
static inline int rv34_median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }
    /* now a <= b; median = min(b, max(a, c)) */
    return c < a ? a : (c > b ? b : c);
}

static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    /* Median-predict when all three neighbours are available; otherwise
     * sum the available ones (missing entries are zero-filled by the
     * caller) and halve the sum when exactly two exist. */
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        *mx = rv34_median3(A[0], B[0], C[0]);
        *my = rv34_median3(A[1], B[1], C[1]);
    } else {
        int sx = A[0] + B[0] + C[0];
        int sy = A[1] + B[1] + C[1];
        if (navail == 2) {
            sx /= 2;
            sy /= 2;
        }
        *mx = sx;
        *my = sy;
    }
}
570 
571 /**
572  * motion vector prediction for B-frames
573  */
574 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
575 {
576  MpegEncContext *s = &r->s;
577  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
578  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
579  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
580  int has_A = 0, has_B = 0, has_C = 0;
581  int mx, my;
582  int i, j;
583  MPVWorkPicture *cur_pic = &s->cur_pic;
 /* A neighbour only contributes if it carries an MV in this direction. */
584  const int mask = dir ? MB_TYPE_BACKWARD_MV : MB_TYPE_FORWARD_MV;
585  int type = cur_pic->mb_type[mb_pos];
586 
 /* Gather left (A), top (B) and top-right (C) neighbour vectors;
  * at the right picture edge, C falls back to the top-left neighbour. */
587  if((r->avail_cache[6-1] & type) & mask){
588  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
589  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
590  has_A = 1;
591  }
592  if((r->avail_cache[6-4] & type) & mask){
593  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
594  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
595  has_B = 1;
596  }
597  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
598  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
599  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
600  has_C = 1;
601  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
602  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
603  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
604  has_C = 1;
605  }
606 
607  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
608 
609  mx += r->dmv[dir][0];
610  my += r->dmv[dir][1];
611 
 /* Store the vector for all four 8x8 blocks of the macroblock. */
612  for(j = 0; j < 2; j++){
613  for(i = 0; i < 2; i++){
614  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
615  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
616  }
617  }
 /* Single-direction MBs clear the opposite direction's vectors. */
618  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
619  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
620  }
621 }
622 
623 /**
624  * motion vector prediction - RV3 version
625  */
626 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
627 {
628  MpegEncContext *s = &r->s;
629  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
630  int A[2] = {0}, B[2], C[2];
631  int i, j, k;
632  int mx, my;
633  int* avail = r->avail_cache + avail_indexes[0];
634 
 /* Same A/B/C neighbour scheme as rv34_pred_mv(), but always for the
  * whole macroblock and only from the forward (list 0) vectors. */
635  if(avail[-1]){
636  A[0] = s->cur_pic.motion_val[0][mv_pos - 1][0];
637  A[1] = s->cur_pic.motion_val[0][mv_pos - 1][1];
638  }
639  if(avail[-4]){
640  B[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][0];
641  B[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][1];
642  }else{
643  B[0] = A[0];
644  B[1] = A[1];
645  }
646  if(!avail[-4 + 2]){
647  if(avail[-4] && (avail[-1])){
648  C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][0];
649  C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][1];
650  }else{
651  C[0] = A[0];
652  C[1] = A[1];
653  }
654  }else{
655  C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][0];
656  C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][1];
657  }
658  mx = mid_pred(A[0], B[0], C[0]);
659  my = mid_pred(A[1], B[1], C[1]);
660  mx += r->dmv[0][0];
661  my += r->dmv[0][1];
 /* Write the same vector into both direction lists for all 8x8 blocks. */
662  for(j = 0; j < 2; j++){
663  for(i = 0; i < 2; i++){
664  for(k = 0; k < 2; k++){
665  s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
666  s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
667  }
668  }
669  }
670 }
671 
672 static const int chroma_coeffs[3] = { 0, 3, 5 };
673 
674 /**
675  * generic motion compensation function
676  *
677  * @param r decoder context
678  * @param block_type type of the current block
679  * @param xoff horizontal offset from the start of the current block
680  * @param yoff vertical offset from the start of the current block
681  * @param mv_off offset to the motion vector information
682  * @param width width of the current partition in 8x8 blocks
683  * @param height height of the current partition in 8x8 blocks
684  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
685  * @param thirdpel motion vectors are specified in 1/3 of pixel
686  * @param qpel_mc a set of functions used to perform luma motion compensation
687  * @param chroma_mc a set of functions used to perform chroma motion compensation
688  */
689 static inline void rv34_mc(RV34DecContext *r, const int block_type,
690  const int xoff, const int yoff, int mv_off,
691  const int width, const int height, int dir,
692  const int thirdpel, int weighted,
693  qpel_mc_func (*qpel_mc)[16],
695 {
696  MpegEncContext *s = &r->s;
697  uint8_t *Y, *U, *V;
698  const uint8_t *srcY, *srcU, *srcV;
699  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
700  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
701  int is16x16 = 1;
702  int emu = 0;
703  int16_t *motion_val = s->cur_pic.motion_val[dir][mv_pos];
704 
 /* Split the vector into integer and fractional parts. RV30 uses
  * thirdpel vectors; the (3 << 24) bias keeps the / and % results
  * consistent for negative components. */
705  if(thirdpel){
706  int chroma_mx, chroma_my;
707  mx = (motion_val[0] + (3 << 24)) / 3 - (1 << 24);
708  my = (motion_val[1] + (3 << 24)) / 3 - (1 << 24);
709  lx = (motion_val[0] + (3 << 24)) % 3;
710  ly = (motion_val[1] + (3 << 24)) % 3;
711  chroma_mx = motion_val[0] / 2;
712  chroma_my = motion_val[1] / 2;
713  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
714  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
715  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
716  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
717  }else{
718  int cx, cy;
719  mx = motion_val[0] >> 2;
720  my = motion_val[1] >> 2;
721  lx = motion_val[0] & 3;
722  ly = motion_val[1] & 3;
723  cx = motion_val[0] / 2;
724  cy = motion_val[1] / 2;
725  umx = cx >> 2;
726  umy = cy >> 2;
727  uvmx = (cx & 3) << 1;
728  uvmy = (cy & 3) << 1;
729  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
730  if(uvmx == 6 && uvmy == 6)
731  uvmx = uvmy = 4;
732  }
733 
734  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
735  /* wait for the referenced mb row to be finished */
736  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
737  const ThreadProgress *p = dir ? &s->next_pic.ptr->progress : &s->last_pic.ptr->progress;
738  ff_thread_progress_await(p, mb_row);
739  }
740 
 /* Locate the source area in the chosen reference frame. */
741  dxy = ly*4 + lx;
742  srcY = dir ? s->next_pic.data[0] : s->last_pic.data[0];
743  srcU = dir ? s->next_pic.data[1] : s->last_pic.data[1];
744  srcV = dir ? s->next_pic.data[2] : s->last_pic.data[2];
745  src_x = s->mb_x * 16 + xoff + mx;
746  src_y = s->mb_y * 16 + yoff + my;
747  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
748  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
749  srcY += src_y * s->linesize + src_x;
750  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
751  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
 /* If the interpolation window leaves the picture, fetch an
  * edge-emulated copy of the luma source. */
752  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
753  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
754  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
755  srcY -= 2 + 2*s->linesize;
756  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
757  s->linesize, s->linesize,
758  (width << 3) + 6, (height << 3) + 6,
759  src_x - 2, src_y - 2,
760  s->h_edge_pos, s->v_edge_pos);
761  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
762  emu = 1;
763  }
 /* Weighted B-frame prediction renders into temporary buffers that are
  * blended later by rv4_weight(). */
764  if(!weighted){
765  Y = s->dest[0] + xoff + yoff *s->linesize;
766  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
767  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
768  }else{
769  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
770  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
771  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
772  }
773 
 /* 16x8 and 8x16 partitions are done as two 8x8 calls. */
774  if(block_type == RV34_MB_P_16x8){
775  qpel_mc[1][dxy](Y, srcY, s->linesize);
776  Y += 8;
777  srcY += 8;
778  }else if(block_type == RV34_MB_P_8x16){
779  qpel_mc[1][dxy](Y, srcY, s->linesize);
780  Y += 8 * s->linesize;
781  srcY += 8 * s->linesize;
782  }
783  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
784  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
785  if (emu) {
786  uint8_t *uvbuf = s->sc.edge_emu_buffer;
787 
788  s->vdsp.emulated_edge_mc(uvbuf, srcU,
789  s->uvlinesize, s->uvlinesize,
790  (width << 2) + 1, (height << 2) + 1,
791  uvsrc_x, uvsrc_y,
792  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
793  srcU = uvbuf;
794  uvbuf += 9*s->uvlinesize;
795 
796  s->vdsp.emulated_edge_mc(uvbuf, srcV,
797  s->uvlinesize, s->uvlinesize,
798  (width << 2) + 1, (height << 2) + 1,
799  uvsrc_x, uvsrc_y,
800  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
801  srcV = uvbuf;
802  }
803  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
804  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
805 }
806 
807 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
808  const int xoff, const int yoff, int mv_off,
809  const int width, const int height, int dir)
810 {
811  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
812  r->rdsp.put_pixels_tab,
813  r->rdsp.put_chroma_pixels_tab);
814 }
815 
817 {
 /* Blend the forward (index 0) and backward (index 1) temporary blocks
  * into the final picture; entry [..][0] of the weight table handles
  * the 16x16 luma plane, [..][1] the two 8x8 chroma planes. */
818  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
819  r->tmp_b_block_y[0],
820  r->tmp_b_block_y[1],
821  r->weight1,
822  r->weight2,
823  r->s.linesize);
824  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
825  r->tmp_b_block_uv[0],
826  r->tmp_b_block_uv[2],
827  r->weight1,
828  r->weight2,
829  r->s.uvlinesize);
830  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
831  r->tmp_b_block_uv[1],
832  r->tmp_b_block_uv[3],
833  r->weight1,
834  r->weight2,
835  r->s.uvlinesize);
836 }
837 
838 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
839 {
840  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
841 
842  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
843  r->rdsp.put_pixels_tab,
844  r->rdsp.put_chroma_pixels_tab);
845  if(!weighted){
846  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
847  r->rdsp.avg_pixels_tab,
848  r->rdsp.avg_chroma_pixels_tab);
849  }else{
850  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
851  r->rdsp.put_pixels_tab,
852  r->rdsp.put_chroma_pixels_tab);
853  rv4_weight(r);
854  }
855 }
856 
858 {
 /* Bidirectional MC performed independently for each of the four 8x8
  * blocks; with non-default weights both directions go to scratch
  * buffers and are blended afterwards by rv4_weight(). */
859  int i, j;
860  int weighted = !r->rv30 && r->weight1 != 8192;
861 
862  for(j = 0; j < 2; j++)
863  for(i = 0; i < 2; i++){
864  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
865  weighted,
866  r->rdsp.put_pixels_tab,
867  r->rdsp.put_chroma_pixels_tab);
868  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
869  weighted,
870  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
871  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
872  }
873  if(weighted)
874  rv4_weight(r);
875 }
876 
877 /** number of motion vectors in each macroblock type */
878 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
879 
880 /**
881  * Decode motion vector differences
882  * and perform motion vector reconstruction and motion compensation.
883  */
884 static int rv34_decode_mv(RV34DecContext *r, int block_type)
885 {
886  MpegEncContext *s = &r->s;
887  GetBitContext *const gb = &r->gb;
888  int i, j, k, l;
889  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
890  int next_bt;
891 
 /* Read the MV differences for this MB type; bail out on damaged VLC. */
892  memset(r->dmv, 0, sizeof(r->dmv));
893  for(i = 0; i < num_mvs[block_type]; i++){
894  r->dmv[i][0] = get_interleaved_se_golomb(gb);
895  r->dmv[i][1] = get_interleaved_se_golomb(gb);
896  if (r->dmv[i][0] == INVALID_VLC ||
897  r->dmv[i][1] == INVALID_VLC) {
898  r->dmv[i][0] = r->dmv[i][1] = 0;
899  return AVERROR_INVALIDDATA;
900  }
901  }
902  switch(block_type){
903  case RV34_MB_TYPE_INTRA:
905  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
906  return 0;
907  case RV34_MB_SKIP:
908  if(s->pict_type == AV_PICTURE_TYPE_P){
909  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
910  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
911  break;
912  }
914  case RV34_MB_B_DIRECT:
915  //surprisingly, it uses motion scheme from next reference frame
916  /* wait for the current mb row to be finished */
917  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
918  ff_thread_progress_await(&s->next_pic.ptr->progress, FFMAX(0, s->mb_y-1));
919 
920  next_bt = s->next_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride];
921  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
922  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
923  ZERO8x2(s->cur_pic.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
924  }else
 /* Scale the collocated MVs of the next reference frame into both
  * direction lists of all four 8x8 blocks. */
925  for(j = 0; j < 2; j++)
926  for(i = 0; i < 2; i++)
927  for(k = 0; k < 2; k++)
928  for(l = 0; l < 2; l++)
929  s->cur_pic.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
930  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
931  rv34_mc_2mv(r, block_type);
932  else
934  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
935  break;
936  case RV34_MB_P_16x16:
937  case RV34_MB_P_MIX16x16:
938  rv34_pred_mv(r, block_type, 0, 0);
939  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
940  break;
941  case RV34_MB_B_FORWARD:
942  case RV34_MB_B_BACKWARD:
 /* One decoded difference is shared by both prediction helpers. */
943  r->dmv[1][0] = r->dmv[0][0];
944  r->dmv[1][1] = r->dmv[0][1];
945  if(r->rv30)
946  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
947  else
948  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
949  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
950  break;
951  case RV34_MB_P_16x8:
952  case RV34_MB_P_8x16:
 /* Two partitions, each with its own predicted vector. */
953  rv34_pred_mv(r, block_type, 0, 0);
954  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
955  if(block_type == RV34_MB_P_16x8){
956  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
957  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
958  }
959  if(block_type == RV34_MB_P_8x16){
960  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
961  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
962  }
963  break;
964  case RV34_MB_B_BIDIR:
965  rv34_pred_mv_b (r, block_type, 0);
966  rv34_pred_mv_b (r, block_type, 1);
967  rv34_mc_2mv (r, block_type);
968  break;
969  case RV34_MB_P_8x8:
970  for(i=0;i< 4;i++){
971  rv34_pred_mv(r, block_type, i, i);
972  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
973  }
974  break;
975  }
976 
977  return 0;
978 }
979 /** @} */ // mv group
980 
981 /**
982  * @name Macroblock reconstruction functions
983  * @{
984  */
985 /** mapping of RV30/40 intra prediction types to standard H.264 types */
986 static const int ittrans[9] = {
989 };
990 
991 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
992 static const int ittrans16[4] = {
994 };
995 
996 /**
997  * Perform 4x4 intra prediction.
998  */
999 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
1000 {
 /* prev points at the four pixels above-right of the block, used by the
  * diagonal predictors. */
1001  uint8_t *prev = dst - stride + 4;
1002  uint32_t topleft;
1003 
 /* Remap the prediction mode when neighbours are unavailable. */
1004  if(!up && !left)
1005  itype = DC_128_PRED;
1006  else if(!up){
1007  if(itype == VERT_PRED) itype = HOR_PRED;
1008  if(itype == DC_PRED) itype = LEFT_DC_PRED;
1009  }else if(!left){
1010  if(itype == HOR_PRED) itype = VERT_PRED;
1011  if(itype == DC_PRED) itype = TOP_DC_PRED;
1013  }
1014  if(!down){
1016  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
1017  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
1018  }
 /* Without a top-right neighbour, replicate the last top pixel. */
1019  if(!right && up){
1020  topleft = dst[-stride + 3] * 0x01010101u;
1021  prev = (uint8_t*)&topleft;
1022  }
1023  r->h.pred4x4[itype](dst, prev, stride);
1024 }
1025 
1026 static inline int adjust_pred16(int itype, int up, int left)
1027 {
1028  if(!up && !left)
1029  itype = DC_128_PRED8x8;
1030  else if(!up){
1031  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1032  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1033  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1034  }else if(!left){
1035  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1036  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1037  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1038  }
1039  return itype;
1040 }
1041 
                               uint8_t *pdst, int stride,
                               int fc, int sc, int q_dc, int q_ac)
{
    // Decode one 4x4 residual block and add its inverse transform to the
    // prediction already stored at pdst.
    int16_t *const ptr = r->block;
    int has_ac = rv34_decode_block(ptr, &r->gb, r->cur_vlcs,
                                   fc, sc, q_dc, q_ac, q_ac);
    if(has_ac){
        r->rdsp.rv34_idct_add(pdst, stride, ptr);
    }else{
        // DC-only block: use the cheaper DC-specific IDCT and clear the DC
        // coefficient so r->block stays all-zero for the next block.
        r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
        ptr[0] = 0;
    }
}
1056 
/**
 * Reconstruct an intra 16x16 macroblock: decode the separately coded 4x4
 * luma DC block, run the 16x16 luma / 8x8 chroma predictors and add the
 * residuals for the blocks flagged in cbp.
 */
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
{
    LOCAL_ALIGNED_16(int16_t, block16, [16]);
    MpegEncContext *s = &r->s;
    GetBitContext *const gb = &r->gb;
    int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
        q_ac = rv34_qscale_tab[s->qscale];
    uint8_t *dst = s->dest[0];
    int16_t *const ptr = r->block;
    int i, j, itype, has_ac;

    memset(block16, 0, 16 * sizeof(*block16));

    // The sixteen luma DC coefficients are coded as one separate 4x4 block.
    has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
    if(has_ac)
        r->rdsp.rv34_inv_transform(block16);
    else
        r->rdsp.rv34_inv_transform_dc(block16);

    // Predict the whole 16x16 luma plane in one call.
    itype = ittrans16[intra_types[0]];
    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
    r->h.pred16x16[itype](dst, s->linesize);

    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++, cbp >>= 1){
            int dc = block16[i + j*4];  // this block's DC comes from the DC block

            if(cbp & 1){
                has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
            }else
                has_ac = 0;

            if(has_ac){
                ptr[0] = dc;
                r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
            }else
                r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
        }

        dst += 4*s->linesize;
    }

    // Chroma uses the 8x8 predictor; PLANE is replaced by DC for it here.
    itype = ittrans16[intra_types[0]];
    if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(j = 1; j < 3; j++){
        dst = s->dest[j];
        r->h.pred8x8[itype](dst, s->uvlinesize);
        for(i = 0; i < 4; i++, cbp >>= 1){
            uint8_t *pdst;
            if(!(cbp & 1)) continue;
            pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;

            rv34_process_block(r, pdst, s->uvlinesize,
                               r->chroma_vlc, 1, q_dc, q_ac);
        }
    }
}
1119 
/**
 * Reconstruct a non-16x16 intra macroblock: per-4x4-block prediction using
 * the per-block intra types, followed by residual decoding for the blocks
 * flagged in cbp.
 */
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
{
    MpegEncContext *s = &r->s;
    uint8_t *dst = s->dest[0];
    int avail[6*8] = {0};       // per-4x4 availability map for the luma grid
    int i, j, k;
    int idx, q_ac, q_dc;

    // Set neighbour information.
    if(r->avail_cache[1])
        avail[0] = 1;
    if(r->avail_cache[2])
        avail[1] = avail[2] = 1;
    if(r->avail_cache[3])
        avail[3] = avail[4] = 1;
    if(r->avail_cache[4])
        avail[5] = 1;
    if(r->avail_cache[5])
        avail[8] = avail[16] = 1;
    if(r->avail_cache[9])
        avail[24] = avail[32] = 1;

    // Luma: predict and reconstruct the sixteen 4x4 blocks in raster order.
    q_ac = rv34_qscale_tab[s->qscale];
    for(j = 0; j < 4; j++){
        idx = 9 + j*8;
        for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
            rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
            avail[idx] = 1;     // this block is now usable by its neighbours
            if(!(cbp & 1)) continue;

            rv34_process_block(r, dst, s->linesize,
                               r->luma_vlc, 0, q_ac, q_ac);
        }
        dst += s->linesize * 4 - 4*4;
        intra_types += r->intra_types_stride;
    }

    intra_types -= r->intra_types_stride * 4;

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    // Chroma: both planes reuse the intra types of the co-located luma blocks.
    for(k = 0; k < 2; k++){
        dst = s->dest[1+k];
        fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);

        for(j = 0; j < 2; j++){
            int* acache = r->avail_cache + 6 + j*4;
            for(i = 0; i < 2; i++, cbp >>= 1, acache++){
                int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
                rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
                acache[0] = 1;

                if(!(cbp&1)) continue;

                rv34_process_block(r, dst + 4*i, s->uvlinesize,
                                   r->chroma_vlc, 1, q_dc, q_ac);
            }

            dst += 4*s->uvlinesize;
        }
    }
}
1183 
/**
 * Check whether two neighbouring motion vectors differ by more than 3
 * in either component.
 *
 * @param motion_val current MV; the neighbour is at motion_val[-step]
 * @param step       distance (in MV entries) to the compared neighbour
 * @return 1 if any component differs by more than 3, 0 otherwise
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    int comp;

    for (comp = 0; comp < 2; comp++) {
        int diff = motion_val[0][comp] - motion_val[-step][comp];
        if (diff < -3 || diff > 3)
            return 1;
    }
    return 0;
}
1195 
{
    MpegEncContext *s = &r->s;
    int hmvmask = 0, vmvmask = 0, i, j;
    int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int16_t (*motion_val)[2] = &s->cur_pic.motion_val[0][midx];
    // Build per-edge deblocking masks from motion vector discontinuities:
    // vmvmask flags vertical edges, hmvmask horizontal edges, where the
    // adjacent blocks' MVs differ by more than 3 in any component.
    for(j = 0; j < 16; j += 8){
        for(i = 0; i < 2; i++){
            if(is_mv_diff_gt_3(motion_val + i, 1))
                vmvmask |= 0x11 << (j + i*2);
            if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
                hmvmask |= 0x03 << (j + i*2);
        }
        motion_val += s->b8_stride;
    }
    // Never filter across the picture/slice border.
    if(s->first_slice_line)
        hmvmask &= ~0x000F;
    if(!s->mb_x)
        vmvmask &= ~0x1111;
    if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
        vmvmask |= (vmvmask & 0x4444) >> 1;
        hmvmask |= (hmvmask & 0x0F00) >> 4;
        // Propagate edge flags into the left and top neighbour macroblocks.
        if(s->mb_x)
            r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
        if(!s->first_slice_line)
            r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
    }
    return hmvmask | vmvmask;
}
1225 
1226 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1227 {
1228  MpegEncContext *s = &r->s;
1229  GetBitContext *const gb = &r->gb;
1230  uint8_t *dst = s->dest[0];
1231  int16_t *const ptr = r->block;
1232  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1233  int cbp, cbp2;
1234  int q_dc, q_ac, has_ac;
1235  int i, j;
1236  int dist;
1237 
1238  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1239  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1240  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1241  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1242  if(s->mb_x && dist)
1243  r->avail_cache[5] =
1244  r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1];
1245  if(dist >= s->mb_width)
1246  r->avail_cache[2] =
1247  r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride];
1248  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1249  r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1];
1250  if(s->mb_x && dist > s->mb_width)
1251  r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1];
1252 
1253  s->qscale = r->si.quant;
1254  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1255  r->cbp_luma [mb_pos] = cbp;
1256  r->cbp_chroma[mb_pos] = cbp >> 16;
1257  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1258  s->cur_pic.qscale_table[mb_pos] = s->qscale;
1259 
1260  if(cbp == -1)
1261  return -1;
1262 
1263  if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) {
1264  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1265  else rv34_output_intra(r, intra_types, cbp);
1266  return 0;
1267  }
1268 
1269  if(r->is16){
1270  // Only for RV34_MB_P_MIX16x16
1271  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1272  memset(block16, 0, 16 * sizeof(*block16));
1273  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1274  q_ac = rv34_qscale_tab[s->qscale];
1275  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1276  r->rdsp.rv34_inv_transform(block16);
1277  else
1278  r->rdsp.rv34_inv_transform_dc(block16);
1279 
1280  q_ac = rv34_qscale_tab[s->qscale];
1281 
1282  for(j = 0; j < 4; j++){
1283  for(i = 0; i < 4; i++, cbp >>= 1){
1284  int dc = block16[i + j*4];
1285 
1286  if(cbp & 1){
1287  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1288  }else
1289  has_ac = 0;
1290 
1291  if(has_ac){
1292  ptr[0] = dc;
1293  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1294  }else
1295  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1296  }
1297 
1298  dst += 4*s->linesize;
1299  }
1300 
1301  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1302  }else{
1303  q_ac = rv34_qscale_tab[s->qscale];
1304 
1305  for(j = 0; j < 4; j++){
1306  for(i = 0; i < 4; i++, cbp >>= 1){
1307  if(!(cbp & 1)) continue;
1308 
1309  rv34_process_block(r, dst + 4*i, s->linesize,
1310  r->luma_vlc, 0, q_ac, q_ac);
1311  }
1312  dst += 4*s->linesize;
1313  }
1314  }
1315 
1316  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1317  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1318 
1319  for(j = 1; j < 3; j++){
1320  dst = s->dest[j];
1321  for(i = 0; i < 4; i++, cbp >>= 1){
1322  uint8_t *pdst;
1323  if(!(cbp & 1)) continue;
1324  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1325 
1326  rv34_process_block(r, pdst, s->uvlinesize,
1327  r->chroma_vlc, 1, q_dc, q_ac);
1328  }
1329  }
1330 
1331  return 0;
1332 }
1333 
1334 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1335 {
1336  MpegEncContext *s = &r->s;
1337  int cbp, dist;
1338  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1339 
1340  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1341  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1342  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1343  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1344  if(s->mb_x && dist)
1345  r->avail_cache[5] =
1346  r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1];
1347  if(dist >= s->mb_width)
1348  r->avail_cache[2] =
1349  r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride];
1350  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1351  r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1];
1352  if(s->mb_x && dist > s->mb_width)
1353  r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1];
1354 
1355  s->qscale = r->si.quant;
1356  cbp = rv34_decode_intra_mb_header(r, intra_types);
1357  r->cbp_luma [mb_pos] = cbp;
1358  r->cbp_chroma[mb_pos] = cbp >> 16;
1359  r->deblock_coefs[mb_pos] = 0xFFFF;
1360  s->cur_pic.qscale_table[mb_pos] = s->qscale;
1361 
1362  if(cbp == -1)
1363  return -1;
1364 
1365  if(r->is16){
1366  rv34_output_i16x16(r, intra_types, cbp);
1367  return 0;
1368  }
1369 
1370  rv34_output_intra(r, intra_types, cbp);
1371  return 0;
1372 }
1373 
{
    int bits;
    // Slice ends when the picture is fully decoded or this slice's
    // macroblock budget is exhausted...
    if(s->mb_y >= s->mb_height)
        return 1;
    if (!r->mb_num_left)
        return 1;
    // ...but not while a pending skip run still covers more macroblocks.
    if (r->mb_skip_run > 1)
        return 0;
    bits = get_bits_left(&r->gb);
    // Bitstream exhausted, or fewer than 8 bits left and all of them zero
    // (trailing padding).
    if (bits <= 0 || (bits < 8 && !show_bits(&r->gb, bits)))
        return 1;
    return 0;
}
1388 
1389 
{
    // Free all per-decoder buffers. intra_types is an alias into
    // intra_types_hist, so it is only reset, not freed.
    av_freep(&r->intra_types_hist);
    r->intra_types = NULL;
    av_freep(&r->tmp_b_block_base);
    av_freep(&r->mb_type);
    av_freep(&r->cbp_luma);
    av_freep(&r->cbp_chroma);
    av_freep(&r->deblock_coefs);
}
1400 
1401 
{
    // One extra macroblock column (the +4) to avoid edge special-casing.
    r->intra_types_stride = r->s.mb_width * 4 + 4;

    r->cbp_chroma       = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->cbp_chroma));
    r->cbp_luma         = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->cbp_luma));
    r->deblock_coefs    = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->deblock_coefs));
    r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
                                    sizeof(*r->intra_types_hist));
    r->mb_type          = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->mb_type));

    if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
          r->intra_types_hist && r->mb_type)) {
        // Partial allocation failure: force a full context reinit before
        // the next frame is decoded.
        r->s.context_reinit = 1;
        return AVERROR(ENOMEM);
    }

    // intra_types_hist holds two rows (previous + current); intra_types
    // points at the current one.
    r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;

    return 0;
}
1428 
1429 
{
    // Re-create the per-decoder buffers after a frame size change.
    return rv34_decoder_alloc(r);
}
1435 
1436 
/**
 * Decode a single slice: parse and validate the slice header, position the
 * decoder at the slice's first macroblock, then decode macroblocks until
 * the slice (or bitstream) ends.
 *
 * @param r        decoder context
 * @param end      index of the first macroblock after this slice
 * @param buf      slice data
 * @param buf_size size of the slice data in bytes
 * @return 1 when the whole frame is decoded, 0 on normal slice end,
 *         -1 on error
 */
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
{
    MpegEncContext *s = &r->s;
    GetBitContext *const gb = &r->gb;
    int mb_pos, slice_type;
    int res;

    init_get_bits(gb, buf, buf_size*8);
    res = r->parse_slice_header(r, gb, &r->si);
    if(res < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
        return -1;
    }

    // All slices of a frame must agree on picture type and dimensions.
    slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
    if (slice_type != s->pict_type) {
        av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->width != r->si.width || s->height != r->si.height) {
        av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
        return AVERROR_INVALIDDATA;
    }

    r->si.end = end;
    s->qscale = r->si.quant;
    r->mb_num_left = r->si.end - r->si.start;
    r->mb_skip_run = 0;

    // Resynchronize if the header disagrees with our current MB position.
    mb_pos = s->mb_x + s->mb_y * s->mb_width;
    if(r->si.start != mb_pos){
        av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
        s->mb_x = r->si.start % s->mb_width;
        s->mb_y = r->si.start / s->mb_width;
    }
    memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
    s->first_slice_line = 1;
    s->resync_mb_x = s->mb_x;
    s->resync_mb_y = s->mb_y;

    while(!check_slice_end(r, s)) {
        s->dest[0] += 16;
        s->dest[1] += 8;
        s->dest[2] += 8;

        if(r->si.type)
            res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
        else
            res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
        if(res < 0){
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
            return -1;
        }
        if (++s->mb_x == s->mb_width) {
            // Row finished: rotate the intra-type history, run the loop
            // filter two rows behind and report progress to other threads.
            s->mb_x = 0;
            s->mb_y++;

            memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
            memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));

            if(r->loop_filter && s->mb_y >= 2)
                r->loop_filter(r, s->mb_y - 2);

            if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
                ff_thread_progress_report(&s->cur_pic.ptr->progress,
                                          s->mb_y - 2);

        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        r->mb_num_left--;
    }
    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);

    return s->mb_y == s->mb_height;
}
1515 
1516 /** @} */ // reconstruction group end
1517 
1518 /**
1519  * Initialize decoder.
1520  */
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    int ret;

    ret = ff_mpv_decode_init(s, avctx);
    if (ret < 0)
        return ret;
    s->out_format = FMT_H263;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->has_b_frames = 1;    // output is delayed by one frame for B-frames
    s->low_delay = 0;

    if ((ret = ff_mpv_common_init(s)) < 0)
        return ret;

    // RV30/40 share the H.264 spatial prediction functions.
    ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);

    if (ret < 0)
        return ret;

    // One-time initialization of the static VLC tables.
    ff_thread_once(&init_static_once, rv34_init_tables);

    return 0;
}
1550 
{
    RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
    MpegEncContext *const s1 = &r1->s;
    int ret;

    // Nothing to copy from ourselves or from an uninitialized context.
    if (dst == src || !s1->context_initialized)
        return 0;

    /* NOTE(review): the call that assigns `ret` before this check is not
     * visible in this chunk view — confirm against upstream. */
    if (ret < 0)
        return ret;

    // Did ff_mpeg_update_thread_context reinit?
    if (ret > 0) {
        if (ret < 0)
            return ret;
    }

    // Carry over the timestamp state used for B-frame weighting.
    r->cur_pts  = r1->cur_pts;
    r->last_pts = r1->last_pts;
    r->next_pts = r1->next_pts;

    memset(&r->si, 0, sizeof(r->si));

    return 0;
}
1579 
1580 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1581 {
1582  if (n < slice_count) {
1583  return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1584  } else
1585  return buf_size;
1586 }
1587 
/**
 * Finish decoding the current frame: run error concealment and hand the
 * displayable picture (the current one for B-frames, otherwise the
 * previous reference due to reordering delay) to the caller.
 *
 * @return 1 if a picture was output, 0 if none, negative on error
 */
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
{
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    int got_picture = 0, ret;

    ff_er_frame_end(&s->er, NULL);
    r->mb_num_left = 0;

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        // B-frames are not reordered: output the just-decoded picture.
        if ((ret = av_frame_ref(pict, s->cur_pic.ptr->f)) < 0)
            return ret;
        ff_print_debug_info(s, s->cur_pic.ptr, pict);
        ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG1);
        got_picture = 1;
    } else if (s->last_pic.ptr) {
        // Reference frames are delayed by one: output the previous one.
        if ((ret = av_frame_ref(pict, s->last_pic.ptr->f)) < 0)
            return ret;
        ff_print_debug_info(s, s->last_pic.ptr, pict);
        ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG1);
        got_picture = 1;
    }

    return got_picture;
}
1614 
1615 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1616 {
1617  // attempt to keep aspect during typical resolution switches
1618  if (!sar.num)
1619  sar = (AVRational){1, 1};
1620 
1621  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1622  return sar;
1623 }
1624 
                          int *got_picture_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    SliceInfo si;
    int i, ret;
    int slice_count;
    const uint8_t *slices_hdr = NULL;
    int last = 0;
    int faulty_b = 0;
    int offset;

    /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture: flush the delayed reference */
        if (s->next_pic.ptr) {
            if ((ret = av_frame_ref(pict, s->next_pic.ptr->f)) < 0)
                return ret;
            ff_mpv_unref_picture(&s->next_pic);

            *got_picture_ptr = 1;
        }
        return 0;
    }

    // Packet layout: slice count byte, then an 8-byte entry per slice
    // (offset table), then the slice payloads.
    slice_count = (*buf++) + 1;
    slices_hdr = buf + 4;
    buf += 8 * slice_count;
    buf_size -= 1 + 8 * slice_count;

    offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
    //parse first slice header to check whether this frame can be decoded
    if(offset < 0 || offset > buf_size){
        av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
        return AVERROR_INVALIDDATA;
    }
    init_get_bits(&r->gb, buf+offset, (buf_size-offset)*8);
    if (r->parse_slice_header(r, &r->gb, &si) < 0 || si.start) {
        av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
        return AVERROR_INVALIDDATA;
    }
    if (!s->last_pic.ptr && si.type == AV_PICTURE_TYPE_B) {
        // Remember the fault but only fail after skip_frame was honoured.
        av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
               "reference data.\n");
        faulty_b = 1;
    }
    if(   (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
       || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
       ||  avctx->skip_frame >= AVDISCARD_ALL)
        return avpkt->size;

    /* first slice */
    if (si.start == 0) {
        if (r->mb_num_left > 0 && s->cur_pic.ptr) {
            // Previous frame was truncated: conceal what is missing.
            av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
                   r->mb_num_left);
            if (!s->context_reinit)
                ff_er_frame_end(&s->er, NULL);
        }

        if (s->width != si.width || s->height != si.height || s->context_reinit) {
            // Mid-stream resolution change: validate, keep the display
            // aspect ratio and reallocate all size-dependent state.
            int err;

            av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
                   si.width, si.height);

            if (av_image_check_size(si.width, si.height, 0, s->avctx))
                return AVERROR_INVALIDDATA;

            s->avctx->sample_aspect_ratio = update_sar(
                s->width, s->height, s->avctx->sample_aspect_ratio,
                si.width, si.height);
            s->width  = si.width;
            s->height = si.height;

            err = ff_set_dimensions(s->avctx, s->width, s->height);
            if (err < 0)
                return err;
            if ((err = ff_mpv_common_frame_size_change(s)) < 0)
                return err;
            if ((err = rv34_decoder_realloc(r)) < 0)
                return err;
        }
        if (faulty_b)
            return AVERROR_INVALIDDATA;
        s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
        if (ff_mpv_frame_start(s, s->avctx) < 0)
            return -1;
        if (!r->tmp_b_block_base) {
            // Lazily allocated scratch area for B-frame bidirectional MC:
            // two 16-line luma blocks plus four 8-line chroma blocks.
            int i;

            r->tmp_b_block_base = av_malloc(s->linesize * 48);
            if (!r->tmp_b_block_base)
                return AVERROR(ENOMEM);
            for (i = 0; i < 2; i++)
                r->tmp_b_block_y[i] = r->tmp_b_block_base
                                      + i * 16 * s->linesize;
            for (i = 0; i < 4; i++)
                r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
                                       + (i >> 1) * 8 * s->uvlinesize
                                       + (i & 1) * 16;
        }
        r->cur_pts = si.pts;
        if (s->pict_type != AV_PICTURE_TYPE_B) {
            r->last_pts = r->next_pts;
            r->next_pts = r->cur_pts;
        } else {
            // Derive B-frame weighting factors from the PTS distances to
            // the two reference frames.
            int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
            int dist0   = GET_PTS_DIFF(r->cur_pts,  r->last_pts);
            int dist1   = GET_PTS_DIFF(r->next_pts, r->cur_pts);

            if(!refdist){
                r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
                r->scaled_weight = 0;
            }else{
                if (FFMAX(dist0, dist1) > refdist)
                    av_log(avctx, AV_LOG_TRACE, "distance overflow\n");

                r->mv_weight1 = (dist0 << 14) / refdist;
                r->mv_weight2 = (dist1 << 14) / refdist;
                if((r->mv_weight1|r->mv_weight2) & 511){
                    r->weight1 = r->mv_weight1;
                    r->weight2 = r->mv_weight2;
                    r->scaled_weight = 0;
                }else{
                    // Both weights divisible by 512: use the reduced form.
                    r->weight1 = r->mv_weight1 >> 9;
                    r->weight2 = r->mv_weight2 >> 9;
                    r->scaled_weight = 1;
                }
            }
        }
        s->mb_x = s->mb_y = 0;
        ff_thread_finish_setup(s->avctx);
    } else if (s->context_reinit) {
        av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
               "reinitialize (start MB is %d).\n", si.start);
        return AVERROR_INVALIDDATA;
    } else if (HAVE_THREADS &&
               (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
        av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
               "multithreading mode (start MB is %d).\n", si.start);
        return AVERROR_INVALIDDATA;
    }

    // Decode all slices of this packet; each slice's end is taken from the
    // next slice's header when it parses cleanly.
    for(i = 0; i < slice_count; i++){
        int offset  = get_slice_offset(avctx, slices_hdr, i  , slice_count, buf_size);
        int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
        int size;

        if(offset < 0 || offset > offset1 || offset1 > buf_size){
            av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
            break;
        }
        size = offset1 - offset;

        r->si.end = s->mb_width * s->mb_height;
        r->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;

        if(i+1 < slice_count){
            int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
            if (offset2 < offset1 || offset2 > buf_size) {
                av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
                break;
            }
            init_get_bits(&r->gb, buf+offset1, (buf_size-offset1)*8);
            if (r->parse_slice_header(r, &r->gb, &si) < 0) {
                // Next slice header is broken: absorb it into this slice.
                size = offset2 - offset;
            }else
                r->si.end = si.start;
        }
        av_assert0 (size >= 0 && size <= buf_size - offset);
        last = rv34_decode_slice(r, r->si.end, buf + offset, size);
        if(last)
            break;
    }

    if (s->cur_pic.ptr) {
        if (last) {
            // Frame complete: run the filter on the last rows and output.
            if(r->loop_filter)
                r->loop_filter(r, s->mb_height - 1);

            ret = finish_frame(avctx, pict);
            if (ret < 0)
                return ret;
            *got_picture_ptr = ret;
        } else if (HAVE_THREADS &&
                   (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
            av_log(avctx, AV_LOG_INFO, "marking unfinished frame as finished\n");
            /* always mark the current frame as finished, frame-mt supports
             * only complete frames */
            ff_er_frame_end(&s->er, NULL);
            r->mb_num_left = 0;
            return AVERROR_INVALIDDATA;
        }
    }

    return avpkt->size;
}
1829 
{
    RV34DecContext *r = avctx->priv_data;

    /* NOTE(review): 'r' appears unused in this view — upstream frees the
     * per-decoder buffers here (rv34_decoder_free(r)); confirm. */

    return ff_mpv_decode_close(avctx);
}
flags
const SwsFlags flags[]
Definition: swscale.c:72
RV34DecContext
decoder context
Definition: rv34.h:87
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:359
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:83
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:60
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1120
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
r
const char * r
Definition: vf_curves.c:127
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1830
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:86
threadprogress.h
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:626
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
ThreadProgress
ThreadProgress is an API to easily notify other threads about progress of any kind as long as it can ...
Definition: threadprogress.h:43
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:992
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:878
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:42
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:672
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:352
mask
int mask
Definition: mediacodecdec_common.c:154
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1551
AVPacket::data
uint8_t * data
Definition: packet.h:595
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:203
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1430
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:41
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1374
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:840
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:472
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
decode_subblock
static void decode_subblock(int16_t *dst, int flags, const int is_block2, GetBitContext *gb, const VLCElem *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:266
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1196
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:39
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
thread.h
MPVWorkPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:105
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:480
MPVWorkPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:103
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:1026
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:50
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1402
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1674
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:489
rv34_gen_vlc
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp, int mod_three_bits_offset, int *offset)
Definition: rv34.c:146
GetBitContext
Definition: get_bits.h:109
RV34VLC::first_pattern
const VLCElem * first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:69
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:311
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:53
val
static double val(void *priv, double ch)
Definition: aeval.c:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:236
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:119
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1521
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:999
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1334
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:53
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:66
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:705
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:910
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:807
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1226
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:76
s
#define s(width, name)
Definition: cbs_vp9.c:198
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:81
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:68
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:494
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:544
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
B
#define B
Definition: huffyuv.h:42
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:75
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1588
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, const VLCElem *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:245
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:43
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
RV34VLC::second_pattern
const VLCElem * second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:70
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:419
RV34VLC::cbppattern
const VLCElem * cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:67
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:539
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1437
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:157
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:52
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:172
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
RV34VLC::third_pattern
const VLCElem * third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:71
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:44
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:77
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:36
decode_subblock3
static void decode_subblock3(int16_t *dst, int flags, GetBitContext *gb, const VLCElem *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:290
V
#define V
Definition: avdct.c:32
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:51
qpeldsp.h
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:645
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:202
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:884
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:49
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
rv34_gen_vlc_ext
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int mod_three_bits_offset, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:95
VLC::table_allocated
int table_allocated
Definition: vlc.h:53
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:857
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
decode_subblock1
static void decode_subblock1(int16_t *dst, int flags, GetBitContext *gb, const VLCElem *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:283
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1184
AVPacket::size
int size
Definition: packet.h:596
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:51
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1625
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
rectangle.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:95
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1615
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:55
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:32
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:212
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:61
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:353
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:83
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:70
ff_vlc_init_sparse
int ff_vlc_init_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Build VLC decoding tables suitable for use with get_vlc2().
Definition: vlc.c:250
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1057
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:47
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:574
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:82
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:412
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:477
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
internal.h
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:838
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
MB_TYPE_BACKWARD_MV
#define MB_TYPE_BACKWARD_MV
Definition: mpegutils.h:50
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:46
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:58
table_data
static VLCElem table_data[117592]
Definition: rv34.c:85
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:76
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1580
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:755
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
avcodec.h
VLC::bits
int bits
Definition: vlc.h:51
mid_pred
#define mid_pred
Definition: mathops.h:115
ret
ret
Definition: filter_design.txt:187
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
RV34VLC::coefficient
const VLCElem * coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:72
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:816
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
update_thread_context for mpegvideo-based decoders.
Definition: mpegvideo_dec.c:125
AVCodecContext
main external API structure.
Definition: avcodec.h:439
VLC_INIT_STATIC_OVERLONG
#define VLC_INIT_STATIC_OVERLONG
Definition: vlc.h:191
SliceInfo::start
int start
Definition: rv34.h:80
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:411
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:474
VLC
Definition: vlc.h:50
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:986
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:828
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:404
VLC::table
VLCElem * table
Definition: vlc.h:52
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:376
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:82
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1042
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:57
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:53
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
mem.h
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:689
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:37
MPVWorkPicture
Definition: mpegpicture.h:95
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:39
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:54
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:572
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:76
mpeg_er.h
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:81
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:46
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion frame.
Definition: rv34.h:48
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:364
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:56
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
stride
#define stride
Definition: h264pred_template.c:536
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:228
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:49
src
#define src
Definition: vp8dsp.c:248
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1390
ff_mpv_common_frame_size_change
av_cold int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:181
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:64
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:554