#define VP6_MAX_HUFF_SIZE 12
    int parse_filter_info = 0;
    int separated_coeff = buf[0] & 1;
        sub_version = buf[1] >> 3;
        s->filter_header = buf[1] & 0x06;
        if (separated_coeff || !s->filter_header) {
            coeff_offset = AV_RB16(buf+2) - 2;
        if (!s->macroblocks || /* first frame */
            16*cols != s->avctx->coded_width ||
            16*rows != s->avctx->coded_height) {
            if (s->avctx->extradata_size == 1) {
                /* extradata byte holds the crop amounts: high nibble trims
                 * the width, low nibble trims the height */
                s->avctx->width  -= s->avctx->extradata[0] >> 4;
                s->avctx->height -= s->avctx->extradata[0] & 0x0F;
        parse_filter_info = s->filter_header;
        s->sub_version = sub_version;
        if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
        if (separated_coeff || !s->filter_header) {
            coeff_offset = AV_RB16(buf+1) - 2;
        if (s->filter_header) {
            if (s->deblock_filtering)
            if (s->sub_version > 7)
    if (parse_filter_info) {
            s->sample_variance_threshold = vp56_rac_gets(c, 5) << vrt_shift;
        if (s->sub_version > 7)
            s->filter_selection = 16;
        buf_size -= coeff_offset;
        if (s->use_huffman) {
    s->modelp->coeff_index_to_pos[0] = 0;
        for (pos=1; pos<64; pos++)
            if (s->modelp->coeff_reorder[pos] == i)
                s->modelp->coeff_index_to_pos[idx++] = pos;
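/*
 * Added sketch (not part of vp6.c): the loop above builds coeff_index_to_pos
 * as the rank-ordered inverse of coeff_reorder -- positions are emitted
 * grouped by their reorder value, in ascending position order within each
 * group, with position 0 (the DC coefficient) always kept first.  The helper
 * below is an illustrative stand-alone version; build_index_to_pos, reorder
 * and max_rank are hypothetical names.
 */
static void build_index_to_pos(uint8_t *index_to_pos, const uint8_t *reorder,
                               int size, int max_rank)
{
    int rank, pos, idx = 1;

    index_to_pos[0] = 0;
    for (rank = 0; rank < max_rank; rank++)
        for (pos = 1; pos < size; pos++)
            if (reorder[pos] == rank)
                index_to_pos[idx++] = pos;
}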
    for (comp=0; comp<2; comp++) {
    for (comp=0; comp<2; comp++)
        for (node=0; node<7; node++)
    for (comp=0; comp<2; comp++)
        for (node=0; node<8; node++)
    const Node *a = va, *b = vb;
    return (a->count - b->count)*16 + (b->sym - a->sym);
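/*
 * Added sketch (not part of vp6.c): the comparator above sorts nodes by
 * ascending count and, for equal counts, by descending symbol.  Multiplying
 * the count difference by 16 keeps it dominant, since the symbols stay below
 * VP6_MAX_HUFF_SIZE (12) and their difference can never reach 16.  A
 * stand-alone illustration with a local node type (demo_node, demo_cmp and
 * demo_sort are hypothetical names, assuming <stdlib.h> for qsort):
 */
#include <stdlib.h>

struct demo_node {
    int count;
    int sym;
};

static int demo_cmp(const void *va, const void *vb)
{
    const struct demo_node *a = va, *b = vb;
    return (a->count - b->count) * 16 + (b->sym - a->sym);
}

static void demo_sort(struct demo_node *nodes, size_t n)
{
    /* smallest counts first; among equal counts, larger symbols first */
    qsort(nodes, n, sizeof(*nodes), demo_cmp);
}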
    for (i=0; i<size-1; i++) {
        a = tmp[i].count * coeff_model[i] >> 8;
        b = tmp[i].count * (255 - coeff_model[i]) >> 8;
        nodes[map[2*i  ]].count = a + !a;
        nodes[map[2*i+1]].count = b + !b;
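/*
 * Note (added): each model byte p in 0..255 splits the parent count into a
 * left share count*p/256 and a right share count*(255-p)/256; the "+ !a" and
 * "+ !b" terms bump a zero share up to 1 so that every symbol keeps a nonzero
 * weight and therefore still receives a code when the Huffman tree is built.
 */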
    int node, cg, ctx, pos;
    memset(def_prob, 0x80, sizeof(def_prob));
    for (pt=0; pt<2; pt++)
        for (node=0; node<11; node++)
        for (pos=1; pos<64; pos++)
    for (cg=0; cg<2; cg++)
        for (node=0; node<14; node++)
    for (ct=0; ct<3; ct++)
        for (pt=0; pt<2; pt++)
            for (cg=0; cg<6; cg++)
                for (node=0; node<11; node++)
    if (s->use_huffman) {
        for (pt=0; pt<2; pt++) {
            for (ct=0; ct<3; ct++)
                for (cg = 0; cg < 6; cg++)
                                            &s->ract_vlc[pt][ct][cg]))
        memset(s->nb_null, 0, sizeof(s->nb_null));
        for (pt=0; pt<2; pt++)
            for (ctx=0; ctx<3; ctx++)
                for (node=0; node<5; node++)
    if (s->vector_candidate_pos < 2)
        *vect = s->vector_candidate[0];
    for (comp=0; comp<2; comp++) {
            static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4};
            for (i=0; i<sizeof(prob_order); i++) {
                int j = prob_order[i];
        val = 6+val + get_bits(&s->gb, 2+val);
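/*
 * Note (added, hedged): in the full decoder this null-run count is documented
 * to stay below 74.  Assuming the preceding (elided) lines leave val at
 * either 0 or 4 (a one-bit flag shifted left by 2), the expression above
 * yields 6..9 with two extra bits or 10..73 with six extra bits.
 */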
    int coeff, sign, coeff_idx;
    for (b=0; b<6; b++) {
        vlc_coeff = &s->dccv_vlc[pt];
        for (coeff_idx = 0;;) {
            if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) {
                s->nb_null[coeff_idx][pt]--;
                        int pt = (coeff_idx >= 6);
                        run += get_vlc2(&s->gb, s->runv_vlc[pt].table, 9, 3);
                } else if (coeff == 11) {   /* end of block */
                        coeff2 += get_bits(&s->gb, coeff <= 9 ? coeff - 4 : 11);
                    ct = 1 + (coeff2 > 1);
                    coeff2 = (coeff2 ^ -sign) + sign;   /* conditional negation: sign is 0 or 1 */
                        coeff2 *= s->dequant_ac;
                    s->block_coeff[b][permute[idx]] = coeff2;
            vlc_coeff = &s->ract_vlc[pt][ct][cg];
    uint8_t *model1, *model2, *model3;
    int coeff, sign, coeff_idx;
    int b, i, cg, idx, ctx;
    for (b=0; b<6; b++) {
              + s->above_blocks[s->above_block_idx[b]].not_null_dc;
                coeff = (coeff ^ -sign) + sign;
                    coeff *= s->dequant_ac;
                s->block_coeff[b][permute[idx]] = coeff;
                        for (run=9, i=0; i<6; i++)
        s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0];
    int sum = 0, square_sum = 0;
    for (y=0; y<8; y+=2) {
        for (x=0; x<8; x+=2) {
            square_sum += src[x]*src[x];
    return (16*square_sum - sum*sum) >> 8;
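/*
 * Added sketch (not part of vp6.c): the function above samples 16 pixels on a
 * 2x2-subsampled 8x8 grid and returns (16*square_sum - sum*sum) >> 8, i.e.
 * the floor of the population variance of those samples, since
 * square_sum/16 - (sum/16)^2 == (16*square_sum - sum*sum) / 256.
 * A floating-point reference for comparison (block_variance_ref is a
 * hypothetical name):
 */
static double block_variance_ref(const uint8_t *src, int stride)
{
    double sum = 0.0, square_sum = 0.0;
    int x, y;

    for (y = 0; y < 8; y += 2) {
        for (x = 0; x < 8; x += 2) {
            sum        += src[x];
            square_sum += src[x] * src[x];
        }
        src += 2 * stride;
    }
    /* population variance of the 16 sampled pixels */
    return square_sum / 16.0 - (sum / 16.0) * (sum / 16.0);
}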
                           int delta, const int16_t *weights)
    for (y=0; y<8; y++) {
        for (x=0; x<8; x++) {
            dst[x] = av_clip_uint8((  src[x-delta  ] * weights[0]
                                    + src[x        ] * weights[1]
                                    + src[x+delta  ] * weights[2]
                                    + src[x+2*delta] * weights[3] + 64) >> 7);
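/*
 * Note (added): the four taps are fixed-point weights normalized to 128, so
 * the "+ 64) >> 7" is a rounded division by 128; delta picks the direction of
 * this one-dimensional pass (1 steps horizontally, the line stride steps
 * vertically).
 */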
                             int stride, int h_weight, int v_weight)
    uint8_t *tmp = s->edge_emu_buffer+16;
    s->dsp.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0);
    s->dsp.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight);
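/*
 * Note (added): the diagonal bilinear case is handled as two separable
 * chroma-MC passes -- a horizontal pass over 9 rows into a scratch buffer
 * (one extra row because the second pass reads the row below), then a
 * vertical pass producing the final 8 rows.
 */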
                       int offset1, int offset2, int stride,
    int x8 = mv.x & mask;
    int y8 = mv.y & mask;
        filter4 = s->filter_mode;
            if (s->max_vector_length &&
                (FFABS(mv.x) > s->max_vector_length ||
                 FFABS(mv.y) > s->max_vector_length)) {
            } else if (s->sample_variance_threshold
                       && (vp6_block_variance(src+offset1, stride)
                           < s->sample_variance_threshold)) {
    if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) {
            /* (mv.x ^ mv.y) >> 31 is -1 when mv.x and mv.y have opposite
             * signs (arithmetic shift), nudging the source one pixel left */
            s->vp56dsp.vp6_filter_diag4(dst, src+offset1+((mv.x^mv.y)>>31), stride,
            s->dsp.put_h264_chroma_pixels_tab[0](dst, src+offset1, stride, 8, x8, y8);
        s->alpha_context = av_mallocz(sizeof(VP56Context));
                             s->flip == -1, s->has_alpha);
        for (i = 0; i < 6; ++i)
            s->alpha_context->framep[i] = s->framep[i];
    s->deblock_filtering = 0;
    if (s->alpha_context) {
    for (pt=0; pt<2; pt++) {
        for (ct=0; ct<3; ct++)
            for (cg=0; cg<6; cg++)
    .priv_data_size = sizeof(VP56Context),
    .priv_data_size = sizeof(VP56Context),
    .priv_data_size = sizeof(VP56Context),