FFmpeg
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 
38 #include "config_components.h"
39 
40 #include "avcodec.h"
41 #include "get_bits.h"
42 #include "bytestream.h"
43 #include "adpcm.h"
44 #include "adpcm_data.h"
45 #include "codec_internal.h"
46 #include "decode.h"
47 
48 /**
49  * @file
50  * ADPCM decoders
51  * Features and limitations:
52  *
53  * Reference documents:
54  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
55  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
56  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
57  * http://openquicktime.sourceforge.net/
58  * XAnim sources (xa_codec.c) http://xanim.polter.net/
59  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
60  * SoX source code http://sox.sourceforge.net/
61  *
62  * CD-ROM XA:
63  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
64  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
65  * readstr http://www.geocities.co.jp/Playtown/2004/
66  */
67 
68 #define CASE_0(codec_id, ...)
69 #define CASE_1(codec_id, ...) \
70  case codec_id: \
71  { __VA_ARGS__ } \
72  break;
73 #define CASE_2(enabled, codec_id, ...) \
74  CASE_ ## enabled(codec_id, __VA_ARGS__)
75 #define CASE_3(config, codec_id, ...) \
76  CASE_2(config, codec_id, __VA_ARGS__)
77 #define CASE(codec, ...) \
78  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
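/* Decoder selection helpers: CASE(FOO, body) expands to "case AV_CODEC_ID_FOO: { body } break;"
 * only when the corresponding CONFIG_FOO_DECODER flag (from config_components.h) is 1;
 * with the flag at 0, CASE_0 discards the body so disabled decoders are compiled out. */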
79 
80 /* These are for CD-ROM XA ADPCM */
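/* Prediction filter pairs (K0, K1); xa_decode() below applies them as (s_1*K0 + s_2*K1 + 32) >> 6. */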
81 static const int8_t xa_adpcm_table[5][2] = {
82  { 0, 0 },
83  { 60, 0 },
84  { 115, -52 },
85  { 98, -55 },
86  { 122, -60 }
87 };
88 
89 static const int16_t afc_coeffs[2][16] = {
90  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
91  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
92 };
93 
94 static const int16_t ea_adpcm_table[] = {
95  0, 240, 460, 392,
96  0, 0, -208, -220,
97  0, 1, 3, 4,
98  7, 8, 10, 11,
99  0, -1, -3, -4
100 };
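/* For a 4-bit coefficient selector k, the EA decoders below read
 * coeff1 = ea_adpcm_table[k] and coeff2 = ea_adpcm_table[k + 4]. */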
101 
102 /*
103  * Dumped from the binaries:
104  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
105  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
106  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
107  */
108 static const int8_t ima_cunning_index_table[9] = {
109  -1, -1, -1, -1, 1, 2, 3, 4, -1
110 };
111 
112 /*
113  * Dumped from the binaries:
114  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
115  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
116  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
117  */
118 static const int16_t ima_cunning_step_table[61] = {
119  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
120  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
121  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
122  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
123  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
124  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
125 };
126 
127 static const int8_t adpcm_index_table2[4] = {
128  -1, 2,
129  -1, 2,
130 };
131 
132 static const int8_t adpcm_index_table3[8] = {
133  -1, -1, 1, 2,
134  -1, -1, 1, 2,
135 };
136 
137 static const int8_t adpcm_index_table5[32] = {
138  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
139  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
140 };
141 
142 static const int8_t * const adpcm_index_tables[4] = {
143  &adpcm_index_table2[0],
144  &adpcm_index_table3[0],
145  &ff_adpcm_index_table[0],
146  &adpcm_index_table5[0],
147 };
148 
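/* MTAF: 32 step-size levels; each row holds 8 positive deltas followed by their
 * negated counterparts, indexed directly by the 4-bit nibble. */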
149 static const int16_t mtaf_stepsize[32][16] = {
150  { 1, 5, 9, 13, 16, 20, 24, 28,
151  -1, -5, -9, -13, -16, -20, -24, -28, },
152  { 2, 6, 11, 15, 20, 24, 29, 33,
153  -2, -6, -11, -15, -20, -24, -29, -33, },
154  { 2, 7, 13, 18, 23, 28, 34, 39,
155  -2, -7, -13, -18, -23, -28, -34, -39, },
156  { 3, 9, 15, 21, 28, 34, 40, 46,
157  -3, -9, -15, -21, -28, -34, -40, -46, },
158  { 3, 11, 18, 26, 33, 41, 48, 56,
159  -3, -11, -18, -26, -33, -41, -48, -56, },
160  { 4, 13, 22, 31, 40, 49, 58, 67,
161  -4, -13, -22, -31, -40, -49, -58, -67, },
162  { 5, 16, 26, 37, 48, 59, 69, 80,
163  -5, -16, -26, -37, -48, -59, -69, -80, },
164  { 6, 19, 31, 44, 57, 70, 82, 95,
165  -6, -19, -31, -44, -57, -70, -82, -95, },
166  { 7, 22, 38, 53, 68, 83, 99, 114,
167  -7, -22, -38, -53, -68, -83, -99, -114, },
168  { 9, 27, 45, 63, 81, 99, 117, 135,
169  -9, -27, -45, -63, -81, -99, -117, -135, },
170  { 10, 32, 53, 75, 96, 118, 139, 161,
171  -10, -32, -53, -75, -96, -118, -139, -161, },
172  { 12, 38, 64, 90, 115, 141, 167, 193,
173  -12, -38, -64, -90, -115, -141, -167, -193, },
174  { 15, 45, 76, 106, 137, 167, 198, 228,
175  -15, -45, -76, -106, -137, -167, -198, -228, },
176  { 18, 54, 91, 127, 164, 200, 237, 273,
177  -18, -54, -91, -127, -164, -200, -237, -273, },
178  { 21, 65, 108, 152, 195, 239, 282, 326,
179  -21, -65, -108, -152, -195, -239, -282, -326, },
180  { 25, 77, 129, 181, 232, 284, 336, 388,
181  -25, -77, -129, -181, -232, -284, -336, -388, },
182  { 30, 92, 153, 215, 276, 338, 399, 461,
183  -30, -92, -153, -215, -276, -338, -399, -461, },
184  { 36, 109, 183, 256, 329, 402, 476, 549,
185  -36, -109, -183, -256, -329, -402, -476, -549, },
186  { 43, 130, 218, 305, 392, 479, 567, 654,
187  -43, -130, -218, -305, -392, -479, -567, -654, },
188  { 52, 156, 260, 364, 468, 572, 676, 780,
189  -52, -156, -260, -364, -468, -572, -676, -780, },
190  { 62, 186, 310, 434, 558, 682, 806, 930,
191  -62, -186, -310, -434, -558, -682, -806, -930, },
192  { 73, 221, 368, 516, 663, 811, 958, 1106,
193  -73, -221, -368, -516, -663, -811, -958, -1106, },
194  { 87, 263, 439, 615, 790, 966, 1142, 1318,
195  -87, -263, -439, -615, -790, -966, -1142, -1318, },
196  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
197  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
198  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
199  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
200  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
201  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
202  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
203  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
204  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
205  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
206  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
207  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
208  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
209  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
210  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
211  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
212  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
213  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
214 };
215 
216 static const int16_t oki_step_table[49] = {
217  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
218  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
219  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
220  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
221  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
222 };
223 
224 // padded to zero where table size is less than 16
225 static const int8_t swf_index_tables[4][16] = {
226  /*2*/ { -1, 2 },
227  /*3*/ { -1, -1, 2, 4 },
228  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
229  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
230 };
231 
232 static const int8_t zork_index_table[8] = {
233  -1, -1, -1, 1, 4, 7, 10, 12,
234 };
235 
236 static const int8_t mtf_index_table[16] = {
237  8, 6, 4, 2, -1, -1, -1, -1,
238  -1, -1, -1, -1, 2, 4, 6, 8,
239 };
240 
241 /* end of tables */
242 
243 typedef struct ADPCMDecodeContext {
244  ADPCMChannelStatus status[14];  /* per-channel predictor/step state */
245  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
246  int has_status; /**< Status flag. Reset to 0 after a flush. */
247 } ADPCMDecodeContext;
248 
249 static void adpcm_flush(AVCodecContext *avctx);
250 
251 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
252 {
253  ADPCMDecodeContext *c = avctx->priv_data;
254  unsigned int min_channels = 1;
255  unsigned int max_channels = 2;
256 
257  adpcm_flush(avctx);
258 
259  switch(avctx->codec->id) {
261  max_channels = 1;
262  break;
269  max_channels = 6;
270  break;
272  min_channels = 2;
273  max_channels = 8;
274  if (avctx->ch_layout.nb_channels & 1) {
275  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
276  return AVERROR_PATCHWELCOME;
277  }
278  break;
280  min_channels = 2;
281  break;
283  max_channels = 8;
284  if (avctx->ch_layout.nb_channels <= 0 ||
285  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
286  return AVERROR_INVALIDDATA;
287  break;
291  max_channels = 14;
292  break;
293  }
294  if (avctx->ch_layout.nb_channels < min_channels ||
295  avctx->ch_layout.nb_channels > max_channels) {
296  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
297  return AVERROR(EINVAL);
298  }
299 
300  switch(avctx->codec->id) {
302  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
303  return AVERROR_INVALIDDATA;
304  break;
306  if (avctx->bits_per_coded_sample != 4 ||
307  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
308  return AVERROR_INVALIDDATA;
309  break;
311  if (avctx->bits_per_coded_sample != 4)
312  return AVERROR_INVALIDDATA;
313  break;
315  if (avctx->bits_per_coded_sample != 8)
316  return AVERROR_INVALIDDATA;
317  break;
318  default:
319  break;
320  }
321 
322  switch (avctx->codec->id) {
345  break;
346  case AV_CODEC_ID_ADPCM_IMA_WS:
347  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
348  AV_SAMPLE_FMT_S16;
349  break;
350  case AV_CODEC_ID_ADPCM_MS:
351  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
352  AV_SAMPLE_FMT_S16;
353  break;
354  default:
355  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
356  }
357  return 0;
358 }
359 
360 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
361 {
362  int delta, pred, step, add;
363 
364  pred = c->predictor;
365  delta = nibble & 7;
366  step = c->step;
367  add = (delta * 2 + 1) * step;
368  if (add < 0)
369  add = add + 7;
370 
371  if ((nibble & 8) == 0)
372  pred = av_clip(pred + (add >> 3), -32767, 32767);
373  else
374  pred = av_clip(pred - (add >> 3), -32767, 32767);
375 
376  switch (delta) {
377  case 7:
378  step *= 0x99;
379  break;
380  case 6:
381  c->step = av_clip(c->step * 2, 127, 24576);
382  c->predictor = pred;
383  return pred;
384  case 5:
385  step *= 0x66;
386  break;
387  case 4:
388  step *= 0x4d;
389  break;
390  default:
391  step *= 0x39;
392  break;
393  }
394 
395  if (step < 0)
396  step += 0x3f;
397 
398  c->step = step >> 6;
399  c->step = av_clip(c->step, 127, 24576);
400  c->predictor = pred;
401  return pred;
402 }
403 
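/* Standard IMA expansion: the three magnitude bits scale the current step as
 * ((2*delta + 1) * step) >> shift (roughly (delta + 0.5) * step / 4 for shift == 3),
 * bit 3 selects the sign, and the step index is adapted with ff_adpcm_index_table
 * and clamped to [0, 88]. */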
404 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
405 {
406  int step_index;
407  int predictor;
408  int sign, delta, diff, step;
409 
410  step = ff_adpcm_step_table[c->step_index];
411  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
412  step_index = av_clip(step_index, 0, 88);
413 
414  sign = nibble & 8;
415  delta = nibble & 7;
416  /* perform direct multiplication instead of series of jumps proposed by
417  * the reference ADPCM implementation since modern CPUs can do the mults
418  * quickly enough */
419  diff = ((2 * delta + 1) * step) >> shift;
420  predictor = c->predictor;
421  if (sign) predictor -= diff;
422  else predictor += diff;
423 
424  c->predictor = av_clip_int16(predictor);
425  c->step_index = step_index;
426 
427  return (int16_t)c->predictor;
428 }
429 
430 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
431 {
432  int step_index;
433  int predictor;
434  int sign, delta, diff, step;
435 
436  step = ff_adpcm_step_table[c->step_index];
437  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
438  step_index = av_clip(step_index, 0, 88);
439 
440  sign = nibble & 8;
441  delta = nibble & 7;
442  diff = (delta * step) >> shift;
443  predictor = c->predictor;
444  if (sign) predictor -= diff;
445  else predictor += diff;
446 
447  c->predictor = av_clip_int16(predictor);
448  c->step_index = step_index;
449 
450  return (int16_t)c->predictor;
451 }
452 
453 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
454 {
455  int step_index, step, delta, predictor;
456 
457  step = ff_adpcm_step_table[c->step_index];
458 
459  delta = step * (2 * nibble - 15);
460  predictor = c->predictor + delta;
461 
462  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
463  c->predictor = av_clip_int16(predictor >> 4);
464  c->step_index = av_clip(step_index, 0, 88);
465 
466  return (int16_t)c->predictor;
467 }
468 
469 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
470 {
471  int step_index;
472  int predictor;
473  int step;
474 
475  nibble = sign_extend(nibble & 0xF, 4);
476 
477  step = ima_cunning_step_table[c->step_index];
478  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
479  step_index = av_clip(step_index, 0, 60);
480 
481  predictor = c->predictor + step * nibble;
482 
483  c->predictor = av_clip_int16(predictor);
484  c->step_index = step_index;
485 
486  return c->predictor;
487 }
488 
489 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
490 {
491  int nibble, step_index, predictor, sign, delta, diff, step, shift;
492 
493  shift = bps - 1;
494  nibble = get_bits_le(gb, bps),
495  step = ff_adpcm_step_table[c->step_index];
496  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
497  step_index = av_clip(step_index, 0, 88);
498 
499  sign = nibble & (1 << shift);
500  delta = av_zero_extend(nibble, shift);
501  diff = ((2 * delta + 1) * step) >> shift;
502  predictor = c->predictor;
503  if (sign) predictor -= diff;
504  else predictor += diff;
505 
506  c->predictor = av_clip_int16(predictor);
507  c->step_index = step_index;
508 
509  return (int16_t)c->predictor;
510 }
511 
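/* QuickTime IMA variant: builds the same (delta + 0.5) * step / 4 difference as
 * adpcm_ima_expand_nibble(), but from shifts and adds (step>>3 plus optional step,
 * step>>1 and step>>2 terms) instead of a multiplication. */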
512 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
513 {
514  int step_index;
515  int predictor;
516  int diff, step;
517 
518  step = ff_adpcm_step_table[c->step_index];
519  step_index = c->step_index + ff_adpcm_index_table[nibble];
520  step_index = av_clip(step_index, 0, 88);
521 
522  diff = step >> 3;
523  if (nibble & 4) diff += step;
524  if (nibble & 2) diff += step >> 1;
525  if (nibble & 1) diff += step >> 2;
526 
527  if (nibble & 8)
528  predictor = c->predictor - diff;
529  else
530  predictor = c->predictor + diff;
531 
532  c->predictor = av_clip_int16(predictor);
533  c->step_index = step_index;
534 
535  return c->predictor;
536 }
537 
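/* MS ADPCM: predict from the two previous samples with /64 fixed-point coefficients,
 * add the signed nibble scaled by idelta, then adapt idelta via ff_adpcm_AdaptationTable
 * (floored at 16 and capped to avoid integer overflow). */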
538 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
539 {
540  int predictor;
541 
542  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
543  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
544 
545  c->sample2 = c->sample1;
546  c->sample1 = av_clip_int16(predictor);
547  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
548  if (c->idelta < 16) c->idelta = 16;
549  if (c->idelta > INT_MAX/768) {
550  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
551  c->idelta = INT_MAX/768;
552  }
553 
554  return c->sample1;
555 }
556 
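/* OKI (Dialogic-style) variant: 12-bit internal precision (av_clip_intp2(..., 11)) with
 * the 49-entry oki_step_table; the result is scaled by 16 to cover the 16-bit output range. */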
557 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
558 {
559  int step_index, predictor, sign, delta, diff, step;
560 
561  step = oki_step_table[c->step_index];
562  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
563  step_index = av_clip(step_index, 0, 48);
564 
565  sign = nibble & 8;
566  delta = nibble & 7;
567  diff = ((2 * delta + 1) * step) >> 3;
568  predictor = c->predictor;
569  if (sign) predictor -= diff;
570  else predictor += diff;
571 
572  c->predictor = av_clip_intp2(predictor, 11);
573  c->step_index = step_index;
574 
575  return c->predictor * 16;
576 }
577 
578 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
579 {
580  int sign, delta, diff;
581  int new_step;
582 
583  sign = nibble & 8;
584  delta = nibble & 7;
585  /* perform direct multiplication instead of series of jumps proposed by
586  * the reference ADPCM implementation since modern CPUs can do the mults
587  * quickly enough */
588  diff = ((2 * delta + 1) * c->step) >> 3;
589  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
590  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
591  c->predictor = av_clip_int16(c->predictor);
592  /* calculate new step and clamp it to range 511..32767 */
593  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
594  c->step = av_clip(new_step, 511, 32767);
595 
596  return (int16_t)c->predictor;
597 }
598 
599 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
600 {
601  int sign, delta, diff;
602 
603  sign = nibble & (1<<(size-1));
604  delta = nibble & ((1<<(size-1))-1);
605  diff = delta << (7 + c->step + shift);
606 
607  /* clamp result */
608  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
609 
610  /* calculate new step */
611  if (delta >= (2*size - 3) && c->step < 3)
612  c->step++;
613  else if (delta == 0 && c->step > 0)
614  c->step--;
615 
616  return (int16_t) c->predictor;
617 }
618 
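/* Yamaha-style expansion: difference and step adaptation come from
 * ff_adpcm_yamaha_difflookup / ff_adpcm_yamaha_indexscale, with the step clamped
 * to [127, 24576] and initialized to 127 while still zero. */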
619 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
620 {
621  if(!c->step) {
622  c->predictor = 0;
623  c->step = 127;
624  }
625 
626  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
627  c->predictor = av_clip_int16(c->predictor);
628  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
629  c->step = av_clip(c->step, 127, 24576);
630  return c->predictor;
631 }
632 
633 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
634 {
635  c->predictor += mtaf_stepsize[c->step][nibble];
636  c->predictor = av_clip_int16(c->predictor);
637  c->step += ff_adpcm_index_table[nibble];
638  c->step = av_clip_uintp2(c->step, 5);
639  return c->predictor;
640 }
641 
642 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
643 {
644  int16_t index = c->step_index;
645  uint32_t lookup_sample = ff_adpcm_step_table[index];
646  int32_t sample = 0;
647 
648  if (nibble & 0x40)
649  sample += lookup_sample;
650  if (nibble & 0x20)
651  sample += lookup_sample >> 1;
652  if (nibble & 0x10)
653  sample += lookup_sample >> 2;
654  if (nibble & 0x08)
655  sample += lookup_sample >> 3;
656  if (nibble & 0x04)
657  sample += lookup_sample >> 4;
658  if (nibble & 0x02)
659  sample += lookup_sample >> 5;
660  if (nibble & 0x01)
661  sample += lookup_sample >> 6;
662  if (nibble & 0x80)
663  sample = -sample;
664 
665  sample += c->predictor;
666  sample = av_clip_int16(sample);
667 
668  index += zork_index_table[(nibble >> 4) & 7];
669  index = av_clip(index, 0, 88);
670 
671  c->predictor = sample;
672  c->step_index = index;
673 
674  return sample;
675 }
676 
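/* Decode one 128-byte CD-ROM XA sound group: for i = 0..3, header bytes in[4 + 2*i]
 * and in[5 + 2*i] give shift and filter for two 28-sample sound units whose 4-bit
 * samples are interleaved at in[16 + i + 4*j] (low nibble for the first unit, high
 * nibble for the second); prediction uses the two-tap xa_adpcm_table filters. */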
677 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
678  const uint8_t *in, ADPCMChannelStatus *left,
679  ADPCMChannelStatus *right, int channels, int sample_offset)
680 {
681  int i, j;
682  int shift,filter,f0,f1;
683  int s_1,s_2;
684  int d,s,t;
685 
686  out0 += sample_offset;
687  if (channels == 1)
688  out1 = out0 + 28;
689  else
690  out1 += sample_offset;
691 
692  for(i=0;i<4;i++) {
693  shift = 12 - (in[4+i*2] & 15);
694  filter = in[4+i*2] >> 4;
695  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
696  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
697  filter=0;
698  }
699  if (shift < 0) {
700  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
701  shift = 0;
702  }
703  f0 = xa_adpcm_table[filter][0];
704  f1 = xa_adpcm_table[filter][1];
705 
706  s_1 = left->sample1;
707  s_2 = left->sample2;
708 
709  for(j=0;j<28;j++) {
710  d = in[16+i+j*4];
711 
712  t = sign_extend(d, 4);
713  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
714  s_2 = s_1;
715  s_1 = av_clip_int16(s);
716  out0[j] = s_1;
717  }
718 
719  if (channels == 2) {
720  left->sample1 = s_1;
721  left->sample2 = s_2;
722  s_1 = right->sample1;
723  s_2 = right->sample2;
724  }
725 
726  shift = 12 - (in[5+i*2] & 15);
727  filter = in[5+i*2] >> 4;
728  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
729  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
730  filter=0;
731  }
732  if (shift < 0) {
733  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
734  shift = 0;
735  }
736 
737  f0 = xa_adpcm_table[filter][0];
738  f1 = xa_adpcm_table[filter][1];
739 
740  for(j=0;j<28;j++) {
741  d = in[16+i+j*4];
742 
743  t = sign_extend(d >> 4, 4);
744  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
745  s_2 = s_1;
746  s_1 = av_clip_int16(s);
747  out1[j] = s_1;
748  }
749 
750  if (channels == 2) {
751  right->sample1 = s_1;
752  right->sample2 = s_2;
753  } else {
754  left->sample1 = s_1;
755  left->sample2 = s_2;
756  }
757 
758  out0 += 28 * (3 - channels);
759  out1 += 28 * (3 - channels);
760  }
761 
762  return 0;
763 }
764 
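/* SWF (Flash) ADPCM: a 2-bit code at the start of the packet selects the nibble size
 * (2..5 bits); each block stores a raw 16-bit predictor and a 6-bit step index per
 * channel, followed by up to 4095 further IMA-style coded samples per channel. */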
765 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
766 {
767  ADPCMDecodeContext *c = avctx->priv_data;
768  GetBitContext gb;
769  const int8_t *table;
770  int channels = avctx->ch_layout.nb_channels;
771  int k0, signmask, nb_bits, count;
772  int size = buf_size*8;
773  int i;
774 
775  init_get_bits(&gb, buf, size);
776 
777  //read bits & initial values
778  nb_bits = get_bits(&gb, 2)+2;
779  table = swf_index_tables[nb_bits-2];
780  k0 = 1 << (nb_bits-2);
781  signmask = 1 << (nb_bits-1);
782 
783  while (get_bits_count(&gb) <= size - 22 * channels) {
784  for (i = 0; i < channels; i++) {
785  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
786  c->status[i].step_index = get_bits(&gb, 6);
787  }
788 
789  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
790  int i;
791 
792  for (i = 0; i < channels; i++) {
793  // similar to IMA adpcm
794  int delta = get_bits(&gb, nb_bits);
795  int step = ff_adpcm_step_table[c->status[i].step_index];
796  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
797  int k = k0;
798 
799  do {
800  if (delta & k)
801  vpdiff += step;
802  step >>= 1;
803  k >>= 1;
804  } while(k);
805  vpdiff += step;
806 
807  if (delta & signmask)
808  c->status[i].predictor -= vpdiff;
809  else
810  c->status[i].predictor += vpdiff;
811 
812  c->status[i].step_index += table[delta & (~signmask)];
813 
814  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
815  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
816 
817  *samples++ = c->status[i].predictor;
818  }
819  }
820  }
821 }
822 
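/* Argonaut Games ADPCM: the nibble is scaled by 1 << shift and added to a predictor of
 * either 2*sample1 - sample2 (flag set) or sample1 (flag clear), all carried with two
 * extra bits of headroom, hence the >> 2 before clipping. */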
823 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
824 {
825  int sample = sign_extend(nibble, 4) * (1 << shift);
826 
827  if (flag)
828  sample += (8 * cs->sample1) - (4 * cs->sample2);
829  else
830  sample += 4 * cs->sample1;
831 
832  sample = av_clip_int16(sample >> 2);
833 
834  cs->sample2 = cs->sample1;
835  cs->sample1 = sample;
836 
837  return sample;
838 }
839 
840 /**
841  * Get the number of samples (per channel) that will be decoded from the packet.
842  * In one case, this is actually the maximum number of samples possible to
843  * decode with the given buf_size.
844  *
845  * @param[out] coded_samples set to the number of samples as coded in the
846  * packet, or 0 if the codec does not encode the
847  * number of samples in each frame.
848  * @param[out] approx_nb_samples set to non-zero if the number of samples
849  * returned is an approximation.
850  */
851 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
852  int buf_size, int *coded_samples, int *approx_nb_samples)
853 {
854  ADPCMDecodeContext *s = avctx->priv_data;
855  int nb_samples = 0;
856  int ch = avctx->ch_layout.nb_channels;
857  int has_coded_samples = 0;
858  int header_size;
859 
860  *coded_samples = 0;
861  *approx_nb_samples = 0;
862 
863  if(ch <= 0)
864  return 0;
865 
866  switch (avctx->codec->id) {
867  /* constant, only check buf_size */
869  if (buf_size < 76 * ch)
870  return 0;
871  nb_samples = 128;
872  break;
874  if (buf_size < 34 * ch)
875  return 0;
876  nb_samples = 64;
877  break;
878  /* simple 4-bit adpcm */
891  nb_samples = buf_size * 2 / ch;
892  break;
893  }
894  if (nb_samples)
895  return nb_samples;
896 
897  /* simple 4-bit adpcm, with header */
898  header_size = 0;
899  switch (avctx->codec->id) {
905  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
906  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
907  }
908  if (header_size > 0)
909  return (buf_size - header_size) * 2 / ch;
910 
911  /* more complex formats */
912  switch (avctx->codec->id) {
914  bytestream2_skip(gb, 4);
915  has_coded_samples = 1;
916  *coded_samples = bytestream2_get_le32u(gb);
917  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
918  bytestream2_seek(gb, -8, SEEK_CUR);
919  break;
921  /* Stereo is 30 bytes per block */
922  /* Mono is 15 bytes per block */
923  has_coded_samples = 1;
924  *coded_samples = bytestream2_get_le32(gb);
925  *coded_samples -= *coded_samples % 28;
926  nb_samples = (buf_size - 12) / (ch == 2 ? 30 : 15) * 28;
927  break;
929  has_coded_samples = 1;
930  *coded_samples = bytestream2_get_le32(gb);
931  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
932  break;
934  nb_samples = (buf_size - ch) / ch * 2;
935  break;
939  /* maximum number of samples */
940  /* has internal offsets and a per-frame switch to signal raw 16-bit */
941  has_coded_samples = 1;
942  switch (avctx->codec->id) {
944  header_size = 4 + 9 * ch;
945  *coded_samples = bytestream2_get_le32(gb);
946  break;
948  header_size = 4 + 5 * ch;
949  *coded_samples = bytestream2_get_le32(gb);
950  break;
952  header_size = 4 + 5 * ch;
953  *coded_samples = bytestream2_get_be32(gb);
954  break;
955  }
956  *coded_samples -= *coded_samples % 28;
957  nb_samples = (buf_size - header_size) * 2 / ch;
958  nb_samples -= nb_samples % 28;
959  *approx_nb_samples = 1;
960  break;
962  if (avctx->block_align > 0)
963  buf_size = FFMIN(buf_size, avctx->block_align);
964  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
965  break;
967  if (avctx->block_align > 0)
968  buf_size = FFMIN(buf_size, avctx->block_align);
969  if (buf_size < 4 * ch)
970  return AVERROR_INVALIDDATA;
971  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
972  break;
974  if (avctx->block_align > 0)
975  buf_size = FFMIN(buf_size, avctx->block_align);
976  nb_samples = (buf_size - 4 * ch) * 2 / ch;
977  break;
978  CASE(ADPCM_IMA_WAV,
979  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
980  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
981  if (avctx->block_align > 0)
982  buf_size = FFMIN(buf_size, avctx->block_align);
983  if (buf_size < 4 * ch)
984  return AVERROR_INVALIDDATA;
985  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
986  ) /* End of CASE */
987  CASE(ADPCM_IMA_XBOX,
988  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
989  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
990  if (avctx->block_align > 0)
991  buf_size = FFMIN(buf_size, avctx->block_align);
992  if (buf_size < 4 * ch)
993  return AVERROR_INVALIDDATA;
994  nb_samples = (buf_size - 4 * ch) / (bsize * ch) * bsamples + 1;
995  ) /* End of CASE */
997  if (avctx->block_align > 0)
998  buf_size = FFMIN(buf_size, avctx->block_align);
999  nb_samples = (buf_size - 6 * ch) * 2 / ch;
1000  break;
1002  if (avctx->block_align > 0)
1003  buf_size = FFMIN(buf_size, avctx->block_align);
1004  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
1005  break;
1009  {
1010  int samples_per_byte;
1011  switch (avctx->codec->id) {
1012  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
1013  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
1014  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
1015  }
1016  if (!s->status[0].step_index) {
1017  if (buf_size < ch)
1018  return AVERROR_INVALIDDATA;
1019  nb_samples++;
1020  buf_size -= ch;
1021  }
1022  nb_samples += buf_size * samples_per_byte / ch;
1023  break;
1024  }
1025  case AV_CODEC_ID_ADPCM_SWF:
1026  {
1027  int buf_bits = buf_size * 8 - 2;
1028  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1029  int block_hdr_size = 22 * ch;
1030  int block_size = block_hdr_size + nbits * ch * 4095;
1031  int nblocks = buf_bits / block_size;
1032  int bits_left = buf_bits - nblocks * block_size;
1033  nb_samples = nblocks * 4096;
1034  if (bits_left >= block_hdr_size)
1035  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1036  break;
1037  }
1038  case AV_CODEC_ID_ADPCM_THP:
1040  if (avctx->extradata) {
1041  nb_samples = buf_size * 14 / (8 * ch);
1042  break;
1043  }
1044  has_coded_samples = 1;
1045  bytestream2_skip(gb, 4); // channel size
1046  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1047  bytestream2_get_le32(gb) :
1048  bytestream2_get_be32(gb);
1049  buf_size -= 8 + 36 * ch;
1050  buf_size /= ch;
1051  nb_samples = buf_size / 8 * 14;
1052  if (buf_size % 8 > 1)
1053  nb_samples += (buf_size % 8 - 1) * 2;
1054  *approx_nb_samples = 1;
1055  break;
1056  case AV_CODEC_ID_ADPCM_AFC:
1057  nb_samples = buf_size / (9 * ch) * 16;
1058  break;
1059  case AV_CODEC_ID_ADPCM_XA:
1060  nb_samples = (buf_size / 128) * 224 / ch;
1061  break;
1062  case AV_CODEC_ID_ADPCM_XMD:
1063  nb_samples = buf_size / (21 * ch) * 32;
1064  break;
1065  case AV_CODEC_ID_ADPCM_DTK:
1066  case AV_CODEC_ID_ADPCM_PSX:
1067  nb_samples = buf_size / (16 * ch) * 28;
1068  break;
1070  nb_samples = buf_size / avctx->block_align * 32;
1071  break;
1073  nb_samples = buf_size / ch;
1074  break;
1075  }
1076 
1077  /* validate coded sample count */
1078  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1079  return AVERROR_INVALIDDATA;
1080 
1081  return nb_samples;
1082 }
1083 
1084 static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1085  int *got_frame_ptr, AVPacket *avpkt)
1086 {
1087  const uint8_t *buf = avpkt->data;
1088  int buf_size = avpkt->size;
1089  ADPCMDecodeContext *c = avctx->priv_data;
1090  int channels = avctx->ch_layout.nb_channels;
1091  int16_t *samples;
1092  int16_t **samples_p;
1093  int st; /* stereo */
1094  int nb_samples, coded_samples, approx_nb_samples, ret;
1095  GetByteContext gb;
1096 
1097  bytestream2_init(&gb, buf, buf_size);
1098  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1099  if (nb_samples <= 0) {
1100  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1101  return AVERROR_INVALIDDATA;
1102  }
1103 
1104  /* get output buffer */
1105  frame->nb_samples = nb_samples;
1106  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1107  return ret;
1108  samples = (int16_t *)frame->data[0];
1109  samples_p = (int16_t **)frame->extended_data;
1110 
1111  /* use coded_samples when applicable */
1112  /* it is always <= nb_samples, so the output buffer will be large enough */
1113  if (coded_samples) {
1114  if (!approx_nb_samples && coded_samples != nb_samples)
1115  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1116  frame->nb_samples = nb_samples = coded_samples;
1117  }
1118 
1119  st = channels == 2 ? 1 : 0;
1120 
1121  switch(avctx->codec->id) {
1122  CASE(ADPCM_IMA_QT,
1123  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1124  Channel data is interleaved per-chunk. */
1125  for (int channel = 0; channel < channels; channel++) {
1126  ADPCMChannelStatus *cs = &c->status[channel];
1127  int predictor;
1128  int step_index;
1129  /* (pppppp) (piiiiiii) */
1130 
1131  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1132  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1133  step_index = predictor & 0x7F;
1134  predictor &= ~0x7F;
1135 
1136  if (cs->step_index == step_index) {
1137  int diff = predictor - cs->predictor;
1138  if (diff < 0)
1139  diff = - diff;
1140  if (diff > 0x7f)
1141  goto update;
1142  } else {
1143  update:
1144  cs->step_index = step_index;
1145  cs->predictor = predictor;
1146  }
1147 
1148  if (cs->step_index > 88u){
1149  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1150  channel, cs->step_index);
1151  return AVERROR_INVALIDDATA;
1152  }
1153 
1154  samples = samples_p[channel];
1155 
1156  for (int m = 0; m < 64; m += 2) {
1157  int byte = bytestream2_get_byteu(&gb);
1158  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1159  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1160  }
1161  }
1162  ) /* End of CASE */
1163  CASE(ADPCM_IMA_WAV,
1164  for (int i = 0; i < channels; i++) {
1165  ADPCMChannelStatus *cs = &c->status[i];
1166  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1167 
1168  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1169  if (cs->step_index > 88u){
1170  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1171  i, cs->step_index);
1172  return AVERROR_INVALIDDATA;
1173  }
1174  }
1175 
1176  if (avctx->bits_per_coded_sample != 4) {
1177  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1178  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1179  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1180  GetBitContext g;
1181 
1182  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1183  for (int i = 0; i < channels; i++) {
1184  ADPCMChannelStatus *cs = &c->status[i];
1185  samples = &samples_p[i][1 + n * samples_per_block];
1186  for (int j = 0; j < block_size; j++) {
1187  temp[j] = buf[4 * channels + block_size * n * channels +
1188  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1189  }
1190  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1191  if (ret < 0)
1192  return ret;
1193  for (int m = 0; m < samples_per_block; m++) {
1194  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1195  avctx->bits_per_coded_sample);
1196  }
1197  }
1198  }
1199  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1200  } else {
1201  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1202  for (int i = 0; i < channels; i++) {
1203  ADPCMChannelStatus *cs = &c->status[i];
1204  samples = &samples_p[i][1 + n * 8];
1205  for (int m = 0; m < 8; m += 2) {
1206  int v = bytestream2_get_byteu(&gb);
1207  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1208  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1209  }
1210  }
1211  }
1212  }
1213  ) /* End of CASE */
1214  CASE(ADPCM_IMA_XBOX,
1215  for (int i = 0; i < channels; i++) {
1216  ADPCMChannelStatus *cs = &c->status[i];
1217  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1218 
1219  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1220  if (cs->step_index > 88u) {
1221  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1222  i, cs->step_index);
1223  return AVERROR_INVALIDDATA;
1224  }
1225  }
1226 
1227  for (int n = 0; n < (nb_samples-1) / 8; n++) {
1228  for (int i = 0; i < channels; i++) {
1229  ADPCMChannelStatus *cs = &c->status[i];
1230  samples = &samples_p[i][1 + n * 8];
1231  for (int m = 0; m < 8; m += 2) {
1232  int v = bytestream2_get_byteu(&gb);
1233  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1234  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1235  }
1236  }
1237  }
1238  frame->nb_samples--;
1239  ) /* End of CASE */
1240  CASE(ADPCM_4XM,
1241  for (int i = 0; i < channels; i++)
1242  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1243 
1244  for (int i = 0; i < channels; i++) {
1245  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1246  if (c->status[i].step_index > 88u) {
1247  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1248  i, c->status[i].step_index);
1249  return AVERROR_INVALIDDATA;
1250  }
1251  }
1252 
1253  for (int i = 0; i < channels; i++) {
1254  ADPCMChannelStatus *cs = &c->status[i];
1255  samples = (int16_t *)frame->data[i];
1256  for (int n = nb_samples >> 1; n > 0; n--) {
1257  int v = bytestream2_get_byteu(&gb);
1258  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1259  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1260  }
1261  }
1262  ) /* End of CASE */
1263  CASE(ADPCM_AGM,
1264  for (int i = 0; i < channels; i++)
1265  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1266  for (int i = 0; i < channels; i++)
1267  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1268 
1269  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1270  int v = bytestream2_get_byteu(&gb);
1271  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1272  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1273  }
1274  ) /* End of CASE */
1275  CASE(ADPCM_MS,
1276  int block_predictor;
1277 
1278  if (avctx->ch_layout.nb_channels > 2) {
1279  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1280  samples = samples_p[channel];
1281  block_predictor = bytestream2_get_byteu(&gb);
1282  if (block_predictor > 6) {
1283  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1284  channel, block_predictor);
1285  return AVERROR_INVALIDDATA;
1286  }
1287  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1288  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1289  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1290  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1291  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1292  *samples++ = c->status[channel].sample2;
1293  *samples++ = c->status[channel].sample1;
1294  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1295  int byte = bytestream2_get_byteu(&gb);
1296  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1297  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1298  }
1299  }
1300  } else {
1301  block_predictor = bytestream2_get_byteu(&gb);
1302  if (block_predictor > 6) {
1303  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1304  block_predictor);
1305  return AVERROR_INVALIDDATA;
1306  }
1307  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1308  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1309  if (st) {
1310  block_predictor = bytestream2_get_byteu(&gb);
1311  if (block_predictor > 6) {
1312  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1313  block_predictor);
1314  return AVERROR_INVALIDDATA;
1315  }
1316  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1317  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1318  }
1319  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1320  if (st){
1321  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1322  }
1323 
1324  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1325  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1326  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1327  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1328 
1329  *samples++ = c->status[0].sample2;
1330  if (st) *samples++ = c->status[1].sample2;
1331  *samples++ = c->status[0].sample1;
1332  if (st) *samples++ = c->status[1].sample1;
1333  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1334  int byte = bytestream2_get_byteu(&gb);
1335  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1336  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1337  }
1338  }
1339  ) /* End of CASE */
1340  CASE(ADPCM_MTAF,
1341  for (int channel = 0; channel < channels; channel += 2) {
1342  bytestream2_skipu(&gb, 4);
1343  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1344  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1345  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1346  bytestream2_skipu(&gb, 2);
1347  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1348  bytestream2_skipu(&gb, 2);
1349  for (int n = 0; n < nb_samples; n += 2) {
1350  int v = bytestream2_get_byteu(&gb);
1351  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1352  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1353  }
1354  for (int n = 0; n < nb_samples; n += 2) {
1355  int v = bytestream2_get_byteu(&gb);
1356  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1357  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1358  }
1359  }
1360  ) /* End of CASE */
1361  CASE(ADPCM_IMA_DK4,
1362  for (int channel = 0; channel < channels; channel++) {
1363  ADPCMChannelStatus *cs = &c->status[channel];
1364  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1365  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1366  if (cs->step_index > 88u){
1367  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1368  channel, cs->step_index);
1369  return AVERROR_INVALIDDATA;
1370  }
1371  }
1372  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1373  int v = bytestream2_get_byteu(&gb);
1374  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1375  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1376  }
1377  ) /* End of CASE */
1378 
1379  /* DK3 ADPCM support macro */
1380 #define DK3_GET_NEXT_NIBBLE() \
1381  if (decode_top_nibble_next) { \
1382  nibble = last_byte >> 4; \
1383  decode_top_nibble_next = 0; \
1384  } else { \
1385  last_byte = bytestream2_get_byteu(&gb); \
1386  nibble = last_byte & 0x0F; \
1387  decode_top_nibble_next = 1; \
1388  }
1389  CASE(ADPCM_IMA_DK3,
1390  int last_byte = 0;
1391  int nibble;
1392  int decode_top_nibble_next = 0;
1393  int diff_channel;
1394  const int16_t *samples_end = samples + channels * nb_samples;
1395 
1396  bytestream2_skipu(&gb, 10);
1397  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1398  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1399  c->status[0].step_index = bytestream2_get_byteu(&gb);
1400  c->status[1].step_index = bytestream2_get_byteu(&gb);
1401  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1402  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1403  c->status[0].step_index, c->status[1].step_index);
1404  return AVERROR_INVALIDDATA;
1405  }
1406  /* sign extend the predictors */
1407  diff_channel = c->status[1].predictor;
1408 
1409  while (samples < samples_end) {
1410 
1411  /* for this algorithm, c->status[0] is the sum channel and
1412  * c->status[1] is the diff channel */
1413 
1414  /* process the first predictor of the sum channel */
1415  DK3_GET_NEXT_NIBBLE();
1416  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1417 
1418  /* process the diff channel predictor */
1419  DK3_GET_NEXT_NIBBLE();
1420  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1421 
1422  /* process the first pair of stereo PCM samples */
1423  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1424  *samples++ = c->status[0].predictor + c->status[1].predictor;
1425  *samples++ = c->status[0].predictor - c->status[1].predictor;
1426 
1427  /* process the second predictor of the sum channel */
1428  DK3_GET_NEXT_NIBBLE();
1429  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1430 
1431  /* process the second pair of stereo PCM samples */
1432  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1433  *samples++ = c->status[0].predictor + c->status[1].predictor;
1434  *samples++ = c->status[0].predictor - c->status[1].predictor;
1435  }
1436 
1437  if ((bytestream2_tell(&gb) & 1))
1438  bytestream2_skip(&gb, 1);
1439  ) /* End of CASE */
1440  CASE(ADPCM_IMA_ISS,
1441  for (int channel = 0; channel < channels; channel++) {
1442  ADPCMChannelStatus *cs = &c->status[channel];
1443  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1444  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1445  if (cs->step_index > 88u){
1446  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1447  channel, cs->step_index);
1448  return AVERROR_INVALIDDATA;
1449  }
1450  }
1451 
1452  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1453  int v1, v2;
1454  int v = bytestream2_get_byteu(&gb);
1455  /* nibbles are swapped for mono */
1456  if (st) {
1457  v1 = v >> 4;
1458  v2 = v & 0x0F;
1459  } else {
1460  v2 = v >> 4;
1461  v1 = v & 0x0F;
1462  }
1463  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1464  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1465  }
1466  ) /* End of CASE */
1467  CASE(ADPCM_IMA_MOFLEX,
1468  for (int channel = 0; channel < channels; channel++) {
1469  ADPCMChannelStatus *cs = &c->status[channel];
1470  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1471  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1472  if (cs->step_index > 88u){
1473  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1474  channel, cs->step_index);
1475  return AVERROR_INVALIDDATA;
1476  }
1477  }
1478 
1479  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1480  for (int channel = 0; channel < channels; channel++) {
1481  samples = samples_p[channel] + 256 * subframe;
1482  for (int n = 0; n < 256; n += 2) {
1483  int v = bytestream2_get_byteu(&gb);
1484  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1485  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1486  }
1487  }
1488  }
1489  ) /* End of CASE */
1490  CASE(ADPCM_IMA_DAT4,
1491  for (int channel = 0; channel < channels; channel++) {
1492  ADPCMChannelStatus *cs = &c->status[channel];
1493  samples = samples_p[channel];
1494  bytestream2_skip(&gb, 4);
1495  for (int n = 0; n < nb_samples; n += 2) {
1496  int v = bytestream2_get_byteu(&gb);
1497  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1498  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1499  }
1500  }
1501  ) /* End of CASE */
1502  CASE(ADPCM_IMA_APC,
1503  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1504  int v = bytestream2_get_byteu(&gb);
1505  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1506  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1507  }
1508  ) /* End of CASE */
1509  CASE(ADPCM_IMA_SSI,
1510  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1511  int v = bytestream2_get_byteu(&gb);
1512  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1513  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1514  }
1515  ) /* End of CASE */
1516  CASE(ADPCM_IMA_APM,
1517  for (int n = nb_samples / 2; n > 0; n--) {
1518  for (int channel = 0; channel < channels; channel++) {
1519  int v = bytestream2_get_byteu(&gb);
1520  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1521  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1522  }
1523  samples += channels;
1524  }
1525  ) /* End of CASE */
1526  CASE(ADPCM_IMA_ALP,
1527  for (int n = nb_samples / 2; n > 0; n--) {
1528  for (int channel = 0; channel < channels; channel++) {
1529  int v = bytestream2_get_byteu(&gb);
1530  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1531  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1532  }
1533  samples += channels;
1534  }
1535  ) /* End of CASE */
1536  CASE(ADPCM_IMA_CUNNING,
1537  for (int channel = 0; channel < channels; channel++) {
1538  int16_t *smp = samples_p[channel];
1539  for (int n = 0; n < nb_samples / 2; n++) {
1540  int v = bytestream2_get_byteu(&gb);
1541  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1542  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1543  }
1544  }
1545  ) /* End of CASE */
1546  CASE(ADPCM_IMA_OKI,
1547  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1548  int v = bytestream2_get_byteu(&gb);
1549  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1550  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1551  }
1552  ) /* End of CASE */
1553  CASE(ADPCM_IMA_RAD,
1554  for (int channel = 0; channel < channels; channel++) {
1555  ADPCMChannelStatus *cs = &c->status[channel];
1556  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1557  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1558  if (cs->step_index > 88u){
1559  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1560  channel, cs->step_index);
1561  return AVERROR_INVALIDDATA;
1562  }
1563  }
1564  for (int n = 0; n < nb_samples / 2; n++) {
1565  int byte[2];
1566 
1567  byte[0] = bytestream2_get_byteu(&gb);
1568  if (st)
1569  byte[1] = bytestream2_get_byteu(&gb);
1570  for (int channel = 0; channel < channels; channel++) {
1571  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1572  }
1573  for (int channel = 0; channel < channels; channel++) {
1574  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1575  }
1576  }
1577  ) /* End of CASE */
1578  CASE(ADPCM_IMA_WS,
1579  if (c->vqa_version == 3) {
1580  for (int channel = 0; channel < channels; channel++) {
1581  int16_t *smp = samples_p[channel];
1582 
1583  for (int n = nb_samples / 2; n > 0; n--) {
1584  int v = bytestream2_get_byteu(&gb);
1585  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1586  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1587  }
1588  }
1589  } else {
1590  for (int n = nb_samples / 2; n > 0; n--) {
1591  for (int channel = 0; channel < channels; channel++) {
1592  int v = bytestream2_get_byteu(&gb);
1593  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1594  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1595  }
1596  samples += channels;
1597  }
1598  }
1599  bytestream2_seek(&gb, 0, SEEK_END);
1600  ) /* End of CASE */
1601  CASE(ADPCM_XMD,
1602  int bytes_remaining, block = 0;
1603  while (bytestream2_get_bytes_left(&gb) >= 21 * channels) {
1604  for (int channel = 0; channel < channels; channel++) {
1605  int16_t *out = samples_p[channel] + block * 32;
1606  int16_t history[2];
1607  uint16_t scale;
1608 
1609  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1610  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1611  scale = bytestream2_get_le16(&gb);
1612 
1613  out[0] = history[1];
1614  out[1] = history[0];
1615 
1616  for (int n = 0; n < 15; n++) {
1617  unsigned byte = bytestream2_get_byte(&gb);
1618  int32_t nibble[2];
1619 
1620  nibble[0] = sign_extend(byte & 15, 4);
1621  nibble[1] = sign_extend(byte >> 4, 4);
1622 
1623  out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1624  history[1] = history[0];
1625  history[0] = out[2+n*2];
1626 
1627  out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1628  history[1] = history[0];
1629  history[0] = out[2+n*2+1];
1630  }
1631  }
1632 
1633  block++;
1634  }
1635  bytes_remaining = bytestream2_get_bytes_left(&gb);
1636  if (bytes_remaining > 0) {
1637  bytestream2_skip(&gb, bytes_remaining);
1638  }
1639  ) /* End of CASE */
1640  CASE(ADPCM_XA,
1641  int16_t *out0 = samples_p[0];
1642  int16_t *out1 = samples_p[1];
1643  int samples_per_block = 28 * (3 - channels) * 4;
1644  int sample_offset = 0;
1645  int bytes_remaining;
1646  while (bytestream2_get_bytes_left(&gb) >= 128) {
1647  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1648  &c->status[0], &c->status[1],
1649  channels, sample_offset)) < 0)
1650  return ret;
1651  bytestream2_skipu(&gb, 128);
1652  sample_offset += samples_per_block;
1653  }
1654  /* Less than a full block of data left, e.g. when reading from
1655  * 2324 byte per sector XA; the remainder is padding */
1656  bytes_remaining = bytestream2_get_bytes_left(&gb);
1657  if (bytes_remaining > 0) {
1658  bytestream2_skip(&gb, bytes_remaining);
1659  }
1660  ) /* End of CASE */
1661  CASE(ADPCM_IMA_EA_EACS,
1662  for (int i = 0; i <= st; i++) {
1663  c->status[i].step_index = bytestream2_get_le32u(&gb);
1664  if (c->status[i].step_index > 88u) {
1665  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1666  i, c->status[i].step_index);
1667  return AVERROR_INVALIDDATA;
1668  }
1669  }
1670  for (int i = 0; i <= st; i++) {
1671  c->status[i].predictor = bytestream2_get_le32u(&gb);
1672  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1673  return AVERROR_INVALIDDATA;
1674  }
1675 
1676  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1677  int byte = bytestream2_get_byteu(&gb);
1678  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1679  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1680  }
1681  ) /* End of CASE */
1682  CASE(ADPCM_IMA_EA_SEAD,
1683  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1684  int byte = bytestream2_get_byteu(&gb);
1685  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1686  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1687  }
1688  ) /* End of CASE */
1689  CASE(ADPCM_EA,
1690  int previous_left_sample, previous_right_sample;
1691  int current_left_sample, current_right_sample;
1692  int next_left_sample, next_right_sample;
1693  int coeff1l, coeff2l, coeff1r, coeff2r;
1694  int shift_left, shift_right;
1695 
1696  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte (stereo) or 15-byte (mono) pieces,
1697  each coding 28 stereo/mono samples. */
1698 
1699  if (channels != 2 && channels != 1)
1700  return AVERROR_INVALIDDATA;
1701 
1702  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1703  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1704  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1705  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1706 
1707  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1708  int byte = bytestream2_get_byteu(&gb);
1709  coeff1l = ea_adpcm_table[ byte >> 4 ];
1710  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1711  coeff1r = ea_adpcm_table[ byte & 0x0F];
1712  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1713 
1714  if (channels == 2){
1715  byte = bytestream2_get_byteu(&gb);
1716  shift_left = 20 - (byte >> 4);
1717  shift_right = 20 - (byte & 0x0F);
1718  } else{
1719  /* Mono packs the shift into the coefficient byte's lower nibble instead */
1720  shift_left = 20 - (byte & 0x0F);
1721  }
1722 
1723  for (int count2 = 0; count2 < (channels == 2 ? 28 : 14); count2++) {
1724  byte = bytestream2_get_byteu(&gb);
1725  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1726 
1727  next_left_sample = (next_left_sample +
1728  (current_left_sample * coeff1l) +
1729  (previous_left_sample * coeff2l) + 0x80) >> 8;
1730 
1731  previous_left_sample = current_left_sample;
1732  current_left_sample = av_clip_int16(next_left_sample);
1733  *samples++ = current_left_sample;
1734 
1735  if (channels == 2){
1736  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1737 
1738  next_right_sample = (next_right_sample +
1739  (current_right_sample * coeff1r) +
1740  (previous_right_sample * coeff2r) + 0x80) >> 8;
1741 
1742  previous_right_sample = current_right_sample;
1743  current_right_sample = av_clip_int16(next_right_sample);
1744  *samples++ = current_right_sample;
1745  } else {
1746  next_left_sample = sign_extend(byte, 4) * (1 << shift_left);
1747 
1748  next_left_sample = (next_left_sample +
1749  (current_left_sample * coeff1l) +
1750  (previous_left_sample * coeff2l) + 0x80) >> 8;
1751 
1752  previous_left_sample = current_left_sample;
1753  current_left_sample = av_clip_int16(next_left_sample);
1754 
1755  *samples++ = current_left_sample;
1756  }
1757  }
1758  }
1759  bytestream2_skip(&gb, channels == 2 ? 2 : 3); // Skip terminating NULs
1760  ) /* End of CASE */
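The arithmetic in the loop above (shared in spirit by the MAXIS XA and R1/R2/R3 cases that follow) is a two-tap predictor: a sign-extended 4-bit nibble is scaled by the per-block shift, the two previous outputs are weighted by a coefficient pair from ea_adpcm_table, and the sum is rounded and clipped to 16 bits. A minimal standalone sketch of that step, assuming the hypothetical helper name ea_expand_nibble and a plain two-element history array rather than this file's ADPCMChannelStatus:

#include <stdint.h>

static int clip_int16(int v)
{
    return v > 32767 ? 32767 : v < -32768 ? -32768 : v;
}

/* Expand one sign-extended 4-bit nibble; prev[0] is the latest output,
 * prev[1] the one before it, coeff1/coeff2 come from ea_adpcm_table. */
static int16_t ea_expand_nibble(int nibble4, int coeff1, int coeff2,
                                int shift, int prev[2])
{
    int level = nibble4 * (1 << shift);
    int out   = clip_int16((level + prev[0] * coeff1 + prev[1] * coeff2 + 0x80) >> 8);
    prev[1] = prev[0];
    prev[0] = out;
    return (int16_t)out;
}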
1761  CASE(ADPCM_EA_MAXIS_XA,
1762  int coeff[2][2], shift[2];
1763 
1764  for (int channel = 0; channel < channels; channel++) {
1765  int byte = bytestream2_get_byteu(&gb);
1766  for (int i = 0; i < 2; i++)
1767  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1768  shift[channel] = 20 - (byte & 0x0F);
1769  }
1770  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1771  int byte[2];
1772 
1773  byte[0] = bytestream2_get_byteu(&gb);
1774  if (st) byte[1] = bytestream2_get_byteu(&gb);
1775  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1776  for (int channel = 0; channel < channels; channel++) {
1777  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1778  sample = (sample +
1779  c->status[channel].sample1 * coeff[channel][0] +
1780  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1781  c->status[channel].sample2 = c->status[channel].sample1;
1782  c->status[channel].sample1 = av_clip_int16(sample);
1783  *samples++ = c->status[channel].sample1;
1784  }
1785  }
1786  }
1787  bytestream2_seek(&gb, 0, SEEK_END);
1788  ) /* End of CASE */
1789 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
 1790  case AV_CODEC_ID_ADPCM_EA_R1:
 1791  case AV_CODEC_ID_ADPCM_EA_R2:
 1792  case AV_CODEC_ID_ADPCM_EA_R3: {
1793  /* channel numbering
1794  2chan: 0=fl, 1=fr
1795  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1796  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1797  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1798  int previous_sample, current_sample, next_sample;
1799  int coeff1, coeff2;
1800  int shift;
1801  uint16_t *samplesC;
1802  int count = 0;
1803  int offsets[6];
1804 
1805  for (unsigned channel = 0; channel < channels; channel++)
1806  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1807  bytestream2_get_le32(&gb)) +
1808  (channels + 1) * 4;
1809 
1810  for (unsigned channel = 0; channel < channels; channel++) {
1811  int count1;
1812 
1813  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1814  samplesC = samples_p[channel];
1815 
1816  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1817  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1818  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1819  } else {
1820  current_sample = c->status[channel].predictor;
1821  previous_sample = c->status[channel].prev_sample;
1822  }
1823 
1824  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1825  int byte = bytestream2_get_byte(&gb);
1826  if (byte == 0xEE) { /* only seen in R2 and R3 */
1827  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1828  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1829 
1830  for (int count2 = 0; count2 < 28; count2++)
1831  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1832  } else {
1833  coeff1 = ea_adpcm_table[ byte >> 4 ];
1834  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1835  shift = 20 - (byte & 0x0F);
1836 
1837  for (int count2 = 0; count2 < 28; count2++) {
1838  if (count2 & 1)
1839  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1840  else {
1841  byte = bytestream2_get_byte(&gb);
1842  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1843  }
1844 
1845  next_sample += (current_sample * coeff1) +
1846  (previous_sample * coeff2);
1847  next_sample = av_clip_int16(next_sample >> 8);
1848 
1849  previous_sample = current_sample;
1850  current_sample = next_sample;
1851  *samplesC++ = current_sample;
1852  }
1853  }
1854  }
1855  if (!count) {
1856  count = count1;
1857  } else if (count != count1) {
1858  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1859  count = FFMAX(count, count1);
1860  }
1861 
1862  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1863  c->status[channel].predictor = current_sample;
1864  c->status[channel].prev_sample = previous_sample;
1865  }
1866  }
1867 
1868  frame->nb_samples = count * 28;
1869  bytestream2_seek(&gb, 0, SEEK_END);
1870  break;
1871  }
1872 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
1873  CASE(ADPCM_EA_XAS,
1874  for (int channel=0; channel < channels; channel++) {
1875  int coeff[2][4], shift[4];
1876  int16_t *s = samples_p[channel];
1877  for (int n = 0; n < 4; n++, s += 32) {
1878  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1879  for (int i = 0; i < 2; i++)
1880  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1881  s[0] = val & ~0x0F;
1882 
1883  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1884  shift[n] = 20 - (val & 0x0F);
1885  s[1] = val & ~0x0F;
1886  }
1887 
1888  for (int m = 2; m < 32; m += 2) {
1889  s = &samples_p[channel][m];
1890  for (int n = 0; n < 4; n++, s += 32) {
1891  int level, pred;
1892  int byte = bytestream2_get_byteu(&gb);
1893 
1894  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1895  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1896  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1897 
1898  level = sign_extend(byte, 4) * (1 << shift[n]);
1899  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1900  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1901  }
1902  }
1903  }
1904  ) /* End of CASE */
1905  CASE(ADPCM_IMA_ACORN,
1906  for (int channel = 0; channel < channels; channel++) {
1907  ADPCMChannelStatus *cs = &c->status[channel];
1908  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1909  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1910  if (cs->step_index > 88u){
1911  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1912  channel, cs->step_index);
1913  return AVERROR_INVALIDDATA;
1914  }
1915  }
1916  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1917  int byte = bytestream2_get_byteu(&gb);
1918  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1919  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1920  }
1921  ) /* End of CASE */
1922  CASE(ADPCM_IMA_AMV,
1923  av_assert0(channels == 1);
1924 
1925  /*
1926  * Header format:
1927  * int16_t predictor;
1928  * uint8_t step_index;
1929  * uint8_t reserved;
1930  * uint32_t frame_size;
1931  *
1932  * Some implementations have step_index as 16-bits, but others
1933  * only use the lower 8 and store garbage in the upper 8.
1934  */
1935  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1936  c->status[0].step_index = bytestream2_get_byteu(&gb);
1937  bytestream2_skipu(&gb, 5);
1938  if (c->status[0].step_index > 88u) {
1939  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1940  c->status[0].step_index);
1941  return AVERROR_INVALIDDATA;
1942  }
1943 
1944  for (int n = nb_samples >> 1; n > 0; n--) {
1945  int v = bytestream2_get_byteu(&gb);
1946 
1947  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1948  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1949  }
1950 
1951  if (nb_samples & 1) {
1952  int v = bytestream2_get_byteu(&gb);
1953  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1954 
1955  if (v & 0x0F) {
1956  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1957  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1958  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1959  }
1960  }
1961  ) /* End of CASE */
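Laid out as a struct, the 8-byte header described in the comment above can be read as in the sketch below; the struct and read_amv_header are illustrative names, and the real decoder simply reads the fields through the bytestream2 reader and skips the last five bytes:

#include <stdint.h>

struct amv_adpcm_header {
    int16_t  predictor;   /* little-endian in the bitstream */
    uint8_t  step_index;  /* only the low 8 bits are meaningful */
    uint8_t  reserved;
    uint32_t frame_size;  /* little-endian */
};

/* Parse the header from an 8-byte buffer; returns 0 on success, -1 on a bad step index. */
static int read_amv_header(const uint8_t *buf, struct amv_adpcm_header *h)
{
    h->predictor  = (int16_t)(buf[0] | (buf[1] << 8));
    h->step_index = buf[2];
    h->reserved   = buf[3];
    h->frame_size = (uint32_t)buf[4]         | ((uint32_t)buf[5] << 8) |
                    ((uint32_t)buf[6] << 16) | ((uint32_t)buf[7] << 24);
    return h->step_index > 88 ? -1 : 0;
}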
1962  CASE(ADPCM_IMA_SMJPEG,
1963  for (int i = 0; i < channels; i++) {
1964  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1965  c->status[i].step_index = bytestream2_get_byteu(&gb);
1966  bytestream2_skipu(&gb, 1);
1967  if (c->status[i].step_index > 88u) {
1968  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1969  c->status[i].step_index);
1970  return AVERROR_INVALIDDATA;
1971  }
1972  }
1973 
1974  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1975  int v = bytestream2_get_byteu(&gb);
1976 
1977  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1978  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1979  }
1980  ) /* End of CASE */
1981  CASE(ADPCM_CT,
1982  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1983  int v = bytestream2_get_byteu(&gb);
1984  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1985  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1986  }
1987  ) /* End of CASE */
1988 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
1989  CONFIG_ADPCM_SBPRO_4_DECODER
 1990  case AV_CODEC_ID_ADPCM_SBPRO_4:
 1991  case AV_CODEC_ID_ADPCM_SBPRO_3:
 1992  case AV_CODEC_ID_ADPCM_SBPRO_2:
 1993  if (!c->status[0].step_index) {
1994  /* the first byte is a raw sample */
1995  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1996  if (st)
1997  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1998  c->status[0].step_index = 1;
1999  nb_samples--;
2000  }
2001  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
2002  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2003  int byte = bytestream2_get_byteu(&gb);
2004  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2005  byte >> 4, 4, 0);
2006  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2007  byte & 0x0F, 4, 0);
2008  }
2009  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
2010  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
2011  int byte = bytestream2_get_byteu(&gb);
2012  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2013  byte >> 5 , 3, 0);
2014  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2015  (byte >> 2) & 0x07, 3, 0);
2016  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2017  byte & 0x03, 2, 0);
2018  }
2019  } else {
2020  for (int n = nb_samples >> (2 - st); n > 0; n--) {
2021  int byte = bytestream2_get_byteu(&gb);
2022  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2023  byte >> 6 , 2, 2);
2024  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2025  (byte >> 4) & 0x03, 2, 2);
2026  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2027  (byte >> 2) & 0x03, 2, 2);
2028  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2029  byte & 0x03, 2, 2);
2030  }
2031  }
2032  break;
2033 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
2034  CASE(ADPCM_SWF,
2035  adpcm_swf_decode(avctx, buf, buf_size, samples);
2036  bytestream2_seek(&gb, 0, SEEK_END);
2037  ) /* End of CASE */
2038  CASE(ADPCM_YAMAHA,
2039  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2040  int v = bytestream2_get_byteu(&gb);
2041  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
2042  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
2043  }
2044  ) /* End of CASE */
2045  CASE(ADPCM_AICA,
2046  for (int channel = 0; channel < channels; channel++) {
2047  samples = samples_p[channel];
2048  for (int n = nb_samples >> 1; n > 0; n--) {
2049  int v = bytestream2_get_byteu(&gb);
2050  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
2051  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
2052  }
2053  }
2054  ) /* End of CASE */
2055  CASE(ADPCM_AFC,
2056  int samples_per_block;
2057  int blocks;
2058 
2059  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
2060  samples_per_block = avctx->extradata[0] / 16;
2061  blocks = nb_samples / avctx->extradata[0];
2062  } else {
2063  samples_per_block = nb_samples / 16;
2064  blocks = 1;
2065  }
2066 
2067  for (int m = 0; m < blocks; m++) {
2068  for (int channel = 0; channel < channels; channel++) {
2069  int prev1 = c->status[channel].sample1;
2070  int prev2 = c->status[channel].sample2;
2071 
2072  samples = samples_p[channel] + m * 16;
2073  /* Read in every sample for this channel. */
2074  for (int i = 0; i < samples_per_block; i++) {
2075  int byte = bytestream2_get_byteu(&gb);
2076  int scale = 1 << (byte >> 4);
2077  int index = byte & 0xf;
2078  int factor1 = afc_coeffs[0][index];
2079  int factor2 = afc_coeffs[1][index];
2080 
2081  /* Decode 16 samples. */
2082  for (int n = 0; n < 16; n++) {
2083  int32_t sampledat;
2084 
2085  if (n & 1) {
2086  sampledat = sign_extend(byte, 4);
2087  } else {
2088  byte = bytestream2_get_byteu(&gb);
2089  sampledat = sign_extend(byte >> 4, 4);
2090  }
2091 
2092  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
2093  sampledat * scale;
2094  *samples = av_clip_int16(sampledat);
2095  prev2 = prev1;
2096  prev1 = *samples++;
2097  }
2098  }
2099 
2100  c->status[channel].sample1 = prev1;
2101  c->status[channel].sample2 = prev2;
2102  }
2103  }
2104  bytestream2_seek(&gb, 0, SEEK_END);
2105  ) /* End of CASE */
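The AFC inner loop (and the THP case below) uses a table-driven fixed-point predictor: the two previous outputs are weighted by a coefficient pair selected by the low nibble of the per-group header byte, shifted down by 11, and the sign-extended 4-bit delta is added after scaling by 1 << (header >> 4). A standalone sketch, with afc_expand_nibble as an illustrative name:

#include <stdint.h>

/* prev[0]/prev[1] are the last two decoded samples; factor1/factor2 come from
 * afc_coeffs (or the per-channel table in the THP case), scale = 1 << (header >> 4). */
static int16_t afc_expand_nibble(int nibble4, int scale,
                                 int factor1, int factor2, int prev[2])
{
    int sample = ((prev[0] * factor1 + prev[1] * factor2) >> 11) + nibble4 * scale;
    if (sample >  32767) sample =  32767;
    if (sample < -32768) sample = -32768;
    prev[1] = prev[0];
    prev[0] = sample;   /* history keeps the clipped output, as in the loop above */
    return (int16_t)sample;
}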
2106 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2107  case AV_CODEC_ID_ADPCM_THP:
 2108  case AV_CODEC_ID_ADPCM_THP_LE:
 2109  {
2110  int table[14][16];
2111 
2112 #define THP_GET16(g) \
2113  sign_extend( \
2114  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2115  bytestream2_get_le16u(&(g)) : \
2116  bytestream2_get_be16u(&(g)), 16)
2117 
2118  if (avctx->extradata) {
2119  GetByteContext tb;
2120  if (avctx->extradata_size < 32 * channels) {
2121  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2122  return AVERROR_INVALIDDATA;
2123  }
2124 
2125  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2126  for (int i = 0; i < channels; i++)
2127  for (int n = 0; n < 16; n++)
2128  table[i][n] = THP_GET16(tb);
2129  } else {
2130  for (int i = 0; i < channels; i++)
2131  for (int n = 0; n < 16; n++)
2132  table[i][n] = THP_GET16(gb);
2133 
2134  if (!c->has_status) {
2135  /* Initialize the previous sample. */
2136  for (int i = 0; i < channels; i++) {
2137  c->status[i].sample1 = THP_GET16(gb);
2138  c->status[i].sample2 = THP_GET16(gb);
2139  }
2140  c->has_status = 1;
2141  } else {
2142  bytestream2_skip(&gb, channels * 4);
2143  }
2144  }
2145 
2146  for (int ch = 0; ch < channels; ch++) {
2147  samples = samples_p[ch];
2148 
2149  /* Read in every sample for this channel. */
2150  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2151  int byte = bytestream2_get_byteu(&gb);
2152  int index = (byte >> 4) & 7;
2153  unsigned int exp = byte & 0x0F;
2154  int64_t factor1 = table[ch][index * 2];
2155  int64_t factor2 = table[ch][index * 2 + 1];
2156 
2157  /* Decode 14 samples. */
2158  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2159  int32_t sampledat;
2160 
2161  if (n & 1) {
2162  sampledat = sign_extend(byte, 4);
2163  } else {
2164  byte = bytestream2_get_byteu(&gb);
2165  sampledat = sign_extend(byte >> 4, 4);
2166  }
2167 
2168  sampledat = ((c->status[ch].sample1 * factor1
2169  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2170  *samples = av_clip_int16(sampledat);
2171  c->status[ch].sample2 = c->status[ch].sample1;
2172  c->status[ch].sample1 = *samples++;
2173  }
2174  }
2175  }
2176  break;
2177  }
2178 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
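The THP_GET16() macro above hides only the endianness choice; for a plain byte pointer it is equivalent to the small helper below (thp_get16 is an illustrative name):

#include <stdint.h>

/* Read one 16-bit coefficient, little- or big-endian, sign-extended to int. */
static int thp_get16(const uint8_t *p, int little_endian)
{
    uint16_t v = little_endian ? (uint16_t)(p[0] | (p[1] << 8))
                               : (uint16_t)((p[0] << 8) | p[1]);
    return (int16_t)v;   /* the cast performs the sign extension */
}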
2179  CASE(ADPCM_DTK,
2180  for (int channel = 0; channel < channels; channel++) {
2181  samples = samples_p[channel];
2182 
2183  /* Read in every sample for this channel. */
2184  for (int i = 0; i < nb_samples / 28; i++) {
2185  int byte, header;
2186  if (channel)
2187  bytestream2_skipu(&gb, 1);
2188  header = bytestream2_get_byteu(&gb);
2189  bytestream2_skipu(&gb, 3 - channel);
2190 
2191  /* Decode 28 samples. */
2192  for (int n = 0; n < 28; n++) {
2193  int32_t sampledat, prev;
2194 
2195  switch (header >> 4) {
2196  case 1:
2197  prev = (c->status[channel].sample1 * 0x3c);
2198  break;
2199  case 2:
2200  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2201  break;
2202  case 3:
2203  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2204  break;
2205  default:
2206  prev = 0;
2207  }
2208 
2209  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2210 
2211  byte = bytestream2_get_byteu(&gb);
2212  if (!channel)
2213  sampledat = sign_extend(byte, 4);
2214  else
2215  sampledat = sign_extend(byte >> 4, 4);
2216 
2217  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2218  *samples++ = av_clip_int16(sampledat >> 6);
2219  c->status[channel].sample2 = c->status[channel].sample1;
2220  c->status[channel].sample1 = sampledat;
2221  }
2222  }
2223  if (!channel)
2224  bytestream2_seek(&gb, 0, SEEK_SET);
2225  }
2226  ) /* End of CASE */
2227  CASE(ADPCM_PSX,
2228  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2229  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
2230  for (int channel = 0; channel < channels; channel++) {
2231  samples = samples_p[channel] + block * nb_samples_per_block;
2232  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2233 
2234  /* Read in every sample for this channel. */
2235  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2236  int filter, shift, flag, byte;
2237 
2238  filter = bytestream2_get_byteu(&gb);
2239  shift = filter & 0xf;
2240  filter = filter >> 4;
 2241  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
 2242  return AVERROR_INVALIDDATA;
2243  flag = bytestream2_get_byteu(&gb) & 0x7;
2244 
2245  /* Decode 28 samples. */
2246  for (int n = 0; n < 28; n++) {
2247  int sample = 0, scale;
2248 
2249  if (n & 1) {
2250  scale = sign_extend(byte >> 4, 4);
2251  } else {
2252  byte = bytestream2_get_byteu(&gb);
2253  scale = sign_extend(byte, 4);
2254  }
2255 
2256  if (flag < 0x07) {
2257  scale = scale * (1 << 12);
2258  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2259  }
 2260  *samples++ = av_clip_int16(sample);
 2261  c->status[channel].sample2 = c->status[channel].sample1;
2262  c->status[channel].sample1 = sample;
2263  }
2264  }
2265  }
2266  }
2267  ) /* End of CASE */
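Each PSX sample above goes through the same first/second-order filters as CD-ROM XA: the nibble is promoted to a 16-bit range (<< 12), shifted down by the per-group shift, and the xa_adpcm_table pair for the selected filter is applied to the previous two (unclipped) samples with a divide by 64. A compact sketch, assuming a hypothetical psx_expand_nibble helper:

#include <stdint.h>

/* k0/k1 are xa_adpcm_table[filter][0] and [1]; hist[0]/hist[1] hold the two
 * previous unclipped samples, exactly like sample1/sample2 in the loop above. */
static int16_t psx_expand_nibble(int nibble4, int k0, int k1,
                                 int shift, int flag, int hist[2])
{
    int sample = 0;

    if (flag < 0x07) {
        int scale = nibble4 * (1 << 12);
        sample = (scale >> shift) + (hist[0] * k0 + hist[1] * k1) / 64;
    }
    hist[1] = hist[0];
    hist[0] = sample;
    return (int16_t)(sample > 32767 ? 32767 : sample < -32768 ? -32768 : sample);
}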
2268  CASE(ADPCM_ARGO,
2269  /*
2270  * The format of each block:
2271  * uint8_t left_control;
2272  * uint4_t left_samples[nb_samples];
2273  * ---- and if stereo ----
2274  * uint8_t right_control;
2275  * uint4_t right_samples[nb_samples];
2276  *
2277  * Format of the control byte:
2278  * MSB [SSSSRDRR] LSB
2279  * S = (Shift Amount - 2)
2280  * D = Decoder flag.
2281  * R = Reserved
2282  *
2283  * Each block relies on the previous two samples of each channel.
2284  * They should be 0 initially.
2285  */
2286  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2287  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2288  ADPCMChannelStatus *cs = c->status + channel;
2289  int control, shift;
2290 
2291  samples = samples_p[channel] + block * 32;
2292 
2293  /* Get the control byte and decode the samples, 2 at a time. */
2294  control = bytestream2_get_byteu(&gb);
2295  shift = (control >> 4) + 2;
2296 
2297  for (int n = 0; n < 16; n++) {
2298  int sample = bytestream2_get_byteu(&gb);
2299  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2300  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2301  }
2302  }
2303  }
2304  ) /* End of CASE */
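The control byte layout documented above (MSB [SSSSRDRR] LSB) unpacks to just the two values the loop actually uses; a tiny sketch with illustrative names:

#include <stdint.h>

struct argo_control {
    int shift;  /* stored in the top four bits as (shift - 2) */
    int flag;   /* decoder flag, bit 2 */
};

static struct argo_control argo_parse_control(uint8_t control)
{
    struct argo_control c = { (control >> 4) + 2, control & 0x04 };
    return c;
}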
2305  CASE(ADPCM_ZORK,
2306  for (int n = 0; n < nb_samples * channels; n++) {
2307  int v = bytestream2_get_byteu(&gb);
2308  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2309  }
2310  ) /* End of CASE */
2311  CASE(ADPCM_IMA_MTF,
2312  for (int n = nb_samples / 2; n > 0; n--) {
2313  for (int channel = 0; channel < channels; channel++) {
2314  int v = bytestream2_get_byteu(&gb);
2315  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2316  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2317  }
2318  samples += channels;
2319  }
2320  ) /* End of CASE */
2321  default:
2322  av_assert0(0); // unsupported codec_id should not happen
2323  }
2324 
2325  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2326  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2327  return AVERROR_INVALIDDATA;
2328  }
2329 
2330  *got_frame_ptr = 1;
2331 
2332  if (avpkt->size < bytestream2_tell(&gb)) {
2333  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2334  return avpkt->size;
2335  }
2336 
2337  return bytestream2_tell(&gb);
2338 }
2339 
2340 static void adpcm_flush(AVCodecContext *avctx)
2341 {
2342  ADPCMDecodeContext *c = avctx->priv_data;
2343 
2344  /* Just nuke the entire state and re-init. */
2345  memset(c, 0, sizeof(ADPCMDecodeContext));
2346 
2347  switch(avctx->codec_id) {
2348  case AV_CODEC_ID_ADPCM_CT:
2349  c->status[0].step = c->status[1].step = 511;
2350  break;
2351 
 2352  case AV_CODEC_ID_ADPCM_IMA_APC:
 2353  if (avctx->extradata && avctx->extradata_size >= 8) {
2354  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2355  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2356  }
2357  break;
2358 
 2359  case AV_CODEC_ID_ADPCM_IMA_APM:
 2360  if (avctx->extradata && avctx->extradata_size >= 28) {
2361  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2362  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2363  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2364  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2365  }
2366  break;
2367 
 2368  case AV_CODEC_ID_ADPCM_IMA_WS:
 2369  if (avctx->extradata && avctx->extradata_size >= 2)
2370  c->vqa_version = AV_RL16(avctx->extradata);
2371  break;
2372  default:
2373  /* Other codecs may want to handle this during decoding. */
2374  c->has_status = 0;
2375  return;
2376  }
2377 
2378  c->has_status = 1;
2379 }
2380 
2381 
 2382 static enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
 2383  AV_SAMPLE_FMT_NONE };
 2384 static enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
 2385  AV_SAMPLE_FMT_NONE };
 2386 static enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
 2387  AV_SAMPLE_FMT_S16P,
 2388  AV_SAMPLE_FMT_NONE };
 2389 
2390 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2391 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2392 const FFCodec ff_ ## name_ ## _decoder = { \
2393  .p.name = #name_, \
2394  CODEC_LONG_NAME(long_name_), \
2395  .p.type = AVMEDIA_TYPE_AUDIO, \
2396  .p.id = id_, \
2397  .p.capabilities = AV_CODEC_CAP_DR1, \
2398  .p.sample_fmts = sample_fmts_, \
2399  .priv_data_size = sizeof(ADPCMDecodeContext), \
2400  .init = adpcm_decode_init, \
2401  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2402  .flush = adpcm_flush, \
2403 };
2404 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2405  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2406 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2407  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2408 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2409  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2410  name, sample_fmts, long_name)
2411 
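For reference, when the matching CONFIG_..._DECODER is enabled, one ADPCM_DECODER() line below expands via ADPCM_DECODER_1 into an FFCodec definition roughly like the following (shown here for the adpcm_ms entry):

const FFCodec ff_adpcm_ms_decoder = {
    .p.name         = "adpcm_ms",
    CODEC_LONG_NAME("ADPCM Microsoft"),
    .p.type         = AVMEDIA_TYPE_AUDIO,
    .p.id           = AV_CODEC_ID_ADPCM_MS,
    .p.capabilities = AV_CODEC_CAP_DR1,
    .p.sample_fmts  = sample_fmts_both,
    .priv_data_size = sizeof(ADPCMDecodeContext),
    .init           = adpcm_decode_init,
    FF_CODEC_DECODE_CB(adpcm_decode_frame),
    .flush          = adpcm_flush,
};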
2412 /* Note: Do not forget to add new entries to the Makefile as well. */
2413 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2414 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2415 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2416 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2417 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2418 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2419 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2420 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2421 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2422 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2423 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2424 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2425 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2426 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2427 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2428 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2429 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2430 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2431 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2432 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2433 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2434 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2435 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2436 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2437 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2438 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2439 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2440 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2441 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2442 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2443 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2444 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2445 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2446 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2447 ADPCM_DECODER(ADPCM_IMA_XBOX, sample_fmts_s16p, adpcm_ima_xbox, "ADPCM IMA Xbox")
2448 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2449 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2450 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2451 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2452 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2453 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2454 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2455 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2456 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2457 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2458 ADPCM_DECODER(ADPCM_XMD, sample_fmts_s16p, adpcm_xmd, "ADPCM Konami XMD")
2459 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2460 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")