FFmpeg
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 
38 #include "config_components.h"
39 
40 #include "avcodec.h"
41 #include "get_bits.h"
42 #include "bytestream.h"
43 #include "adpcm.h"
44 #include "adpcm_data.h"
45 #include "codec_internal.h"
46 #include "decode.h"
47 
48 /**
49  * @file
50  * ADPCM decoders
51  * Features and limitations:
52  *
53  * Reference documents:
54  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
55  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
56  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
57  * http://openquicktime.sourceforge.net/
58  * XAnim sources (xa_codec.c) http://xanim.polter.net/
59  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
60  * SoX source code http://sox.sourceforge.net/
61  *
62  * CD-ROM XA:
63  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
64  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
65  * readstr http://www.geocities.co.jp/Playtown/2004/
66  */
67 
68 #define CASE_0(codec_id, ...)
69 #define CASE_1(codec_id, ...) \
70  case codec_id: \
71  { __VA_ARGS__ } \
72  break;
73 #define CASE_2(enabled, codec_id, ...) \
74  CASE_ ## enabled(codec_id, __VA_ARGS__)
75 #define CASE_3(config, codec_id, ...) \
76  CASE_2(config, codec_id, __VA_ARGS__)
77 #define CASE(codec, ...) \
78  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
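/* Illustration: with CONFIG_ADPCM_MS_DECODER set to 1, a use such as
 *     CASE(ADPCM_MS, ...decoder body...)
 * expands to
 *     case AV_CODEC_ID_ADPCM_MS: { ...decoder body... } break;
 * whereas a decoder configured out (0) expands to nothing, so both the case
 * label and its body are removed at compile time. */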
79 
80 /* These are for CD-ROM XA ADPCM */
81 static const int8_t xa_adpcm_table[5][2] = {
82  { 0, 0 },
83  { 60, 0 },
84  { 115, -52 },
85  { 98, -55 },
86  { 122, -60 }
87 };
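/* Each row is an {f0, f1} prediction-coefficient pair in 1/64 units;
 * xa_decode() below applies them to the two previous outputs as
 * (s_1*f0 + s_2*f1 + 32) >> 6, i.e. with rounding. */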
88 
89 static const int16_t afc_coeffs[2][16] = {
90  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
91  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
92 };
93 
94 static const int16_t ea_adpcm_table[] = {
95  0, 240, 460, 392,
96  0, 0, -208, -220,
97  0, 1, 3, 4,
98  7, 8, 10, 11,
99  0, -1, -3, -4
100 };
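/* The EA decoders below read this table in pairs: for a 4-bit predictor index
 * idx taken from the stream, coeff1 = ea_adpcm_table[idx] and
 * coeff2 = ea_adpcm_table[idx + 4], and the prediction
 * (cur*coeff1 + prev*coeff2 + 0x80) >> 8 is added to the shifted nibble. */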
101 
102 /*
103  * Dumped from the binaries:
104  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
105  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
106  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
107  */
108 static const int8_t ima_cunning_index_table[9] = {
109  -1, -1, -1, -1, 1, 2, 3, 4, -1
110 };
111 
112 /*
113  * Dumped from the binaries:
114  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
115  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
116  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
117  */
118 static const int16_t ima_cunning_step_table[61] = {
119  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
120  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
121  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
122  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
123  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
124  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
125 };
126 
127 static const int8_t adpcm_index_table2[4] = {
128  -1, 2,
129  -1, 2,
130 };
131 
132 static const int8_t adpcm_index_table3[8] = {
133  -1, -1, 1, 2,
134  -1, -1, 1, 2,
135 };
136 
137 static const int8_t adpcm_index_table5[32] = {
138  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
139  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
140 };
141 
142 static const int8_t * const adpcm_index_tables[4] = {
143  &adpcm_index_table2[0],
144  &adpcm_index_table3[0],
145  &ff_adpcm_index_table[0],
146  &adpcm_index_table5[0],
147 };
148 
149 static const int16_t mtaf_stepsize[32][16] = {
150  { 1, 5, 9, 13, 16, 20, 24, 28,
151  -1, -5, -9, -13, -16, -20, -24, -28, },
152  { 2, 6, 11, 15, 20, 24, 29, 33,
153  -2, -6, -11, -15, -20, -24, -29, -33, },
154  { 2, 7, 13, 18, 23, 28, 34, 39,
155  -2, -7, -13, -18, -23, -28, -34, -39, },
156  { 3, 9, 15, 21, 28, 34, 40, 46,
157  -3, -9, -15, -21, -28, -34, -40, -46, },
158  { 3, 11, 18, 26, 33, 41, 48, 56,
159  -3, -11, -18, -26, -33, -41, -48, -56, },
160  { 4, 13, 22, 31, 40, 49, 58, 67,
161  -4, -13, -22, -31, -40, -49, -58, -67, },
162  { 5, 16, 26, 37, 48, 59, 69, 80,
163  -5, -16, -26, -37, -48, -59, -69, -80, },
164  { 6, 19, 31, 44, 57, 70, 82, 95,
165  -6, -19, -31, -44, -57, -70, -82, -95, },
166  { 7, 22, 38, 53, 68, 83, 99, 114,
167  -7, -22, -38, -53, -68, -83, -99, -114, },
168  { 9, 27, 45, 63, 81, 99, 117, 135,
169  -9, -27, -45, -63, -81, -99, -117, -135, },
170  { 10, 32, 53, 75, 96, 118, 139, 161,
171  -10, -32, -53, -75, -96, -118, -139, -161, },
172  { 12, 38, 64, 90, 115, 141, 167, 193,
173  -12, -38, -64, -90, -115, -141, -167, -193, },
174  { 15, 45, 76, 106, 137, 167, 198, 228,
175  -15, -45, -76, -106, -137, -167, -198, -228, },
176  { 18, 54, 91, 127, 164, 200, 237, 273,
177  -18, -54, -91, -127, -164, -200, -237, -273, },
178  { 21, 65, 108, 152, 195, 239, 282, 326,
179  -21, -65, -108, -152, -195, -239, -282, -326, },
180  { 25, 77, 129, 181, 232, 284, 336, 388,
181  -25, -77, -129, -181, -232, -284, -336, -388, },
182  { 30, 92, 153, 215, 276, 338, 399, 461,
183  -30, -92, -153, -215, -276, -338, -399, -461, },
184  { 36, 109, 183, 256, 329, 402, 476, 549,
185  -36, -109, -183, -256, -329, -402, -476, -549, },
186  { 43, 130, 218, 305, 392, 479, 567, 654,
187  -43, -130, -218, -305, -392, -479, -567, -654, },
188  { 52, 156, 260, 364, 468, 572, 676, 780,
189  -52, -156, -260, -364, -468, -572, -676, -780, },
190  { 62, 186, 310, 434, 558, 682, 806, 930,
191  -62, -186, -310, -434, -558, -682, -806, -930, },
192  { 73, 221, 368, 516, 663, 811, 958, 1106,
193  -73, -221, -368, -516, -663, -811, -958, -1106, },
194  { 87, 263, 439, 615, 790, 966, 1142, 1318,
195  -87, -263, -439, -615, -790, -966, -1142, -1318, },
196  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
197  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
198  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
199  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
200  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
201  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
202  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
203  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
204  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
205  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
206  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
207  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
208  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
209  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
210  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
211  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
212  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
213  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
214 };
215 
216 static const int16_t oki_step_table[49] = {
217  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
218  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
219  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
220  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
221  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
222 };
223 
224 // zero-padded where the table size is less than 16
225 static const int8_t swf_index_tables[4][16] = {
226  /*2*/ { -1, 2 },
227  /*3*/ { -1, -1, 2, 4 },
228  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
229  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
230 };
231 
232 static const int8_t zork_index_table[8] = {
233  -1, -1, -1, 1, 4, 7, 10, 12,
234 };
235 
236 static const int8_t mtf_index_table[16] = {
237  8, 6, 4, 2, -1, -1, -1, -1,
238  -1, -1, -1, -1, 2, 4, 6, 8,
239 };
240 
241 /* end of tables */
242 
243 typedef struct ADPCMDecodeContext {
244  ADPCMChannelStatus status[14];
245  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
246  int has_status; /**< Status flag. Reset to 0 after a flush. */
247 } ADPCMDecodeContext;
248 
249 static void adpcm_flush(AVCodecContext *avctx);
250 
251 static av_cold int adpcm_decode_init(AVCodecContext *avctx)
252 {
253  ADPCMDecodeContext *c = avctx->priv_data;
254  unsigned int min_channels = 1;
255  unsigned int max_channels = 2;
256 
257  adpcm_flush(avctx);
258 
259  switch(avctx->codec->id) {
261  max_channels = 1;
262  break;
269  max_channels = 6;
270  break;
272  min_channels = 2;
273  max_channels = 8;
274  if (avctx->ch_layout.nb_channels & 1) {
275  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
276  return AVERROR_PATCHWELCOME;
277  }
278  break;
280  min_channels = 2;
281  break;
283  max_channels = 8;
284  if (avctx->ch_layout.nb_channels <= 0 ||
285  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
286  return AVERROR_INVALIDDATA;
287  break;
291  max_channels = 14;
292  break;
293  }
294  if (avctx->ch_layout.nb_channels < min_channels ||
295  avctx->ch_layout.nb_channels > max_channels) {
296  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
297  return AVERROR(EINVAL);
298  }
299 
300  switch(avctx->codec->id) {
302  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
303  return AVERROR_INVALIDDATA;
304  break;
306  if (avctx->bits_per_coded_sample != 4 ||
307  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
308  return AVERROR_INVALIDDATA;
309  break;
311  if (avctx->bits_per_coded_sample != 8)
312  return AVERROR_INVALIDDATA;
313  break;
314  default:
315  break;
316  }
317 
318  switch (avctx->codec->id) {
340  break;
342  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
343                                            AV_SAMPLE_FMT_S16;
344  break;
346  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
347                                                         AV_SAMPLE_FMT_S16;
348  break;
349  default:
350  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
351  }
352  return 0;
353 }
354 
355 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
356 {
357  int delta, pred, step, add;
358 
359  pred = c->predictor;
360  delta = nibble & 7;
361  step = c->step;
362  add = (delta * 2 + 1) * step;
363  if (add < 0)
364  add = add + 7;
365 
366  if ((nibble & 8) == 0)
367  pred = av_clip(pred + (add >> 3), -32767, 32767);
368  else
369  pred = av_clip(pred - (add >> 3), -32767, 32767);
370 
371  switch (delta) {
372  case 7:
373  step *= 0x99;
374  break;
375  case 6:
376  c->step = av_clip(c->step * 2, 127, 24576);
377  c->predictor = pred;
378  return pred;
379  case 5:
380  step *= 0x66;
381  break;
382  case 4:
383  step *= 0x4d;
384  break;
385  default:
386  step *= 0x39;
387  break;
388  }
389 
390  if (step < 0)
391  step += 0x3f;
392 
393  c->step = step >> 6;
394  c->step = av_clip(c->step, 127, 24576);
395  c->predictor = pred;
396  return pred;
397 }
398 
399 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
400 {
401  int step_index;
402  int predictor;
403  int sign, delta, diff, step;
404 
405  step = ff_adpcm_step_table[c->step_index];
406  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
407  step_index = av_clip(step_index, 0, 88);
408 
409  sign = nibble & 8;
410  delta = nibble & 7;
411  /* perform direct multiplication instead of the series of jumps proposed by
412  * the reference ADPCM implementation, since modern CPUs can do the multiplies
413  * quickly enough */
414  diff = ((2 * delta + 1) * step) >> shift;
415  predictor = c->predictor;
416  if (sign) predictor -= diff;
417  else predictor += diff;
418 
419  c->predictor = av_clip_int16(predictor);
420  c->step_index = step_index;
421 
422  return (int16_t)c->predictor;
423 }
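/* Worked example with the standard IMA tables and shift == 3: from step_index 0
 * the step is 7; nibble 0x3 gives delta 3, so diff = ((2*3 + 1) * 7) >> 3 = 6
 * and the predictor rises by 6, while the index moves by
 * ff_adpcm_index_table[3] = -1 and is clamped back to 0. */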
424 
425 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
426 {
427  int step_index;
428  int predictor;
429  int sign, delta, diff, step;
430 
431  step = ff_adpcm_step_table[c->step_index];
432  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
433  step_index = av_clip(step_index, 0, 88);
434 
435  sign = nibble & 8;
436  delta = nibble & 7;
437  diff = (delta * step) >> shift;
438  predictor = c->predictor;
439  if (sign) predictor -= diff;
440  else predictor += diff;
441 
442  c->predictor = av_clip_int16(predictor);
443  c->step_index = step_index;
444 
445  return (int16_t)c->predictor;
446 }
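/* Identical to adpcm_ima_expand_nibble() above except for the difference term:
 * here diff = (delta * step) >> shift instead of ((2 * delta + 1) * step) >> shift,
 * and the ALP caller below passes shift == 2 to make up for the missing factor
 * of two. */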
447 
448 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
449 {
450  int step_index, step, delta, predictor;
451 
452  step = ff_adpcm_step_table[c->step_index];
453 
454  delta = step * (2 * nibble - 15);
455  predictor = c->predictor + delta;
456 
457  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
458  c->predictor = av_clip_int16(predictor >> 4);
459  c->step_index = av_clip(step_index, 0, 88);
460 
461  return (int16_t)c->predictor;
462 }
463 
464 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
465 {
466  int step_index;
467  int predictor;
468  int step;
469 
470  nibble = sign_extend(nibble & 0xF, 4);
471 
472  step = ima_cunning_step_table[c->step_index];
473  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
474  step_index = av_clip(step_index, 0, 60);
475 
476  predictor = c->predictor + step * nibble;
477 
478  c->predictor = av_clip_int16(predictor);
479  c->step_index = step_index;
480 
481  return c->predictor;
482 }
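/* Unlike the IMA variants above, this codec treats the nibble as a signed
 * 4-bit value: the predictor moves by step * nibble directly, and only the
 * magnitude of the nibble drives the step-index adaptation through
 * ima_cunning_index_table. */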
483 
484 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
485 {
486  int nibble, step_index, predictor, sign, delta, diff, step, shift;
487 
488  shift = bps - 1;
489  nibble = get_bits_le(gb, bps);
490  step = ff_adpcm_step_table[c->step_index];
491  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
492  step_index = av_clip(step_index, 0, 88);
493 
494  sign = nibble & (1 << shift);
495  delta = av_zero_extend(nibble, shift);
496  diff = ((2 * delta + 1) * step) >> shift;
497  predictor = c->predictor;
498  if (sign) predictor -= diff;
499  else predictor += diff;
500 
501  c->predictor = av_clip_int16(predictor);
502  c->step_index = step_index;
503 
504  return (int16_t)c->predictor;
505 }
506 
507 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
508 {
509  int step_index;
510  int predictor;
511  int diff, step;
512 
513  step = ff_adpcm_step_table[c->step_index];
514  step_index = c->step_index + ff_adpcm_index_table[nibble];
515  step_index = av_clip(step_index, 0, 88);
516 
517  diff = step >> 3;
518  if (nibble & 4) diff += step;
519  if (nibble & 2) diff += step >> 1;
520  if (nibble & 1) diff += step >> 2;
521 
522  if (nibble & 8)
523  predictor = c->predictor - diff;
524  else
525  predictor = c->predictor + diff;
526 
527  c->predictor = av_clip_int16(predictor);
528  c->step_index = step_index;
529 
530  return c->predictor;
531 }
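/* The shift-and-add sequence above is the classic IMA reference construction:
 * diff = step/8 + (b2 ? step : 0) + (b1 ? step/2 : 0) + (b0 ? step/4 : 0),
 * which approximates ((2 * delta + 1) * step) / 8 without a multiply. */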
532 
533 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
534 {
535  int predictor;
536 
537  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
538  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
539 
540  c->sample2 = c->sample1;
541  c->sample1 = av_clip_int16(predictor);
542  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
543  if (c->idelta < 16) c->idelta = 16;
544  if (c->idelta > INT_MAX/768) {
545  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
546  c->idelta = INT_MAX/768;
547  }
548 
549  return c->sample1;
550 }
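/* Two-tap MS ADPCM predictor: the previous two outputs are weighted by
 * coeff1/coeff2 (chosen per block from ff_adpcm_AdaptCoeff1/2), the signed
 * nibble scaled by idelta is added, and idelta is then adapted through
 * ff_adpcm_AdaptationTable with a floor of 16 and a cap that guards against
 * overflow on malformed input. */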
551 
552 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
553 {
554  int step_index, predictor, sign, delta, diff, step;
555 
556  step = oki_step_table[c->step_index];
557  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
558  step_index = av_clip(step_index, 0, 48);
559 
560  sign = nibble & 8;
561  delta = nibble & 7;
562  diff = ((2 * delta + 1) * step) >> 3;
563  predictor = c->predictor;
564  if (sign) predictor -= diff;
565  else predictor += diff;
566 
567  c->predictor = av_clip_intp2(predictor, 11);
568  c->step_index = step_index;
569 
570  return c->predictor * 16;
571 }
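/* The OKI variant works on 12-bit samples: av_clip_intp2(predictor, 11) keeps
 * the predictor in the signed 12-bit range and the result is scaled by 16 to
 * cover the 16-bit output range. */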
572 
573 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
574 {
575  int sign, delta, diff;
576  int new_step;
577 
578  sign = nibble & 8;
579  delta = nibble & 7;
580  /* perform direct multiplication instead of the series of jumps proposed by
581  * the reference ADPCM implementation, since modern CPUs can do the multiplies
582  * quickly enough */
583  diff = ((2 * delta + 1) * c->step) >> 3;
584  /* predictor update is not so trivial: the predictor is multiplied by 254/256 before the difference is applied */
585  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
586  c->predictor = av_clip_int16(c->predictor);
587  /* calculate new step and clamp it to range 511..32767 */
588  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
589  c->step = av_clip(new_step, 511, 32767);
590 
591  return (int16_t)c->predictor;
592 }
593 
594 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
595 {
596  int sign, delta, diff;
597 
598  sign = nibble & (1<<(size-1));
599  delta = nibble & ((1<<(size-1))-1);
600  diff = delta << (7 + c->step + shift);
601 
602  /* clamp result */
603  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
604 
605  /* calculate new step */
606  if (delta >= (2*size - 3) && c->step < 3)
607  c->step++;
608  else if (delta == 0 && c->step > 0)
609  c->step--;
610 
611  return (int16_t) c->predictor;
612 }
613 
614 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
615 {
616  if(!c->step) {
617  c->predictor = 0;
618  c->step = 127;
619  }
620 
621  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
622  c->predictor = av_clip_int16(c->predictor);
623  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
624  c->step = av_clip(c->step, 127, 24576);
625  return c->predictor;
626 }
627 
628 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
629 {
630  c->predictor += mtaf_stepsize[c->step][nibble];
631  c->predictor = av_clip_int16(c->predictor);
632  c->step += ff_adpcm_index_table[nibble];
633  c->step = av_clip_uintp2(c->step, 5);
634  return c->predictor;
635 }
636 
637 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
638 {
639  int16_t index = c->step_index;
640  uint32_t lookup_sample = ff_adpcm_step_table[index];
641  int32_t sample = 0;
642 
643  if (nibble & 0x40)
644  sample += lookup_sample;
645  if (nibble & 0x20)
646  sample += lookup_sample >> 1;
647  if (nibble & 0x10)
648  sample += lookup_sample >> 2;
649  if (nibble & 0x08)
650  sample += lookup_sample >> 3;
651  if (nibble & 0x04)
652  sample += lookup_sample >> 4;
653  if (nibble & 0x02)
654  sample += lookup_sample >> 5;
655  if (nibble & 0x01)
656  sample += lookup_sample >> 6;
657  if (nibble & 0x80)
658  sample = -sample;
659 
660  sample += c->predictor;
661  sample = av_clip_int16(sample);
662 
663  index += zork_index_table[(nibble >> 4) & 7];
664  index = av_clip(index, 0, 88);
665 
666  c->predictor = sample;
667  c->step_index = index;
668 
669  return sample;
670 }
671 
672 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
673  const uint8_t *in, ADPCMChannelStatus *left,
674  ADPCMChannelStatus *right, int channels, int sample_offset)
675 {
676  int i, j;
677  int shift,filter,f0,f1;
678  int s_1,s_2;
679  int d,s,t;
680 
681  out0 += sample_offset;
682  if (channels == 1)
683  out1 = out0 + 28;
684  else
685  out1 += sample_offset;
686 
687  for(i=0;i<4;i++) {
688  shift = 12 - (in[4+i*2] & 15);
689  filter = in[4+i*2] >> 4;
690  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
691  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
692  filter=0;
693  }
694  if (shift < 0) {
695  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
696  shift = 0;
697  }
698  f0 = xa_adpcm_table[filter][0];
699  f1 = xa_adpcm_table[filter][1];
700 
701  s_1 = left->sample1;
702  s_2 = left->sample2;
703 
704  for(j=0;j<28;j++) {
705  d = in[16+i+j*4];
706 
707  t = sign_extend(d, 4);
708  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
709  s_2 = s_1;
710  s_1 = av_clip_int16(s);
711  out0[j] = s_1;
712  }
713 
714  if (channels == 2) {
715  left->sample1 = s_1;
716  left->sample2 = s_2;
717  s_1 = right->sample1;
718  s_2 = right->sample2;
719  }
720 
721  shift = 12 - (in[5+i*2] & 15);
722  filter = in[5+i*2] >> 4;
723  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
724  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
725  filter=0;
726  }
727  if (shift < 0) {
728  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
729  shift = 0;
730  }
731 
732  f0 = xa_adpcm_table[filter][0];
733  f1 = xa_adpcm_table[filter][1];
734 
735  for(j=0;j<28;j++) {
736  d = in[16+i+j*4];
737 
738  t = sign_extend(d >> 4, 4);
739  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
740  s_2 = s_1;
741  s_1 = av_clip_int16(s);
742  out1[j] = s_1;
743  }
744 
745  if (channels == 2) {
746  right->sample1 = s_1;
747  right->sample2 = s_2;
748  } else {
749  left->sample1 = s_1;
750  left->sample2 = s_2;
751  }
752 
753  out0 += 28 * (3 - channels);
754  out1 += 28 * (3 - channels);
755  }
756 
757  return 0;
758 }
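/* Layout handled by xa_decode(): a 128-byte CD-ROM XA sound group holds eight
 * 28-sample sound units. Bytes 4..11 carry one filter/shift header byte per
 * unit, and from byte 16 on the data nibbles for header pair i sit at offsets
 * 16 + i + 4*j, the low nibble feeding one unit and the high nibble the other.
 * That is 224 samples per group, or 112 per channel in stereo. */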
759 
760 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
761 {
762  ADPCMDecodeContext *c = avctx->priv_data;
763  GetBitContext gb;
764  const int8_t *table;
765  int channels = avctx->ch_layout.nb_channels;
766  int k0, signmask, nb_bits, count;
767  int size = buf_size*8;
768  int i;
769 
770  init_get_bits(&gb, buf, size);
771 
772  //read bits & initial values
773  nb_bits = get_bits(&gb, 2)+2;
774  table = swf_index_tables[nb_bits-2];
775  k0 = 1 << (nb_bits-2);
776  signmask = 1 << (nb_bits-1);
777 
778  while (get_bits_count(&gb) <= size - 22 * channels) {
779  for (i = 0; i < channels; i++) {
780  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
781  c->status[i].step_index = get_bits(&gb, 6);
782  }
783 
784  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
785  int i;
786 
787  for (i = 0; i < channels; i++) {
788  // similar to IMA adpcm
789  int delta = get_bits(&gb, nb_bits);
790  int step = ff_adpcm_step_table[c->status[i].step_index];
791  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
792  int k = k0;
793 
794  do {
795  if (delta & k)
796  vpdiff += step;
797  step >>= 1;
798  k >>= 1;
799  } while(k);
800  vpdiff += step;
801 
802  if (delta & signmask)
803  c->status[i].predictor -= vpdiff;
804  else
805  c->status[i].predictor += vpdiff;
806 
807  c->status[i].step_index += table[delta & (~signmask)];
808 
809  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
810  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
811 
812  *samples++ = c->status[i].predictor;
813  }
814  }
815  }
816 }
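/* SWF ADPCM packet layout as parsed above: a 2-bit field selects the code size
 * (2..5 bits); each block then starts with a 16-bit seed sample and a 6-bit
 * step index per channel (22 bits per channel), followed by up to 4095 coded
 * samples per channel, i.e. 4096 samples per block counting the seed. */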
817 
818 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
819 {
820  int sample = sign_extend(nibble, 4) * (1 << shift);
821 
822  if (flag)
823  sample += (8 * cs->sample1) - (4 * cs->sample2);
824  else
825  sample += 4 * cs->sample1;
826 
827  sample = av_clip_int16(sample >> 2);
828 
829  cs->sample2 = cs->sample1;
830  cs->sample1 = sample;
831 
832  return sample;
833 }
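/* With flag set this is a second-order predictor, 2*sample1 - sample2 (the 8x
 * and 4x weights are applied before the final >> 2); with flag clear it falls
 * back to the first-order prediction sample1. */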
834 
835 /**
836  * Get the number of samples (per channel) that will be decoded from the packet.
837  * In one case, this is actually the maximum number of samples possible to
838  * decode with the given buf_size.
839  *
840  * @param[out] coded_samples set to the number of samples as coded in the
841  * packet, or 0 if the codec does not encode the
842  * number of samples in each frame.
843  * @param[out] approx_nb_samples set to non-zero if the number of samples
844  * returned is an approximation.
845  */
846 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
847  int buf_size, int *coded_samples, int *approx_nb_samples)
848 {
849  ADPCMDecodeContext *s = avctx->priv_data;
850  int nb_samples = 0;
851  int ch = avctx->ch_layout.nb_channels;
852  int has_coded_samples = 0;
853  int header_size;
854 
855  *coded_samples = 0;
856  *approx_nb_samples = 0;
857 
858  if(ch <= 0)
859  return 0;
860 
861  switch (avctx->codec->id) {
862  /* constant, only check buf_size */
864  if (buf_size < 76 * ch)
865  return 0;
866  nb_samples = 128;
867  break;
869  if (buf_size < 34 * ch)
870  return 0;
871  nb_samples = 64;
872  break;
873  /* simple 4-bit adpcm */
886  nb_samples = buf_size * 2 / ch;
887  break;
888  }
889  if (nb_samples)
890  return nb_samples;
891 
892  /* simple 4-bit adpcm, with header */
893  header_size = 0;
894  switch (avctx->codec->id) {
900  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
901  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
902  }
903  if (header_size > 0)
904  return (buf_size - header_size) * 2 / ch;
905 
906  /* more complex formats */
907  switch (avctx->codec->id) {
909  bytestream2_skip(gb, 4);
910  has_coded_samples = 1;
911  *coded_samples = bytestream2_get_le32u(gb);
912  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
913  bytestream2_seek(gb, -8, SEEK_CUR);
914  break;
916  /* Stereo is 30 bytes per block */
917  /* Mono is 15 bytes per block */
918  has_coded_samples = 1;
919  *coded_samples = bytestream2_get_le32(gb);
920  *coded_samples -= *coded_samples % 28;
921  nb_samples = (buf_size - 12) / (ch == 2 ? 30 : 15) * 28;
922  break;
924  has_coded_samples = 1;
925  *coded_samples = bytestream2_get_le32(gb);
926  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
927  break;
929  nb_samples = (buf_size - ch) / ch * 2;
930  break;
934  /* maximum number of samples */
935  /* has internal offsets and a per-frame switch to signal raw 16-bit */
936  has_coded_samples = 1;
937  switch (avctx->codec->id) {
939  header_size = 4 + 9 * ch;
940  *coded_samples = bytestream2_get_le32(gb);
941  break;
943  header_size = 4 + 5 * ch;
944  *coded_samples = bytestream2_get_le32(gb);
945  break;
947  header_size = 4 + 5 * ch;
948  *coded_samples = bytestream2_get_be32(gb);
949  break;
950  }
951  *coded_samples -= *coded_samples % 28;
952  nb_samples = (buf_size - header_size) * 2 / ch;
953  nb_samples -= nb_samples % 28;
954  *approx_nb_samples = 1;
955  break;
957  if (avctx->block_align > 0)
958  buf_size = FFMIN(buf_size, avctx->block_align);
959  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
960  break;
962  if (avctx->block_align > 0)
963  buf_size = FFMIN(buf_size, avctx->block_align);
964  if (buf_size < 4 * ch)
965  return AVERROR_INVALIDDATA;
966  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
967  break;
969  if (avctx->block_align > 0)
970  buf_size = FFMIN(buf_size, avctx->block_align);
971  nb_samples = (buf_size - 4 * ch) * 2 / ch;
972  break;
973  CASE(ADPCM_IMA_WAV,
974  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
975  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
976  if (avctx->block_align > 0)
977  buf_size = FFMIN(buf_size, avctx->block_align);
978  if (buf_size < 4 * ch)
979  return AVERROR_INVALIDDATA;
980  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
981  ) /* End of CASE */
983  if (avctx->block_align > 0)
984  buf_size = FFMIN(buf_size, avctx->block_align);
985  nb_samples = (buf_size - 6 * ch) * 2 / ch;
986  break;
988  if (avctx->block_align > 0)
989  buf_size = FFMIN(buf_size, avctx->block_align);
990  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
991  break;
995  {
996  int samples_per_byte;
997  switch (avctx->codec->id) {
998  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
999  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
1000  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
1001  }
1002  if (!s->status[0].step_index) {
1003  if (buf_size < ch)
1004  return AVERROR_INVALIDDATA;
1005  nb_samples++;
1006  buf_size -= ch;
1007  }
1008  nb_samples += buf_size * samples_per_byte / ch;
1009  break;
1010  }
1011  case AV_CODEC_ID_ADPCM_SWF:
1012  {
1013  int buf_bits = buf_size * 8 - 2;
1014  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1015  int block_hdr_size = 22 * ch;
1016  int block_size = block_hdr_size + nbits * ch * 4095;
1017  int nblocks = buf_bits / block_size;
1018  int bits_left = buf_bits - nblocks * block_size;
1019  nb_samples = nblocks * 4096;
1020  if (bits_left >= block_hdr_size)
1021  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1022  break;
1023  }
1024  case AV_CODEC_ID_ADPCM_THP:
1025  case AV_CODEC_ID_ADPCM_THP_LE:
1026  if (avctx->extradata) {
1027  nb_samples = buf_size * 14 / (8 * ch);
1028  break;
1029  }
1030  has_coded_samples = 1;
1031  bytestream2_skip(gb, 4); // channel size
1032  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1033  bytestream2_get_le32(gb) :
1034  bytestream2_get_be32(gb);
1035  buf_size -= 8 + 36 * ch;
1036  buf_size /= ch;
1037  nb_samples = buf_size / 8 * 14;
1038  if (buf_size % 8 > 1)
1039  nb_samples += (buf_size % 8 - 1) * 2;
1040  *approx_nb_samples = 1;
1041  break;
1042  case AV_CODEC_ID_ADPCM_AFC:
1043  nb_samples = buf_size / (9 * ch) * 16;
1044  break;
1045  case AV_CODEC_ID_ADPCM_XA:
1046  nb_samples = (buf_size / 128) * 224 / ch;
1047  break;
1048  case AV_CODEC_ID_ADPCM_XMD:
1049  nb_samples = buf_size / (21 * ch) * 32;
1050  break;
1051  case AV_CODEC_ID_ADPCM_DTK:
1052  case AV_CODEC_ID_ADPCM_PSX:
1053  nb_samples = buf_size / (16 * ch) * 28;
1054  break;
1056  nb_samples = buf_size / avctx->block_align * 32;
1057  break;
1059  nb_samples = buf_size / ch;
1060  break;
1061  }
1062 
1063  /* validate coded sample count */
1064  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1065  return AVERROR_INVALIDDATA;
1066 
1067  return nb_samples;
1068 }
1069 
1070 static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1071  int *got_frame_ptr, AVPacket *avpkt)
1072 {
1073  const uint8_t *buf = avpkt->data;
1074  int buf_size = avpkt->size;
1075  ADPCMDecodeContext *c = avctx->priv_data;
1076  int channels = avctx->ch_layout.nb_channels;
1077  int16_t *samples;
1078  int16_t **samples_p;
1079  int st; /* stereo */
1080  int nb_samples, coded_samples, approx_nb_samples, ret;
1081  GetByteContext gb;
1082 
1083  bytestream2_init(&gb, buf, buf_size);
1084  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1085  if (nb_samples <= 0) {
1086  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1087  return AVERROR_INVALIDDATA;
1088  }
1089 
1090  /* get output buffer */
1091  frame->nb_samples = nb_samples;
1092  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1093  return ret;
1094  samples = (int16_t *)frame->data[0];
1095  samples_p = (int16_t **)frame->extended_data;
1096 
1097  /* use coded_samples when applicable */
1098  /* it is always <= nb_samples, so the output buffer will be large enough */
1099  if (coded_samples) {
1100  if (!approx_nb_samples && coded_samples != nb_samples)
1101  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1102  frame->nb_samples = nb_samples = coded_samples;
1103  }
1104 
1105  st = channels == 2 ? 1 : 0;
1106 
1107  switch(avctx->codec->id) {
1108  CASE(ADPCM_IMA_QT,
1109  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1110  Channel data is interleaved per-chunk. */
1111  for (int channel = 0; channel < channels; channel++) {
1112  ADPCMChannelStatus *cs = &c->status[channel];
1113  int predictor;
1114  int step_index;
1115  /* (pppppp) (piiiiiii) */
1116 
1117  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1118  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1119  step_index = predictor & 0x7F;
1120  predictor &= ~0x7F;
1121 
1122  if (cs->step_index == step_index) {
1123  int diff = predictor - cs->predictor;
1124  if (diff < 0)
1125  diff = - diff;
1126  if (diff > 0x7f)
1127  goto update;
1128  } else {
1129  update:
1130  cs->step_index = step_index;
1131  cs->predictor = predictor;
1132  }
1133 
1134  if (cs->step_index > 88u){
1135  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1136  channel, cs->step_index);
1137  return AVERROR_INVALIDDATA;
1138  }
1139 
1140  samples = samples_p[channel];
1141 
1142  for (int m = 0; m < 64; m += 2) {
1143  int byte = bytestream2_get_byteu(&gb);
1144  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1145  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1146  }
1147  }
1148  ) /* End of CASE */
1149  CASE(ADPCM_IMA_WAV,
1150  for (int i = 0; i < channels; i++) {
1151  ADPCMChannelStatus *cs = &c->status[i];
1152  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1153 
1154  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1155  if (cs->step_index > 88u){
1156  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1157  i, cs->step_index);
1158  return AVERROR_INVALIDDATA;
1159  }
1160  }
1161 
1162  if (avctx->bits_per_coded_sample != 4) {
1163  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1164  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1165  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1166  GetBitContext g;
1167 
1168  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1169  for (int i = 0; i < channels; i++) {
1170  ADPCMChannelStatus *cs = &c->status[i];
1171  samples = &samples_p[i][1 + n * samples_per_block];
1172  for (int j = 0; j < block_size; j++) {
1173  temp[j] = buf[4 * channels + block_size * n * channels +
1174  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1175  }
1176  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1177  if (ret < 0)
1178  return ret;
1179  for (int m = 0; m < samples_per_block; m++) {
1180  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1181  avctx->bits_per_coded_sample);
1182  }
1183  }
1184  }
1185  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1186  } else {
1187  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1188  for (int i = 0; i < channels; i++) {
1189  ADPCMChannelStatus *cs = &c->status[i];
1190  samples = &samples_p[i][1 + n * 8];
1191  for (int m = 0; m < 8; m += 2) {
1192  int v = bytestream2_get_byteu(&gb);
1193  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1194  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1195  }
1196  }
1197  }
1198  }
1199  ) /* End of CASE */
1200  CASE(ADPCM_4XM,
1201  for (int i = 0; i < channels; i++)
1202  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1203 
1204  for (int i = 0; i < channels; i++) {
1205  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1206  if (c->status[i].step_index > 88u) {
1207  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1208  i, c->status[i].step_index);
1209  return AVERROR_INVALIDDATA;
1210  }
1211  }
1212 
1213  for (int i = 0; i < channels; i++) {
1214  ADPCMChannelStatus *cs = &c->status[i];
1215  samples = (int16_t *)frame->data[i];
1216  for (int n = nb_samples >> 1; n > 0; n--) {
1217  int v = bytestream2_get_byteu(&gb);
1218  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1219  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1220  }
1221  }
1222  ) /* End of CASE */
1223  CASE(ADPCM_AGM,
1224  for (int i = 0; i < channels; i++)
1225  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1226  for (int i = 0; i < channels; i++)
1227  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1228 
1229  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1230  int v = bytestream2_get_byteu(&gb);
1231  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1232  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1233  }
1234  ) /* End of CASE */
1235  CASE(ADPCM_MS,
1236  int block_predictor;
1237 
1238  if (avctx->ch_layout.nb_channels > 2) {
1239  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1240  samples = samples_p[channel];
1241  block_predictor = bytestream2_get_byteu(&gb);
1242  if (block_predictor > 6) {
1243  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1244  channel, block_predictor);
1245  return AVERROR_INVALIDDATA;
1246  }
1247  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1248  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1249  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1250  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1251  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1252  *samples++ = c->status[channel].sample2;
1253  *samples++ = c->status[channel].sample1;
1254  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1255  int byte = bytestream2_get_byteu(&gb);
1256  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1257  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1258  }
1259  }
1260  } else {
1261  block_predictor = bytestream2_get_byteu(&gb);
1262  if (block_predictor > 6) {
1263  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1264  block_predictor);
1265  return AVERROR_INVALIDDATA;
1266  }
1267  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1268  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1269  if (st) {
1270  block_predictor = bytestream2_get_byteu(&gb);
1271  if (block_predictor > 6) {
1272  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1273  block_predictor);
1274  return AVERROR_INVALIDDATA;
1275  }
1276  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1277  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1278  }
1279  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1280  if (st){
1281  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1282  }
1283 
1284  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1285  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1286  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1287  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1288 
1289  *samples++ = c->status[0].sample2;
1290  if (st) *samples++ = c->status[1].sample2;
1291  *samples++ = c->status[0].sample1;
1292  if (st) *samples++ = c->status[1].sample1;
1293  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1294  int byte = bytestream2_get_byteu(&gb);
1295  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1296  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1297  }
1298  }
1299  ) /* End of CASE */
1300  CASE(ADPCM_MTAF,
1301  for (int channel = 0; channel < channels; channel += 2) {
1302  bytestream2_skipu(&gb, 4);
1303  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1304  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1305  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1306  bytestream2_skipu(&gb, 2);
1307  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1308  bytestream2_skipu(&gb, 2);
1309  for (int n = 0; n < nb_samples; n += 2) {
1310  int v = bytestream2_get_byteu(&gb);
1311  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1312  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1313  }
1314  for (int n = 0; n < nb_samples; n += 2) {
1315  int v = bytestream2_get_byteu(&gb);
1316  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1317  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1318  }
1319  }
1320  ) /* End of CASE */
1321  CASE(ADPCM_IMA_DK4,
1322  for (int channel = 0; channel < channels; channel++) {
1323  ADPCMChannelStatus *cs = &c->status[channel];
1324  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1325  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1326  if (cs->step_index > 88u){
1327  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1328  channel, cs->step_index);
1329  return AVERROR_INVALIDDATA;
1330  }
1331  }
1332  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1333  int v = bytestream2_get_byteu(&gb);
1334  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1335  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1336  }
1337  ) /* End of CASE */
1338 
1339  /* DK3 ADPCM support macro */
1340 #define DK3_GET_NEXT_NIBBLE() \
1341  if (decode_top_nibble_next) { \
1342  nibble = last_byte >> 4; \
1343  decode_top_nibble_next = 0; \
1344  } else { \
1345  last_byte = bytestream2_get_byteu(&gb); \
1346  nibble = last_byte & 0x0F; \
1347  decode_top_nibble_next = 1; \
1348  }
1349  CASE(ADPCM_IMA_DK3,
1350  int last_byte = 0;
1351  int nibble;
1352  int decode_top_nibble_next = 0;
1353  int diff_channel;
1354  const int16_t *samples_end = samples + channels * nb_samples;
1355 
1356  bytestream2_skipu(&gb, 10);
1357  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1358  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1359  c->status[0].step_index = bytestream2_get_byteu(&gb);
1360  c->status[1].step_index = bytestream2_get_byteu(&gb);
1361  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1362  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1363  c->status[0].step_index, c->status[1].step_index);
1364  return AVERROR_INVALIDDATA;
1365  }
1366  /* sign extend the predictors */
1367  diff_channel = c->status[1].predictor;
1368 
1369  while (samples < samples_end) {
1370 
1371  /* for this algorithm, c->status[0] is the sum channel and
1372  * c->status[1] is the diff channel */
1373 
1374  /* process the first predictor of the sum channel */
1375  DK3_GET_NEXT_NIBBLE();
1376  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1377 
1378  /* process the diff channel predictor */
1379  DK3_GET_NEXT_NIBBLE();
1380  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1381 
1382  /* process the first pair of stereo PCM samples */
1383  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1384  *samples++ = c->status[0].predictor + c->status[1].predictor;
1385  *samples++ = c->status[0].predictor - c->status[1].predictor;
1386 
1387  /* process the second predictor of the sum channel */
1388  DK3_GET_NEXT_NIBBLE();
1389  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1390 
1391  /* process the second pair of stereo PCM samples */
1392  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1393  *samples++ = c->status[0].predictor + c->status[1].predictor;
1394  *samples++ = c->status[0].predictor - c->status[1].predictor;
1395  }
1396 
1397  if ((bytestream2_tell(&gb) & 1))
1398  bytestream2_skip(&gb, 1);
1399  ) /* End of CASE */
1400  CASE(ADPCM_IMA_ISS,
1401  for (int channel = 0; channel < channels; channel++) {
1402  ADPCMChannelStatus *cs = &c->status[channel];
1403  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1404  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1405  if (cs->step_index > 88u){
1406  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1407  channel, cs->step_index);
1408  return AVERROR_INVALIDDATA;
1409  }
1410  }
1411 
1412  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1413  int v1, v2;
1414  int v = bytestream2_get_byteu(&gb);
1415  /* nibbles are swapped for mono */
1416  if (st) {
1417  v1 = v >> 4;
1418  v2 = v & 0x0F;
1419  } else {
1420  v2 = v >> 4;
1421  v1 = v & 0x0F;
1422  }
1423  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1424  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1425  }
1426  ) /* End of CASE */
1427  CASE(ADPCM_IMA_MOFLEX,
1428  for (int channel = 0; channel < channels; channel++) {
1429  ADPCMChannelStatus *cs = &c->status[channel];
1430  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1431  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1432  if (cs->step_index > 88u){
1433  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1434  channel, cs->step_index);
1435  return AVERROR_INVALIDDATA;
1436  }
1437  }
1438 
1439  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1440  for (int channel = 0; channel < channels; channel++) {
1441  samples = samples_p[channel] + 256 * subframe;
1442  for (int n = 0; n < 256; n += 2) {
1443  int v = bytestream2_get_byteu(&gb);
1444  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1445  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1446  }
1447  }
1448  }
1449  ) /* End of CASE */
1450  CASE(ADPCM_IMA_DAT4,
1451  for (int channel = 0; channel < channels; channel++) {
1452  ADPCMChannelStatus *cs = &c->status[channel];
1453  samples = samples_p[channel];
1454  bytestream2_skip(&gb, 4);
1455  for (int n = 0; n < nb_samples; n += 2) {
1456  int v = bytestream2_get_byteu(&gb);
1457  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1458  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1459  }
1460  }
1461  ) /* End of CASE */
1462  CASE(ADPCM_IMA_APC,
1463  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1464  int v = bytestream2_get_byteu(&gb);
1465  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1466  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1467  }
1468  ) /* End of CASE */
1469  CASE(ADPCM_IMA_SSI,
1470  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1471  int v = bytestream2_get_byteu(&gb);
1472  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1473  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1474  }
1475  ) /* End of CASE */
1476  CASE(ADPCM_IMA_APM,
1477  for (int n = nb_samples / 2; n > 0; n--) {
1478  for (int channel = 0; channel < channels; channel++) {
1479  int v = bytestream2_get_byteu(&gb);
1480  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1481  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1482  }
1483  samples += channels;
1484  }
1485  ) /* End of CASE */
1486  CASE(ADPCM_IMA_ALP,
1487  for (int n = nb_samples / 2; n > 0; n--) {
1488  for (int channel = 0; channel < channels; channel++) {
1489  int v = bytestream2_get_byteu(&gb);
1490  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1491  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1492  }
1493  samples += channels;
1494  }
1495  ) /* End of CASE */
1496  CASE(ADPCM_IMA_CUNNING,
1497  for (int channel = 0; channel < channels; channel++) {
1498  int16_t *smp = samples_p[channel];
1499  for (int n = 0; n < nb_samples / 2; n++) {
1500  int v = bytestream2_get_byteu(&gb);
1501  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1502  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1503  }
1504  }
1505  ) /* End of CASE */
1506  CASE(ADPCM_IMA_OKI,
1507  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1508  int v = bytestream2_get_byteu(&gb);
1509  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1510  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1511  }
1512  ) /* End of CASE */
1513  CASE(ADPCM_IMA_RAD,
1514  for (int channel = 0; channel < channels; channel++) {
1515  ADPCMChannelStatus *cs = &c->status[channel];
1516  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1517  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1518  if (cs->step_index > 88u){
1519  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1520  channel, cs->step_index);
1521  return AVERROR_INVALIDDATA;
1522  }
1523  }
1524  for (int n = 0; n < nb_samples / 2; n++) {
1525  int byte[2];
1526 
1527  byte[0] = bytestream2_get_byteu(&gb);
1528  if (st)
1529  byte[1] = bytestream2_get_byteu(&gb);
1530  for (int channel = 0; channel < channels; channel++) {
1531  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1532  }
1533  for (int channel = 0; channel < channels; channel++) {
1534  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1535  }
1536  }
1537  ) /* End of CASE */
1538  CASE(ADPCM_IMA_WS,
1539  if (c->vqa_version == 3) {
1540  for (int channel = 0; channel < channels; channel++) {
1541  int16_t *smp = samples_p[channel];
1542 
1543  for (int n = nb_samples / 2; n > 0; n--) {
1544  int v = bytestream2_get_byteu(&gb);
1545  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1546  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1547  }
1548  }
1549  } else {
1550  for (int n = nb_samples / 2; n > 0; n--) {
1551  for (int channel = 0; channel < channels; channel++) {
1552  int v = bytestream2_get_byteu(&gb);
1553  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1554  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1555  }
1556  samples += channels;
1557  }
1558  }
1559  bytestream2_seek(&gb, 0, SEEK_END);
1560  ) /* End of CASE */
1561  CASE(ADPCM_XMD,
1562  int bytes_remaining, block = 0;
1563  while (bytestream2_get_bytes_left(&gb) >= 21 * channels) {
1564  for (int channel = 0; channel < channels; channel++) {
1565  int16_t *out = samples_p[channel] + block * 32;
1566  int16_t history[2];
1567  uint16_t scale;
1568 
1569  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1570  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1571  scale = bytestream2_get_le16(&gb);
1572 
1573  out[0] = history[1];
1574  out[1] = history[0];
1575 
1576  for (int n = 0; n < 15; n++) {
1577  unsigned byte = bytestream2_get_byte(&gb);
1578  int32_t nibble[2];
1579 
1580  nibble[0] = sign_extend(byte & 15, 4);
1581  nibble[1] = sign_extend(byte >> 4, 4);
1582 
1583  out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1584  history[1] = history[0];
1585  history[0] = out[2+n*2];
1586 
1587  out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1588  history[1] = history[0];
1589  history[0] = out[2+n*2+1];
1590  }
1591  }
1592 
1593  block++;
1594  }
1595  bytes_remaining = bytestream2_get_bytes_left(&gb);
1596  if (bytes_remaining > 0) {
1597  bytestream2_skip(&gb, bytes_remaining);
1598  }
1599  ) /* End of CASE */
1600  CASE(ADPCM_XA,
1601  int16_t *out0 = samples_p[0];
1602  int16_t *out1 = samples_p[1];
1603  int samples_per_block = 28 * (3 - channels) * 4;
1604  int sample_offset = 0;
1605  int bytes_remaining;
1606  while (bytestream2_get_bytes_left(&gb) >= 128) {
1607  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1608  &c->status[0], &c->status[1],
1609  channels, sample_offset)) < 0)
1610  return ret;
1611  bytestream2_skipu(&gb, 128);
1612  sample_offset += samples_per_block;
1613  }
1614  /* Less than a full block of data left, e.g. when reading from
1615  * 2324 byte per sector XA; the remainder is padding */
1616  bytes_remaining = bytestream2_get_bytes_left(&gb);
1617  if (bytes_remaining > 0) {
1618  bytestream2_skip(&gb, bytes_remaining);
1619  }
1620  ) /* End of CASE */
1621  CASE(ADPCM_IMA_EA_EACS,
1622  for (int i = 0; i <= st; i++) {
1623  c->status[i].step_index = bytestream2_get_le32u(&gb);
1624  if (c->status[i].step_index > 88u) {
1625  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1626  i, c->status[i].step_index);
1627  return AVERROR_INVALIDDATA;
1628  }
1629  }
1630  for (int i = 0; i <= st; i++) {
1631  c->status[i].predictor = bytestream2_get_le32u(&gb);
1632  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1633  return AVERROR_INVALIDDATA;
1634  }
1635 
1636  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1637  int byte = bytestream2_get_byteu(&gb);
1638  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1639  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1640  }
1641  ) /* End of CASE */
1642  CASE(ADPCM_IMA_EA_SEAD,
1643  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1644  int byte = bytestream2_get_byteu(&gb);
1645  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1646  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1647  }
1648  ) /* End of CASE */
1649  CASE(ADPCM_EA,
1650  int previous_left_sample, previous_right_sample;
1651  int current_left_sample, current_right_sample;
1652  int next_left_sample, next_right_sample;
1653  int coeff1l, coeff2l, coeff1r, coeff2r;
1654  int shift_left, shift_right;
1655 
1656  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte (stereo) or 15-byte (mono) pieces,
1657  each coding 28 stereo/mono samples. */
1658 
1659  if (channels != 2 && channels != 1)
1660  return AVERROR_INVALIDDATA;
1661 
1662  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1663  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1664  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1665  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1666 
1667  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1668  int byte = bytestream2_get_byteu(&gb);
1669  coeff1l = ea_adpcm_table[ byte >> 4 ];
1670  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1671  coeff1r = ea_adpcm_table[ byte & 0x0F];
1672  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1673 
1674  if (channels == 2){
1675  byte = bytestream2_get_byteu(&gb);
1676  shift_left = 20 - (byte >> 4);
1677  shift_right = 20 - (byte & 0x0F);
1678  } else{
1679  /* Mono packs the shift into the coefficient byte's lower nibble instead */
1680  shift_left = 20 - (byte & 0x0F);
1681  }
1682 
1683  for (int count2 = 0; count2 < (channels == 2 ? 28 : 14); count2++) {
1684  byte = bytestream2_get_byteu(&gb);
1685  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1686 
1687  next_left_sample = (next_left_sample +
1688  (current_left_sample * coeff1l) +
1689  (previous_left_sample * coeff2l) + 0x80) >> 8;
1690 
1691  previous_left_sample = current_left_sample;
1692  current_left_sample = av_clip_int16(next_left_sample);
1693  *samples++ = current_left_sample;
1694 
1695  if (channels == 2){
1696  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1697 
1698  next_right_sample = (next_right_sample +
1699  (current_right_sample * coeff1r) +
1700  (previous_right_sample * coeff2r) + 0x80) >> 8;
1701 
1702  previous_right_sample = current_right_sample;
1703  current_right_sample = av_clip_int16(next_right_sample);
1704  *samples++ = current_right_sample;
1705  } else {
1706  next_left_sample = sign_extend(byte, 4) * (1 << shift_left);
1707 
1708  next_left_sample = (next_left_sample +
1709  (current_left_sample * coeff1l) +
1710  (previous_left_sample * coeff2l) + 0x80) >> 8;
1711 
1712  previous_left_sample = current_left_sample;
1713  current_left_sample = av_clip_int16(next_left_sample);
1714 
1715  *samples++ = current_left_sample;
1716  }
1717  }
1718  }
1719  bytestream2_skip(&gb, channels == 2 ? 2 : 3); // Skip terminating NULs
1720  ) /* End of CASE */
1721  CASE(ADPCM_EA_MAXIS_XA,
1722  int coeff[2][2], shift[2];
1723 
1724  for (int channel = 0; channel < channels; channel++) {
1725  int byte = bytestream2_get_byteu(&gb);
1726  for (int i = 0; i < 2; i++)
1727  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1728  shift[channel] = 20 - (byte & 0x0F);
1729  }
1730  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1731  int byte[2];
1732 
1733  byte[0] = bytestream2_get_byteu(&gb);
1734  if (st) byte[1] = bytestream2_get_byteu(&gb);
1735  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1736  for (int channel = 0; channel < channels; channel++) {
1737  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1738  sample = (sample +
1739  c->status[channel].sample1 * coeff[channel][0] +
1740  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1741  c->status[channel].sample2 = c->status[channel].sample1;
1742  c->status[channel].sample1 = av_clip_int16(sample);
1743  *samples++ = c->status[channel].sample1;
1744  }
1745  }
1746  }
1747  bytestream2_seek(&gb, 0, SEEK_END);
1748  ) /* End of CASE */
1749 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
1750  case AV_CODEC_ID_ADPCM_EA_R1:
1751  case AV_CODEC_ID_ADPCM_EA_R2:
1752  case AV_CODEC_ID_ADPCM_EA_R3: {
1753  /* channel numbering
1754  2chan: 0=fl, 1=fr
1755  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1756  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1757  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1758  int previous_sample, current_sample, next_sample;
1759  int coeff1, coeff2;
1760  int shift;
1761  uint16_t *samplesC;
1762  int count = 0;
1763  int offsets[6];
1764 
1765  for (unsigned channel = 0; channel < channels; channel++)
1766  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1767  bytestream2_get_le32(&gb)) +
1768  (channels + 1) * 4;
1769 
1770  for (unsigned channel = 0; channel < channels; channel++) {
1771  int count1;
1772 
1773  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1774  samplesC = samples_p[channel];
1775 
1776  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1777  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1778  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1779  } else {
1780  current_sample = c->status[channel].predictor;
1781  previous_sample = c->status[channel].prev_sample;
1782  }
1783 
1784  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1785  int byte = bytestream2_get_byte(&gb);
1786  if (byte == 0xEE) { /* only seen in R2 and R3 */
1787  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1788  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1789 
1790  for (int count2 = 0; count2 < 28; count2++)
1791  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1792  } else {
1793  coeff1 = ea_adpcm_table[ byte >> 4 ];
1794  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1795  shift = 20 - (byte & 0x0F);
1796 
1797  for (int count2 = 0; count2 < 28; count2++) {
1798  if (count2 & 1)
1799  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1800  else {
1801  byte = bytestream2_get_byte(&gb);
1802  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1803  }
1804 
1805  next_sample += (current_sample * coeff1) +
1806  (previous_sample * coeff2);
1807  next_sample = av_clip_int16(next_sample >> 8);
1808 
1809  previous_sample = current_sample;
1810  current_sample = next_sample;
1811  *samplesC++ = current_sample;
1812  }
1813  }
1814  }
1815  if (!count) {
1816  count = count1;
1817  } else if (count != count1) {
1818  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1819  count = FFMAX(count, count1);
1820  }
1821 
1822  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1823  c->status[channel].predictor = current_sample;
1824  c->status[channel].prev_sample = previous_sample;
1825  }
1826  }
1827 
1828  frame->nb_samples = count * 28;
1829  bytestream2_seek(&gb, 0, SEEK_END);
1830  break;
1831  }
1832 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
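Each EA R1/R2/R3 packet starts with one 32-bit offset per channel, and the per-channel data is addressed relative to the end of that table. A hedged sketch of just the offset-table read, mirroring the loop above (helper name is ad hoc):

/* Read the per-channel data offsets for EA R1/R2/R3; values are rebased past
 * the (channels + 1) 32-bit words at the start of the packet. */
static void ea_rx_read_offsets(GetByteContext *gb, int channels,
                               int big_endian, int offsets[6])
{
    for (int ch = 0; ch < channels; ch++)
        offsets[ch] = (big_endian ? bytestream2_get_be32(gb)
                                  : bytestream2_get_le32(gb)) +
                      (channels + 1) * 4;
}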
1833  CASE(ADPCM_EA_XAS,
1834  for (int channel=0; channel < channels; channel++) {
1835  int coeff[2][4], shift[4];
1836  int16_t *s = samples_p[channel];
1837  for (int n = 0; n < 4; n++, s += 32) {
1838  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1839  for (int i = 0; i < 2; i++)
1840  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1841  s[0] = val & ~0x0F;
1842 
1843  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1844  shift[n] = 20 - (val & 0x0F);
1845  s[1] = val & ~0x0F;
1846  }
1847 
1848  for (int m = 2; m < 32; m += 2) {
1849  s = &samples_p[channel][m];
1850  for (int n = 0; n < 4; n++, s += 32) {
1851  int level, pred;
1852  int byte = bytestream2_get_byteu(&gb);
1853 
1854  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1855  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1856  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1857 
1858  level = sign_extend(byte, 4) * (1 << shift[n]);
1859  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1860  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1861  }
1862  }
1863  }
1864  ) /* End of CASE */
1865  CASE(ADPCM_IMA_ACORN,
1866  for (int channel = 0; channel < channels; channel++) {
1867  ADPCMChannelStatus *cs = &c->status[channel];
1868  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1869  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1870  if (cs->step_index > 88u){
1871  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1872  channel, cs->step_index);
1873  return AVERROR_INVALIDDATA;
1874  }
1875  }
1876  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1877  int byte = bytestream2_get_byteu(&gb);
1878  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1879  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1880  }
1881  ) /* End of CASE */
1882  CASE(ADPCM_IMA_AMV,
1883  av_assert0(channels == 1);
1884 
1885  /*
1886  * Header format:
1887  * int16_t predictor;
1888  * uint8_t step_index;
1889  * uint8_t reserved;
1890  * uint32_t frame_size;
1891  *
1892  * Some implementations have step_index as 16-bits, but others
1893  * only use the lower 8 and store garbage in the upper 8.
1894  */
1895  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1896  c->status[0].step_index = bytestream2_get_byteu(&gb);
1897  bytestream2_skipu(&gb, 5);
1898  if (c->status[0].step_index > 88u) {
1899  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1900  c->status[0].step_index);
1901  return AVERROR_INVALIDDATA;
1902  }
1903 
1904  for (int n = nb_samples >> 1; n > 0; n--) {
1905  int v = bytestream2_get_byteu(&gb);
1906 
1907  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1908  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1909  }
1910 
1911  if (nb_samples & 1) {
1912  int v = bytestream2_get_byteu(&gb);
1913  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1914 
1915  if (v & 0x0F) {
1916  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1917  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1918  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1919  }
1920  }
1921  ) /* End of CASE */
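For reference, the 8-byte AMV header documented in the comment above could be parsed from a raw buffer as below. This is an illustrative sketch only: the struct and function names are made up, and the frame_size endianness is assumed little-endian to match the predictor (the decoder above simply skips those bytes).

struct amv_adpcm_header {
    int16_t  predictor;   /* little-endian in the bitstream      */
    uint8_t  step_index;  /* only the low 8 bits are meaningful  */
    uint32_t frame_size;  /* skipped by the decoder above        */
};

static struct amv_adpcm_header amv_parse_header(const uint8_t *buf)
{
    struct amv_adpcm_header h;
    h.predictor  = (int16_t)(buf[0] | (buf[1] << 8));
    h.step_index = buf[2];                    /* buf[3] is reserved/garbage */
    h.frame_size = buf[4] | (buf[5] << 8) | (buf[6] << 16) |
                   ((uint32_t)buf[7] << 24);  /* assumed little-endian */
    return h;
}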
1922  CASE(ADPCM_IMA_SMJPEG,
1923  for (int i = 0; i < channels; i++) {
1924  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1925  c->status[i].step_index = bytestream2_get_byteu(&gb);
1926  bytestream2_skipu(&gb, 1);
1927  if (c->status[i].step_index > 88u) {
1928  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1929  c->status[i].step_index);
1930  return AVERROR_INVALIDDATA;
1931  }
1932  }
1933 
1934  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1935  int v = bytestream2_get_byteu(&gb);
1936 
1937  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1938  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1939  }
1940  ) /* End of CASE */
1941  CASE(ADPCM_CT,
1942  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1943  int v = bytestream2_get_byteu(&gb);
1944  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1945  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1946  }
1947  ) /* End of CASE */
1948 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
1949  CONFIG_ADPCM_SBPRO_4_DECODER
1950  case AV_CODEC_ID_ADPCM_SBPRO_4:
1951  case AV_CODEC_ID_ADPCM_SBPRO_3:
1952  case AV_CODEC_ID_ADPCM_SBPRO_2:
1953  if (!c->status[0].step_index) {
1954  /* the first byte is a raw sample */
1955  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1956  if (st)
1957  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1958  c->status[0].step_index = 1;
1959  nb_samples--;
1960  }
1961  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1962  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1963  int byte = bytestream2_get_byteu(&gb);
1964  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1965  byte >> 4, 4, 0);
1966  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1967  byte & 0x0F, 4, 0);
1968  }
1969  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1970  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
1971  int byte = bytestream2_get_byteu(&gb);
1972  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1973  byte >> 5 , 3, 0);
1974  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1975  (byte >> 2) & 0x07, 3, 0);
1976  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1977  byte & 0x03, 2, 0);
1978  }
1979  } else {
1980  for (int n = nb_samples >> (2 - st); n > 0; n--) {
1981  int byte = bytestream2_get_byteu(&gb);
1982  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1983  byte >> 6 , 2, 2);
1984  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1985  (byte >> 4) & 0x03, 2, 2);
1986  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1987  (byte >> 2) & 0x03, 2, 2);
1988  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1989  byte & 0x03, 2, 2);
1990  }
1991  }
1992  break;
1993 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
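The 2.6-bit Sound Blaster Pro mode above packs three codes into each byte as 3 + 3 + 2 bits, which is why its loop count is (nb_samples << st) / 3. A tiny illustrative helper (name is ad hoc) that just performs the split:

/* Split one SB Pro 2.6-bit byte into its three codes (3 + 3 + 2 bits). */
static void sbpro3_split_byte(int byte, int codes[3])
{
    codes[0] =  byte >> 5;          /* bits 7..5 */
    codes[1] = (byte >> 2) & 0x07;  /* bits 4..2 */
    codes[2] =  byte       & 0x03;  /* bits 1..0 */
}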
1994  CASE(ADPCM_SWF,
1995  adpcm_swf_decode(avctx, buf, buf_size, samples);
1996  bytestream2_seek(&gb, 0, SEEK_END);
1997  ) /* End of CASE */
1998  CASE(ADPCM_YAMAHA,
1999  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2000  int v = bytestream2_get_byteu(&gb);
2001  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
2002  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
2003  }
2004  ) /* End of CASE */
2005  CASE(ADPCM_AICA,
2006  for (int channel = 0; channel < channels; channel++) {
2007  samples = samples_p[channel];
2008  for (int n = nb_samples >> 1; n > 0; n--) {
2009  int v = bytestream2_get_byteu(&gb);
2010  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
2011  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
2012  }
2013  }
2014  ) /* End of CASE */
2015  CASE(ADPCM_AFC,
2016  int samples_per_block;
2017  int blocks;
2018 
2019  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
2020  samples_per_block = avctx->extradata[0] / 16;
2021  blocks = nb_samples / avctx->extradata[0];
2022  } else {
2023  samples_per_block = nb_samples / 16;
2024  blocks = 1;
2025  }
2026 
2027  for (int m = 0; m < blocks; m++) {
2028  for (int channel = 0; channel < channels; channel++) {
2029  int prev1 = c->status[channel].sample1;
2030  int prev2 = c->status[channel].sample2;
2031 
2032  samples = samples_p[channel] + m * 16;
2033  /* Read in every sample for this channel. */
2034  for (int i = 0; i < samples_per_block; i++) {
2035  int byte = bytestream2_get_byteu(&gb);
2036  int scale = 1 << (byte >> 4);
2037  int index = byte & 0xf;
2038  int factor1 = afc_coeffs[0][index];
2039  int factor2 = afc_coeffs[1][index];
2040 
2041  /* Decode 16 samples. */
2042  for (int n = 0; n < 16; n++) {
2043  int32_t sampledat;
2044 
2045  if (n & 1) {
2046  sampledat = sign_extend(byte, 4);
2047  } else {
2048  byte = bytestream2_get_byteu(&gb);
2049  sampledat = sign_extend(byte >> 4, 4);
2050  }
2051 
2052  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
2053  sampledat * scale;
2054  *samples = av_clip_int16(sampledat);
2055  prev2 = prev1;
2056  prev1 = *samples++;
2057  }
2058  }
2059 
2060  c->status[channel].sample1 = prev1;
2061  c->status[channel].sample2 = prev2;
2062  }
2063  }
2064  bytestream2_seek(&gb, 0, SEEK_END);
2065  ) /* End of CASE */
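Each AFC frame byte selects a scale (high nibble, as a power of two) and a coefficient pair (low nibble), and every output sample is predicted from the two previous ones. A minimal sketch of that per-sample step (helper name is ad hoc; nibble is assumed already sign-extended):

/* One AFC sample: prediction from the two previous samples plus the scaled
 * 4-bit residual, exactly as in the inner loop above. */
static int16_t afc_predict_one(int nibble, int scale, int factor1, int factor2,
                               int *prev1, int *prev2)
{
    int sample = ((*prev1 * factor1 + *prev2 * factor2) >> 11) + nibble * scale;
    sample = av_clip_int16(sample);
    *prev2 = *prev1;
    *prev1 = sample;
    return sample;
}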
2066 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2067  case AV_CODEC_ID_ADPCM_THP:
2068  case AV_CODEC_ID_ADPCM_THP_LE:
2069  {
2070  int table[14][16];
2071 
2072 #define THP_GET16(g) \
2073  sign_extend( \
2074  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2075  bytestream2_get_le16u(&(g)) : \
2076  bytestream2_get_be16u(&(g)), 16)
2077 
2078  if (avctx->extradata) {
2079  GetByteContext tb;
2080  if (avctx->extradata_size < 32 * channels) {
2081  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2082  return AVERROR_INVALIDDATA;
2083  }
2084 
2085  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2086  for (int i = 0; i < channels; i++)
2087  for (int n = 0; n < 16; n++)
2088  table[i][n] = THP_GET16(tb);
2089  } else {
2090  for (int i = 0; i < channels; i++)
2091  for (int n = 0; n < 16; n++)
2092  table[i][n] = THP_GET16(gb);
2093 
2094  if (!c->has_status) {
2095  /* Initialize the previous sample. */
2096  for (int i = 0; i < channels; i++) {
2097  c->status[i].sample1 = THP_GET16(gb);
2098  c->status[i].sample2 = THP_GET16(gb);
2099  }
2100  c->has_status = 1;
2101  } else {
2102  bytestream2_skip(&gb, channels * 4);
2103  }
2104  }
2105 
2106  for (int ch = 0; ch < channels; ch++) {
2107  samples = samples_p[ch];
2108 
2109  /* Read in every sample for this channel. */
2110  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2111  int byte = bytestream2_get_byteu(&gb);
2112  int index = (byte >> 4) & 7;
2113  unsigned int exp = byte & 0x0F;
2114  int64_t factor1 = table[ch][index * 2];
2115  int64_t factor2 = table[ch][index * 2 + 1];
2116 
2117  /* Decode 14 samples. */
2118  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2119  int32_t sampledat;
2120 
2121  if (n & 1) {
2122  sampledat = sign_extend(byte, 4);
2123  } else {
2124  byte = bytestream2_get_byteu(&gb);
2125  sampledat = sign_extend(byte >> 4, 4);
2126  }
2127 
2128  sampledat = ((c->status[ch].sample1 * factor1
2129  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2130  *samples = av_clip_int16(sampledat);
2131  c->status[ch].sample2 = c->status[ch].sample1;
2132  c->status[ch].sample1 = *samples++;
2133  }
2134  }
2135  }
2136  break;
2137  }
2138 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
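THP carries 16 coefficients (eight predictor pairs) per channel, either in extradata or at the start of the packet; each frame byte then picks one pair plus an exponent. A hedged sketch of that header-byte split (helper name is ad hoc; table16 is assumed to hold the 16 coefficients already read for one channel):

/* Split a THP frame header byte into its predictor pair and scale. */
static void thp_header_split(int byte, const int table16[16],
                             int *factor1, int *factor2, int *scale)
{
    int index = (byte >> 4) & 7;
    *factor1  = table16[index * 2];
    *factor2  = table16[index * 2 + 1];
    *scale    = 1 << (byte & 0x0F);
}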
2139  CASE(ADPCM_DTK,
2140  for (int channel = 0; channel < channels; channel++) {
2141  samples = samples_p[channel];
2142 
2143  /* Read in every sample for this channel. */
2144  for (int i = 0; i < nb_samples / 28; i++) {
2145  int byte, header;
2146  if (channel)
2147  bytestream2_skipu(&gb, 1);
2148  header = bytestream2_get_byteu(&gb);
2149  bytestream2_skipu(&gb, 3 - channel);
2150 
2151  /* Decode 28 samples. */
2152  for (int n = 0; n < 28; n++) {
2153  int32_t sampledat, prev;
2154 
2155  switch (header >> 4) {
2156  case 1:
2157  prev = (c->status[channel].sample1 * 0x3c);
2158  break;
2159  case 2:
2160  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2161  break;
2162  case 3:
2163  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2164  break;
2165  default:
2166  prev = 0;
2167  }
2168 
2169  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2170 
2171  byte = bytestream2_get_byteu(&gb);
2172  if (!channel)
2173  sampledat = sign_extend(byte, 4);
2174  else
2175  sampledat = sign_extend(byte >> 4, 4);
2176 
2177  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2178  *samples++ = av_clip_int16(sampledat >> 6);
2179  c->status[channel].sample2 = c->status[channel].sample1;
2180  c->status[channel].sample1 = sampledat;
2181  }
2182  }
2183  if (!channel)
2184  bytestream2_seek(&gb, 0, SEEK_SET);
2185  }
2186  ) /* End of CASE */
2187  CASE(ADPCM_PSX,
2188  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2189  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
2190  for (int channel = 0; channel < channels; channel++) {
2191  samples = samples_p[channel] + block * nb_samples_per_block;
2192  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2193 
2194  /* Read in every sample for this channel. */
2195  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2196  int filter, shift, flag, byte;
2197 
2198  filter = bytestream2_get_byteu(&gb);
2199  shift = filter & 0xf;
2200  filter = filter >> 4;
2201  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2202  return AVERROR_INVALIDDATA;
2203  flag = bytestream2_get_byteu(&gb) & 0x7;
2204 
2205  /* Decode 28 samples. */
2206  for (int n = 0; n < 28; n++) {
2207  int sample = 0, scale;
2208 
2209  if (n & 1) {
2210  scale = sign_extend(byte >> 4, 4);
2211  } else {
2212  byte = bytestream2_get_byteu(&gb);
2213  scale = sign_extend(byte, 4);
2214  }
2215 
2216  if (flag < 0x07) {
2217  scale = scale * (1 << 12);
2218  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2219  }
2220  *samples++ = av_clip_int16(sample);
2221  c->status[channel].sample2 = c->status[channel].sample1;
2222  c->status[channel].sample1 = sample;
2223  }
2224  }
2225  }
2226  }
2227  ) /* End of CASE */
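PSX ADPCM stores 16-byte frames per channel: a filter/shift byte, a flag byte, then 14 bytes of nibbles coding 28 samples. A minimal sketch of one sample expansion under those assumptions (helper name is ad hoc; it assumes flag < 0x07, i.e. the non-silent path above):

/* One PSX ADPCM sample.  Note that, as above, the history keeps the
 * unclipped value while the emitted sample is clipped to 16 bits. */
static int16_t psx_expand_one(int nibble, int filter, int shift,
                              int *sample1, int *sample2)
{
    int scale  = sign_extend(nibble, 4) * (1 << 12);
    int sample = (scale >> shift) +
                 (*sample1 * xa_adpcm_table[filter][0] +
                  *sample2 * xa_adpcm_table[filter][1]) / 64;

    *sample2 = *sample1;
    *sample1 = sample;
    return av_clip_int16(sample);
}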
2228  CASE(ADPCM_ARGO,
2229  /*
2230  * The format of each block:
2231  * uint8_t left_control;
2232  * uint4_t left_samples[nb_samples];
2233  * ---- and if stereo ----
2234  * uint8_t right_control;
2235  * uint4_t right_samples[nb_samples];
2236  *
2237  * Format of the control byte:
2238  * MSB [SSSSRDRR] LSB
2239  * S = (Shift Amount - 2)
2240  * D = Decoder flag.
2241  * R = Reserved
2242  *
2243  * Each block relies on the previous two samples of each channel.
2244  * They should be 0 initially.
2245  */
2246  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2247  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2248  ADPCMChannelStatus *cs = c->status + channel;
2249  int control, shift;
2250 
2251  samples = samples_p[channel] + block * 32;
2252 
2253  /* Get the control byte and decode the samples, 2 at a time. */
2254  control = bytestream2_get_byteu(&gb);
2255  shift = (control >> 4) + 2;
2256 
2257  for (int n = 0; n < 16; n++) {
2258  int sample = bytestream2_get_byteu(&gb);
2259  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2260  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2261  }
2262  }
2263  }
2264  ) /* End of CASE */
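The control byte documented above carries the shift amount (stored minus 2) in its top four bits and the decoder flag in bit 2. A tiny illustrative split (helper name is ad hoc):

/* Unpack the Argonaut control byte: MSB [SSSSRDRR] LSB. */
static void argo_split_control(int control, int *shift, int *flag)
{
    *shift = (control >> 4) + 2;  /* S bits store (shift - 2) */
    *flag  = control & 0x04;      /* D bit                    */
}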
2265  CASE(ADPCM_ZORK,
2266  for (int n = 0; n < nb_samples * channels; n++) {
2267  int v = bytestream2_get_byteu(&gb);
2268  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2269  }
2270  ) /* End of CASE */
2271  CASE(ADPCM_IMA_MTF,
2272  for (int n = nb_samples / 2; n > 0; n--) {
2273  for (int channel = 0; channel < channels; channel++) {
2274  int v = bytestream2_get_byteu(&gb);
2275  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2276  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2277  }
2278  samples += channels;
2279  }
2280  ) /* End of CASE */
2281  default:
2282  av_assert0(0); // unsupported codec_id should not happen
2283  }
2284 
2285  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2286  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2287  return AVERROR_INVALIDDATA;
2288  }
2289 
2290  *got_frame_ptr = 1;
2291 
2292  if (avpkt->size < bytestream2_tell(&gb)) {
2293  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2294  return avpkt->size;
2295  }
2296 
2297  return bytestream2_tell(&gb);
2298 }
2299 
2300 static void adpcm_flush(AVCodecContext *avctx)
2301 {
2302  ADPCMDecodeContext *c = avctx->priv_data;
2303 
2304  /* Just nuke the entire state and re-init. */
2305  memset(c, 0, sizeof(ADPCMDecodeContext));
2306 
2307  switch(avctx->codec_id) {
2308  case AV_CODEC_ID_ADPCM_CT:
2309  c->status[0].step = c->status[1].step = 511;
2310  break;
2311 
2312  case AV_CODEC_ID_ADPCM_IMA_APC:
2313  if (avctx->extradata && avctx->extradata_size >= 8) {
2314  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2315  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2316  }
2317  break;
2318 
2319  case AV_CODEC_ID_ADPCM_IMA_APM:
2320  if (avctx->extradata && avctx->extradata_size >= 28) {
2321  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2322  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2323  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2324  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2325  }
2326  break;
2327 
2328  case AV_CODEC_ID_ADPCM_IMA_WS:
2329  if (avctx->extradata && avctx->extradata_size >= 2)
2330  c->vqa_version = AV_RL16(avctx->extradata);
2331  break;
2332  default:
2333  /* Other codecs may want to handle this during decoding. */
2334  c->has_status = 0;
2335  return;
2336  }
2337 
2338  c->has_status = 1;
2339 }
2340 
2341 
2342 static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
2343                                                         AV_SAMPLE_FMT_NONE };
2344 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2345                                                         AV_SAMPLE_FMT_NONE };
2346 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2347                                                         AV_SAMPLE_FMT_S16P,
2348                                                         AV_SAMPLE_FMT_NONE };
2349 
2350 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2351 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2352 const FFCodec ff_ ## name_ ## _decoder = { \
2353  .p.name = #name_, \
2354  CODEC_LONG_NAME(long_name_), \
2355  .p.type = AVMEDIA_TYPE_AUDIO, \
2356  .p.id = id_, \
2357  .p.capabilities = AV_CODEC_CAP_DR1, \
2358  .p.sample_fmts = sample_fmts_, \
2359  .priv_data_size = sizeof(ADPCMDecodeContext), \
2360  .init = adpcm_decode_init, \
2361  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2362  .flush = adpcm_flush, \
2363 };
2364 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2365  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2366 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2367  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2368 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2369  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2370  name, sample_fmts, long_name)
2371 
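The three-level macro chain above exists so that the per-codec CONFIG_*_DECODER value (0 or 1) selects either the empty ADPCM_DECODER_0 or the real ADPCM_DECODER_1. For example, the first table entry below, ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie"), expands (when the decoder is enabled) to roughly:

const FFCodec ff_adpcm_4xm_decoder = {
    .p.name         = "adpcm_4xm",
    CODEC_LONG_NAME("ADPCM 4X Movie"),
    .p.type         = AVMEDIA_TYPE_AUDIO,
    .p.id           = AV_CODEC_ID_ADPCM_4XM,
    .p.capabilities = AV_CODEC_CAP_DR1,
    .p.sample_fmts  = sample_fmts_s16p,
    .priv_data_size = sizeof(ADPCMDecodeContext),
    .init           = adpcm_decode_init,
    FF_CODEC_DECODE_CB(adpcm_decode_frame),
    .flush          = adpcm_flush,
};

With the config macro set to 0, the same invocation expands to nothing, so disabled decoders cost no object-file space.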
2372 /* Note: Do not forget to add new entries to the Makefile as well. */
2373 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2374 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2375 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2376 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2377 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2378 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2379 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2380 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2381 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2382 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2383 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2384 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2385 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2386 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2387 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2388 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2389 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2390 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2391 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2392 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2393 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2394 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2395 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2396 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2397 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2398 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2399 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2400 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2401 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2402 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2403 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2404 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2405 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2406 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2407 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2408 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2409 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2410 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2411 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2412 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2413 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2414 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2415 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2416 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2417 ADPCM_DECODER(ADPCM_XMD, sample_fmts_s16p, adpcm_xmd, "ADPCM Konami XMD")
2418 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2419 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")