FFmpeg
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 
38 #include "config_components.h"
39 
40 #include "avcodec.h"
41 #include "get_bits.h"
42 #include "bytestream.h"
43 #include "adpcm.h"
44 #include "adpcm_data.h"
45 #include "codec_internal.h"
46 #include "decode.h"
47 
48 /**
49  * @file
50  * ADPCM decoders
51  * Features and limitations:
52  *
53  * Reference documents:
54  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
55  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
56  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
57  * http://openquicktime.sourceforge.net/
58  * XAnim sources (xa_codec.c) http://xanim.polter.net/
59  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
60  * SoX source code http://sox.sourceforge.net/
61  *
62  * CD-ROM XA:
63  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
64  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
65  * readstr http://www.geocities.co.jp/Playtown/2004/
66  */
67 
68 #define CASE_0(codec_id, ...)
69 #define CASE_1(codec_id, ...) \
70  case codec_id: \
71  { __VA_ARGS__ } \
72  break;
73 #define CASE_2(enabled, codec_id, ...) \
74  CASE_ ## enabled(codec_id, __VA_ARGS__)
75 #define CASE_3(config, codec_id, ...) \
76  CASE_2(config, codec_id, __VA_ARGS__)
77 #define CASE(codec, ...) \
78  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
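/* [Editorial aside, not part of the original source] Illustrative expansion of
 * the macro chain above: with CONFIG_ADPCM_IMA_QT_DECODER defined to 1,
 *     CASE(ADPCM_IMA_QT, body)
 * goes through CASE_3 -> CASE_2 -> CASE_1 and becomes
 *     case AV_CODEC_ID_ADPCM_IMA_QT: { body } break;
 * whereas a config value of 0 selects CASE_0, which expands to nothing, so
 * disabled decoders are compiled out of the big decode switch entirely. */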
79 
80 /* These are for CD-ROM XA ADPCM */
81 static const int8_t xa_adpcm_table[5][2] = {
82  { 0, 0 },
83  { 60, 0 },
84  { 115, -52 },
85  { 98, -55 },
86  { 122, -60 }
87 };
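/* [Editorial aside, not part of the original source] These are the XA
 * prediction filter coefficients in 6-bit fixed point: xa_decode() below
 * applies them as (s_1*f0 + s_2*f1 + 32) >> 6, so the effective weights are
 * f0/64 and f1/64 (e.g. 60/64 = 0.9375 for filter 1). */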
88 
89 static const int16_t afc_coeffs[2][16] = {
90  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
91  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
92 };
93 
94 static const int16_t ea_adpcm_table[] = {
95  0, 240, 460, 392,
96  0, 0, -208, -220,
97  0, 1, 3, 4,
98  7, 8, 10, 11,
99  0, -1, -3, -4
100 };
101 
102 /*
103  * Dumped from the binaries:
104  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
105  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
106  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
107  */
108 static const int8_t ima_cunning_index_table[9] = {
109  -1, -1, -1, -1, 1, 2, 3, 4, -1
110 };
111 
112 /*
113  * Dumped from the binaries:
114  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
115  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
116  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
117  */
118 static const int16_t ima_cunning_step_table[61] = {
119  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
120  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
121  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
122  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
123  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
124  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
125 };
126 
127 static const int8_t adpcm_index_table2[4] = {
128  -1, 2,
129  -1, 2,
130 };
131 
132 static const int8_t adpcm_index_table3[8] = {
133  -1, -1, 1, 2,
134  -1, -1, 1, 2,
135 };
136 
137 static const int8_t adpcm_index_table5[32] = {
138  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
139  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
140 };
141 
142 static const int8_t * const adpcm_index_tables[4] = {
143  &adpcm_index_table2[0],
 144  &adpcm_index_table3[0],
 145  &ff_adpcm_index_table[0],
 146  &adpcm_index_table5[0],
147 };
148 
149 static const int16_t mtaf_stepsize[32][16] = {
150  { 1, 5, 9, 13, 16, 20, 24, 28,
151  -1, -5, -9, -13, -16, -20, -24, -28, },
152  { 2, 6, 11, 15, 20, 24, 29, 33,
153  -2, -6, -11, -15, -20, -24, -29, -33, },
154  { 2, 7, 13, 18, 23, 28, 34, 39,
155  -2, -7, -13, -18, -23, -28, -34, -39, },
156  { 3, 9, 15, 21, 28, 34, 40, 46,
157  -3, -9, -15, -21, -28, -34, -40, -46, },
158  { 3, 11, 18, 26, 33, 41, 48, 56,
159  -3, -11, -18, -26, -33, -41, -48, -56, },
160  { 4, 13, 22, 31, 40, 49, 58, 67,
161  -4, -13, -22, -31, -40, -49, -58, -67, },
162  { 5, 16, 26, 37, 48, 59, 69, 80,
163  -5, -16, -26, -37, -48, -59, -69, -80, },
164  { 6, 19, 31, 44, 57, 70, 82, 95,
165  -6, -19, -31, -44, -57, -70, -82, -95, },
166  { 7, 22, 38, 53, 68, 83, 99, 114,
167  -7, -22, -38, -53, -68, -83, -99, -114, },
168  { 9, 27, 45, 63, 81, 99, 117, 135,
169  -9, -27, -45, -63, -81, -99, -117, -135, },
170  { 10, 32, 53, 75, 96, 118, 139, 161,
171  -10, -32, -53, -75, -96, -118, -139, -161, },
172  { 12, 38, 64, 90, 115, 141, 167, 193,
173  -12, -38, -64, -90, -115, -141, -167, -193, },
174  { 15, 45, 76, 106, 137, 167, 198, 228,
175  -15, -45, -76, -106, -137, -167, -198, -228, },
176  { 18, 54, 91, 127, 164, 200, 237, 273,
177  -18, -54, -91, -127, -164, -200, -237, -273, },
178  { 21, 65, 108, 152, 195, 239, 282, 326,
179  -21, -65, -108, -152, -195, -239, -282, -326, },
180  { 25, 77, 129, 181, 232, 284, 336, 388,
181  -25, -77, -129, -181, -232, -284, -336, -388, },
182  { 30, 92, 153, 215, 276, 338, 399, 461,
183  -30, -92, -153, -215, -276, -338, -399, -461, },
184  { 36, 109, 183, 256, 329, 402, 476, 549,
185  -36, -109, -183, -256, -329, -402, -476, -549, },
186  { 43, 130, 218, 305, 392, 479, 567, 654,
187  -43, -130, -218, -305, -392, -479, -567, -654, },
188  { 52, 156, 260, 364, 468, 572, 676, 780,
189  -52, -156, -260, -364, -468, -572, -676, -780, },
190  { 62, 186, 310, 434, 558, 682, 806, 930,
191  -62, -186, -310, -434, -558, -682, -806, -930, },
192  { 73, 221, 368, 516, 663, 811, 958, 1106,
193  -73, -221, -368, -516, -663, -811, -958, -1106, },
194  { 87, 263, 439, 615, 790, 966, 1142, 1318,
195  -87, -263, -439, -615, -790, -966, -1142, -1318, },
196  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
197  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
198  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
199  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
200  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
201  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
202  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
203  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
204  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
205  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
206  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
207  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
208  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
209  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
210  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
211  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
212  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
213  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
214 };
215 
216 static const int16_t oki_step_table[49] = {
217  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
218  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
219  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
220  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
221  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
222 };
223 
 224 // padded to zero where table size is less than 16
225 static const int8_t swf_index_tables[4][16] = {
226  /*2*/ { -1, 2 },
227  /*3*/ { -1, -1, 2, 4 },
228  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
229  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
230 };
231 
232 static const int8_t zork_index_table[8] = {
233  -1, -1, -1, 1, 4, 7, 10, 12,
234 };
235 
236 static const int8_t mtf_index_table[16] = {
237  8, 6, 4, 2, -1, -1, -1, -1,
238  -1, -1, -1, -1, 2, 4, 6, 8,
239 };
240 
241 /* end of tables */
242 
 243 typedef struct ADPCMDecodeContext {
 244  ADPCMChannelStatus status[14];
 245  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
 246  int has_status; /**< Status flag. Reset to 0 after a flush. */
 247 } ADPCMDecodeContext;
 248 
249 static void adpcm_flush(AVCodecContext *avctx);
 250 
 251 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
 252 {
253  ADPCMDecodeContext *c = avctx->priv_data;
254  unsigned int min_channels = 1;
255  unsigned int max_channels = 2;
256 
257  adpcm_flush(avctx);
258 
259  switch(avctx->codec->id) {
261  max_channels = 1;
262  break;
265  min_channels = 1;
266  break;
273  max_channels = 6;
274  break;
276  min_channels = 2;
277  max_channels = 8;
278  if (avctx->ch_layout.nb_channels & 1) {
279  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
280  return AVERROR_PATCHWELCOME;
281  }
282  break;
284  max_channels = 8;
285  if (avctx->ch_layout.nb_channels <= 0 ||
286  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
287  return AVERROR_INVALIDDATA;
288  break;
292  max_channels = 14;
293  break;
294  }
295  if (avctx->ch_layout.nb_channels < min_channels ||
296  avctx->ch_layout.nb_channels > max_channels) {
297  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
298  return AVERROR(EINVAL);
299  }
300 
301  switch(avctx->codec->id) {
303  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
304  return AVERROR_INVALIDDATA;
305  break;
307  if (avctx->bits_per_coded_sample != 4 ||
308  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
309  return AVERROR_INVALIDDATA;
310  break;
312  if (avctx->bits_per_coded_sample != 8)
313  return AVERROR_INVALIDDATA;
314  break;
315  default:
316  break;
317  }
318 
319  switch (avctx->codec->id) {
341  break;
 343  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
 344  AV_SAMPLE_FMT_S16;
 345  break;
 347  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
 348  AV_SAMPLE_FMT_S16;
 349  break;
350  default:
351  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
352  }
353  return 0;
354 }
355 
356 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
357 {
358  int delta, pred, step, add;
359 
360  pred = c->predictor;
361  delta = nibble & 7;
362  step = c->step;
363  add = (delta * 2 + 1) * step;
364  if (add < 0)
365  add = add + 7;
366 
367  if ((nibble & 8) == 0)
368  pred = av_clip(pred + (add >> 3), -32767, 32767);
369  else
370  pred = av_clip(pred - (add >> 3), -32767, 32767);
371 
372  switch (delta) {
373  case 7:
374  step *= 0x99;
375  break;
376  case 6:
377  c->step = av_clip(c->step * 2, 127, 24576);
378  c->predictor = pred;
379  return pred;
380  case 5:
381  step *= 0x66;
382  break;
383  case 4:
384  step *= 0x4d;
385  break;
386  default:
387  step *= 0x39;
388  break;
389  }
390 
391  if (step < 0)
392  step += 0x3f;
393 
394  c->step = step >> 6;
395  c->step = av_clip(c->step, 127, 24576);
396  c->predictor = pred;
397  return pred;
398 }
399 
400 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
401 {
402  int step_index;
403  int predictor;
404  int sign, delta, diff, step;
405 
406  step = ff_adpcm_step_table[c->step_index];
407  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
408  step_index = av_clip(step_index, 0, 88);
409 
410  sign = nibble & 8;
411  delta = nibble & 7;
 412  /* perform a direct multiplication instead of the series of jumps proposed by
 413  * the reference ADPCM implementation, since modern CPUs can do the mults
 414  * quickly enough */
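 /* [Editorial aside, not part of the original source] For shift == 3 the
  * multiplication below matches, up to intermediate truncation, the reference
  * bit-test ladder still visible in adpcm_ima_qt_expand_nibble() below:
  *     diff = step >> 3;
  *     if (delta & 4) diff += step;
  *     if (delta & 2) diff += step >> 1;
  *     if (delta & 1) diff += step >> 2;
  * because (2*delta + 1) * step = 2*delta*step + step. */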
415  diff = ((2 * delta + 1) * step) >> shift;
416  predictor = c->predictor;
417  if (sign) predictor -= diff;
418  else predictor += diff;
419 
420  c->predictor = av_clip_int16(predictor);
421  c->step_index = step_index;
422 
423  return (int16_t)c->predictor;
424 }
425 
426 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
427 {
428  int step_index;
429  int predictor;
430  int sign, delta, diff, step;
431 
432  step = ff_adpcm_step_table[c->step_index];
433  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
434  step_index = av_clip(step_index, 0, 88);
435 
436  sign = nibble & 8;
437  delta = nibble & 7;
438  diff = (delta * step) >> shift;
439  predictor = c->predictor;
440  if (sign) predictor -= diff;
441  else predictor += diff;
442 
443  c->predictor = av_clip_int16(predictor);
444  c->step_index = step_index;
445 
446  return (int16_t)c->predictor;
447 }
448 
449 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
450 {
451  int step_index, step, delta, predictor;
452 
453  step = ff_adpcm_step_table[c->step_index];
454 
455  delta = step * (2 * nibble - 15);
456  predictor = c->predictor + delta;
457 
458  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
459  c->predictor = av_clip_int16(predictor >> 4);
460  c->step_index = av_clip(step_index, 0, 88);
461 
462  return (int16_t)c->predictor;
463 }
464 
465 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
466 {
467  int step_index;
468  int predictor;
469  int step;
470 
471  nibble = sign_extend(nibble & 0xF, 4);
472 
473  step = ima_cunning_step_table[c->step_index];
474  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
475  step_index = av_clip(step_index, 0, 60);
476 
477  predictor = c->predictor + step * nibble;
478 
479  c->predictor = av_clip_int16(predictor);
480  c->step_index = step_index;
481 
482  return c->predictor;
483 }
 484 
 485 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
 486 {
487  int nibble, step_index, predictor, sign, delta, diff, step, shift;
488 
489  shift = bps - 1;
490  nibble = get_bits_le(gb, bps),
491  step = ff_adpcm_step_table[c->step_index];
492  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
493  step_index = av_clip(step_index, 0, 88);
494 
495  sign = nibble & (1 << shift);
496  delta = av_zero_extend(nibble, shift);
497  diff = ((2 * delta + 1) * step) >> shift;
498  predictor = c->predictor;
499  if (sign) predictor -= diff;
500  else predictor += diff;
501 
502  c->predictor = av_clip_int16(predictor);
503  c->step_index = step_index;
504 
505  return (int16_t)c->predictor;
506 }
507 
508 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
509 {
510  int step_index;
511  int predictor;
512  int diff, step;
513 
514  step = ff_adpcm_step_table[c->step_index];
515  step_index = c->step_index + ff_adpcm_index_table[nibble];
516  step_index = av_clip(step_index, 0, 88);
517 
518  diff = step >> 3;
519  if (nibble & 4) diff += step;
520  if (nibble & 2) diff += step >> 1;
521  if (nibble & 1) diff += step >> 2;
522 
523  if (nibble & 8)
524  predictor = c->predictor - diff;
525  else
526  predictor = c->predictor + diff;
527 
528  c->predictor = av_clip_int16(predictor);
529  c->step_index = step_index;
530 
531  return c->predictor;
532 }
533 
534 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
535 {
536  int predictor;
537 
538  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
539  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
540 
541  c->sample2 = c->sample1;
542  c->sample1 = av_clip_int16(predictor);
543  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
544  if (c->idelta < 16) c->idelta = 16;
545  if (c->idelta > INT_MAX/768) {
546  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
547  c->idelta = INT_MAX/768;
548  }
549 
550  return c->sample1;
551 }
552 
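 /* [Editorial aside, not part of the original source] OKI/Dialogic ADPCM
  * operates on 12-bit samples: the predictor below is clipped with
  * av_clip_intp2(..., 11), i.e. to [-2048, 2047], and the return value is
  * scaled by 16 to reach the 16-bit output range. */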
553 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
554 {
555  int step_index, predictor, sign, delta, diff, step;
556 
557  step = oki_step_table[c->step_index];
558  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
559  step_index = av_clip(step_index, 0, 48);
560 
561  sign = nibble & 8;
562  delta = nibble & 7;
563  diff = ((2 * delta + 1) * step) >> 3;
564  predictor = c->predictor;
565  if (sign) predictor -= diff;
566  else predictor += diff;
567 
568  c->predictor = av_clip_intp2(predictor, 11);
569  c->step_index = step_index;
570 
571  return c->predictor * 16;
572 }
573 
574 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
575 {
576  int sign, delta, diff;
577  int new_step;
578 
579  sign = nibble & 8;
580  delta = nibble & 7;
 581  /* perform a direct multiplication instead of the series of jumps proposed by
 582  * the reference ADPCM implementation, since modern CPUs can do the mults
 583  * quickly enough */
584  diff = ((2 * delta + 1) * c->step) >> 3;
 585  /* predictor update is not so trivial: predictor is multiplied by 254/256 before updating */
586  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
587  c->predictor = av_clip_int16(c->predictor);
588  /* calculate new step and clamp it to range 511..32767 */
589  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
590  c->step = av_clip(new_step, 511, 32767);
591 
592  return (int16_t)c->predictor;
593 }
594 
595 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
596 {
597  int sign, delta, diff;
598 
599  sign = nibble & (1<<(size-1));
600  delta = nibble & ((1<<(size-1))-1);
601  diff = delta << (7 + c->step + shift);
602 
603  /* clamp result */
604  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
605 
606  /* calculate new step */
607  if (delta >= (2*size - 3) && c->step < 3)
608  c->step++;
609  else if (delta == 0 && c->step > 0)
610  c->step--;
611 
612  return (int16_t) c->predictor;
613 }
614 
615 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
616 {
617  if(!c->step) {
618  c->predictor = 0;
619  c->step = 127;
620  }
621 
622  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
623  c->predictor = av_clip_int16(c->predictor);
624  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
625  c->step = av_clip(c->step, 127, 24576);
626  return c->predictor;
627 }
628 
629 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
630 {
631  c->predictor += mtaf_stepsize[c->step][nibble];
632  c->predictor = av_clip_int16(c->predictor);
633  c->step += ff_adpcm_index_table[nibble];
634  c->step = av_clip_uintp2(c->step, 5);
635  return c->predictor;
636 }
637 
638 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
639 {
640  int16_t index = c->step_index;
641  uint32_t lookup_sample = ff_adpcm_step_table[index];
642  int32_t sample = 0;
643 
644  if (nibble & 0x40)
645  sample += lookup_sample;
646  if (nibble & 0x20)
647  sample += lookup_sample >> 1;
648  if (nibble & 0x10)
649  sample += lookup_sample >> 2;
650  if (nibble & 0x08)
651  sample += lookup_sample >> 3;
652  if (nibble & 0x04)
653  sample += lookup_sample >> 4;
654  if (nibble & 0x02)
655  sample += lookup_sample >> 5;
656  if (nibble & 0x01)
657  sample += lookup_sample >> 6;
658  if (nibble & 0x80)
659  sample = -sample;
660 
 661  sample += c->predictor;
 662  sample = av_clip_int16(sample);
 663 
664  index += zork_index_table[(nibble >> 4) & 7];
665  index = av_clip(index, 0, 88);
666 
667  c->predictor = sample;
668  c->step_index = index;
669 
670  return sample;
671 }
672 
673 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
674  const uint8_t *in, ADPCMChannelStatus *left,
675  ADPCMChannelStatus *right, int channels, int sample_offset)
676 {
677  int i, j;
678  int shift,filter,f0,f1;
679  int s_1,s_2;
680  int d,s,t;
681 
682  out0 += sample_offset;
683  if (channels == 1)
684  out1 = out0 + 28;
685  else
686  out1 += sample_offset;
687 
688  for(i=0;i<4;i++) {
689  shift = 12 - (in[4+i*2] & 15);
 690  filter = in[4+i*2] >> 4;
 691  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
 692  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
693  filter=0;
694  }
695  if (shift < 0) {
696  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
697  shift = 0;
698  }
699  f0 = xa_adpcm_table[filter][0];
700  f1 = xa_adpcm_table[filter][1];
701 
702  s_1 = left->sample1;
703  s_2 = left->sample2;
704 
705  for(j=0;j<28;j++) {
706  d = in[16+i+j*4];
707 
708  t = sign_extend(d, 4);
709  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
710  s_2 = s_1;
711  s_1 = av_clip_int16(s);
712  out0[j] = s_1;
713  }
714 
715  if (channels == 2) {
716  left->sample1 = s_1;
717  left->sample2 = s_2;
718  s_1 = right->sample1;
719  s_2 = right->sample2;
720  }
721 
722  shift = 12 - (in[5+i*2] & 15);
723  filter = in[5+i*2] >> 4;
724  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
725  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
726  filter=0;
727  }
728  if (shift < 0) {
729  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
730  shift = 0;
731  }
732 
733  f0 = xa_adpcm_table[filter][0];
734  f1 = xa_adpcm_table[filter][1];
735 
736  for(j=0;j<28;j++) {
737  d = in[16+i+j*4];
738 
739  t = sign_extend(d >> 4, 4);
740  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
741  s_2 = s_1;
742  s_1 = av_clip_int16(s);
743  out1[j] = s_1;
744  }
745 
746  if (channels == 2) {
747  right->sample1 = s_1;
748  right->sample2 = s_2;
749  } else {
750  left->sample1 = s_1;
751  left->sample2 = s_2;
752  }
753 
754  out0 += 28 * (3 - channels);
755  out1 += 28 * (3 - channels);
756  }
757 
758  return 0;
759 }
760 
761 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
762 {
763  ADPCMDecodeContext *c = avctx->priv_data;
764  GetBitContext gb;
765  const int8_t *table;
766  int channels = avctx->ch_layout.nb_channels;
767  int k0, signmask, nb_bits, count;
768  int size = buf_size*8;
769  int i;
770 
771  init_get_bits(&gb, buf, size);
772 
773  //read bits & initial values
774  nb_bits = get_bits(&gb, 2)+2;
775  table = swf_index_tables[nb_bits-2];
776  k0 = 1 << (nb_bits-2);
777  signmask = 1 << (nb_bits-1);
778 
779  while (get_bits_count(&gb) <= size - 22 * channels) {
780  for (i = 0; i < channels; i++) {
781  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
782  c->status[i].step_index = get_bits(&gb, 6);
783  }
784 
785  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
786  int i;
787 
788  for (i = 0; i < channels; i++) {
789  // similar to IMA adpcm
790  int delta = get_bits(&gb, nb_bits);
791  int step = ff_adpcm_step_table[c->status[i].step_index];
792  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
793  int k = k0;
794 
795  do {
796  if (delta & k)
797  vpdiff += step;
798  step >>= 1;
799  k >>= 1;
800  } while(k);
801  vpdiff += step;
802 
803  if (delta & signmask)
804  c->status[i].predictor -= vpdiff;
805  else
806  c->status[i].predictor += vpdiff;
807 
808  c->status[i].step_index += table[delta & (~signmask)];
809 
810  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
811  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
812 
813  *samples++ = c->status[i].predictor;
814  }
815  }
816  }
817 }
818 
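 /* [Editorial aside, not part of the original source] Because 8*sample1 - 4*sample2
  * and 4*sample1 are multiples of 4, and given arithmetic right shifts, the
  * update below is equivalent to
  *     sample = clip16(((nibble << shift) >> 2) + (flag ? 2*sample1 - sample2
  *                                                      : sample1));
  * i.e. a second-order predictor when flag is set and a first-order predictor
  * otherwise. */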
819 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
820 {
821  int sample = sign_extend(nibble, 4) * (1 << shift);
822 
823  if (flag)
824  sample += (8 * cs->sample1) - (4 * cs->sample2);
825  else
826  sample += 4 * cs->sample1;
827 
828  sample = av_clip_int16(sample >> 2);
829 
830  cs->sample2 = cs->sample1;
831  cs->sample1 = sample;
832 
833  return sample;
834 }
835 
836 /**
837  * Get the number of samples (per channel) that will be decoded from the packet.
838  * In one case, this is actually the maximum number of samples possible to
839  * decode with the given buf_size.
840  *
841  * @param[out] coded_samples set to the number of samples as coded in the
842  * packet, or 0 if the codec does not encode the
843  * number of samples in each frame.
844  * @param[out] approx_nb_samples set to non-zero if the number of samples
845  * returned is an approximation.
846  */
848  int buf_size, int *coded_samples, int *approx_nb_samples)
849 {
850  ADPCMDecodeContext *s = avctx->priv_data;
851  int nb_samples = 0;
852  int ch = avctx->ch_layout.nb_channels;
853  int has_coded_samples = 0;
854  int header_size;
855 
856  *coded_samples = 0;
857  *approx_nb_samples = 0;
858 
859  if(ch <= 0)
860  return 0;
861 
862  switch (avctx->codec->id) {
863  /* constant, only check buf_size */
865  if (buf_size < 76 * ch)
866  return 0;
867  nb_samples = 128;
868  break;
870  if (buf_size < 34 * ch)
871  return 0;
872  nb_samples = 64;
873  break;
874  /* simple 4-bit adpcm */
887  nb_samples = buf_size * 2 / ch;
888  break;
889  }
890  if (nb_samples)
891  return nb_samples;
892 
893  /* simple 4-bit adpcm, with header */
894  header_size = 0;
895  switch (avctx->codec->id) {
901  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
902  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
903  }
904  if (header_size > 0)
905  return (buf_size - header_size) * 2 / ch;
906 
907  /* more complex formats */
908  switch (avctx->codec->id) {
910  bytestream2_skip(gb, 4);
911  has_coded_samples = 1;
912  *coded_samples = bytestream2_get_le32u(gb);
913  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
914  bytestream2_seek(gb, -8, SEEK_CUR);
915  break;
917  /* Stereo is 30 bytes per block */
918  /* Mono is 15 bytes per block */
919  has_coded_samples = 1;
920  *coded_samples = bytestream2_get_le32(gb);
921  *coded_samples -= *coded_samples % 28;
922  nb_samples = (buf_size - 12) / (ch == 2 ? 30 : 15) * 28;
923  break;
925  has_coded_samples = 1;
926  *coded_samples = bytestream2_get_le32(gb);
927  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
928  break;
930  nb_samples = (buf_size - ch) / ch * 2;
931  break;
935  /* maximum number of samples */
936  /* has internal offsets and a per-frame switch to signal raw 16-bit */
937  has_coded_samples = 1;
938  switch (avctx->codec->id) {
940  header_size = 4 + 9 * ch;
941  *coded_samples = bytestream2_get_le32(gb);
942  break;
944  header_size = 4 + 5 * ch;
945  *coded_samples = bytestream2_get_le32(gb);
946  break;
948  header_size = 4 + 5 * ch;
949  *coded_samples = bytestream2_get_be32(gb);
950  break;
951  }
952  *coded_samples -= *coded_samples % 28;
953  nb_samples = (buf_size - header_size) * 2 / ch;
954  nb_samples -= nb_samples % 28;
955  *approx_nb_samples = 1;
956  break;
958  if (avctx->block_align > 0)
959  buf_size = FFMIN(buf_size, avctx->block_align);
960  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
961  break;
963  if (avctx->block_align > 0)
964  buf_size = FFMIN(buf_size, avctx->block_align);
965  if (buf_size < 4 * ch)
966  return AVERROR_INVALIDDATA;
967  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
968  break;
970  if (avctx->block_align > 0)
971  buf_size = FFMIN(buf_size, avctx->block_align);
972  nb_samples = (buf_size - 4 * ch) * 2 / ch;
973  break;
974  CASE(ADPCM_IMA_WAV,
975  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
976  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
977  if (avctx->block_align > 0)
978  buf_size = FFMIN(buf_size, avctx->block_align);
979  if (buf_size < 4 * ch)
980  return AVERROR_INVALIDDATA;
981  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
982  ) /* End of CASE */
984  if (avctx->block_align > 0)
985  buf_size = FFMIN(buf_size, avctx->block_align);
986  nb_samples = (buf_size - 6 * ch) * 2 / ch;
987  break;
989  if (avctx->block_align > 0)
990  buf_size = FFMIN(buf_size, avctx->block_align);
991  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
 992  break;
 993  case AV_CODEC_ID_ADPCM_SBPRO_2:
 994  case AV_CODEC_ID_ADPCM_SBPRO_3:
 995  case AV_CODEC_ID_ADPCM_SBPRO_4:
 996  {
997  int samples_per_byte;
998  switch (avctx->codec->id) {
999  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
1000  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
1001  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
1002  }
1003  if (!s->status[0].step_index) {
1004  if (buf_size < ch)
1005  return AVERROR_INVALIDDATA;
1006  nb_samples++;
1007  buf_size -= ch;
1008  }
1009  nb_samples += buf_size * samples_per_byte / ch;
1010  break;
1011  }
1012  case AV_CODEC_ID_ADPCM_SWF:
1013  {
1014  int buf_bits = buf_size * 8 - 2;
1015  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1016  int block_hdr_size = 22 * ch;
1017  int block_size = block_hdr_size + nbits * ch * 4095;
1018  int nblocks = buf_bits / block_size;
1019  int bits_left = buf_bits - nblocks * block_size;
1020  nb_samples = nblocks * 4096;
1021  if (bits_left >= block_hdr_size)
1022  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1023  break;
1024  }
 1025  case AV_CODEC_ID_ADPCM_THP:
 1026  case AV_CODEC_ID_ADPCM_THP_LE:
 1027  if (avctx->extradata) {
1028  nb_samples = buf_size * 14 / (8 * ch);
1029  break;
1030  }
1031  has_coded_samples = 1;
1032  bytestream2_skip(gb, 4); // channel size
1033  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1034  bytestream2_get_le32(gb) :
1035  bytestream2_get_be32(gb);
1036  buf_size -= 8 + 36 * ch;
1037  buf_size /= ch;
1038  nb_samples = buf_size / 8 * 14;
1039  if (buf_size % 8 > 1)
1040  nb_samples += (buf_size % 8 - 1) * 2;
1041  *approx_nb_samples = 1;
1042  break;
1043  case AV_CODEC_ID_ADPCM_AFC:
1044  nb_samples = buf_size / (9 * ch) * 16;
1045  break;
1046  case AV_CODEC_ID_ADPCM_XA:
1047  nb_samples = (buf_size / 128) * 224 / ch;
1048  break;
1049  case AV_CODEC_ID_ADPCM_XMD:
1050  nb_samples = buf_size / (21 * ch) * 32;
1051  break;
1052  case AV_CODEC_ID_ADPCM_DTK:
1053  case AV_CODEC_ID_ADPCM_PSX:
1054  nb_samples = buf_size / (16 * ch) * 28;
1055  break;
1057  nb_samples = buf_size / avctx->block_align * 32;
1058  break;
1060  nb_samples = buf_size / ch;
1061  break;
1062  }
1063 
1064  /* validate coded sample count */
1065  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1066  return AVERROR_INVALIDDATA;
1067 
1068  return nb_samples;
1069 }
 1070 
 1071 static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 1072  int *got_frame_ptr, AVPacket *avpkt)
1073 {
1074  const uint8_t *buf = avpkt->data;
1075  int buf_size = avpkt->size;
1076  ADPCMDecodeContext *c = avctx->priv_data;
1077  int channels = avctx->ch_layout.nb_channels;
1078  int16_t *samples;
1079  int16_t **samples_p;
1080  int st; /* stereo */
1081  int nb_samples, coded_samples, approx_nb_samples, ret;
1082  GetByteContext gb;
1083 
1084  bytestream2_init(&gb, buf, buf_size);
1085  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1086  if (nb_samples <= 0) {
1087  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1088  return AVERROR_INVALIDDATA;
1089  }
1090 
1091  /* get output buffer */
1092  frame->nb_samples = nb_samples;
1093  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1094  return ret;
1095  samples = (int16_t *)frame->data[0];
1096  samples_p = (int16_t **)frame->extended_data;
1097 
1098  /* use coded_samples when applicable */
1099  /* it is always <= nb_samples, so the output buffer will be large enough */
1100  if (coded_samples) {
1101  if (!approx_nb_samples && coded_samples != nb_samples)
1102  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1103  frame->nb_samples = nb_samples = coded_samples;
1104  }
1105 
1106  st = channels == 2 ? 1 : 0;
1107 
1108  switch(avctx->codec->id) {
1109  CASE(ADPCM_IMA_QT,
1110  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1111  Channel data is interleaved per-chunk. */
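 /* [Editorial aside, not part of the original source] Each 34-byte chunk is a
    2-byte big-endian header (top 9 bits: initial predictor, low 7 bits: step
    index) followed by 32 bytes of 4-bit nibbles, i.e. 64 samples, which is
    what the loops below unpack. */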
1112  for (int channel = 0; channel < channels; channel++) {
1113  ADPCMChannelStatus *cs = &c->status[channel];
1114  int predictor;
1115  int step_index;
1116  /* (pppppp) (piiiiiii) */
1117 
1118  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1119  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1120  step_index = predictor & 0x7F;
1121  predictor &= ~0x7F;
1122 
1123  if (cs->step_index == step_index) {
1124  int diff = predictor - cs->predictor;
1125  if (diff < 0)
1126  diff = - diff;
1127  if (diff > 0x7f)
1128  goto update;
1129  } else {
1130  update:
1131  cs->step_index = step_index;
1132  cs->predictor = predictor;
1133  }
1134 
1135  if (cs->step_index > 88u){
1136  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1137  channel, cs->step_index);
1138  return AVERROR_INVALIDDATA;
1139  }
1140 
1141  samples = samples_p[channel];
1142 
1143  for (int m = 0; m < 64; m += 2) {
1144  int byte = bytestream2_get_byteu(&gb);
1145  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1146  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1147  }
1148  }
1149  ) /* End of CASE */
1150  CASE(ADPCM_IMA_WAV,
1151  for (int i = 0; i < channels; i++) {
1152  ADPCMChannelStatus *cs = &c->status[i];
1153  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1154 
1155  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1156  if (cs->step_index > 88u){
1157  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1158  i, cs->step_index);
1159  return AVERROR_INVALIDDATA;
1160  }
1161  }
1162 
1163  if (avctx->bits_per_coded_sample != 4) {
1164  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1165  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1166  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1167  GetBitContext g;
1168 
1169  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1170  for (int i = 0; i < channels; i++) {
1171  ADPCMChannelStatus *cs = &c->status[i];
1172  samples = &samples_p[i][1 + n * samples_per_block];
1173  for (int j = 0; j < block_size; j++) {
1174  temp[j] = buf[4 * channels + block_size * n * channels +
1175  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1176  }
1177  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1178  if (ret < 0)
1179  return ret;
 1180  for (int m = 0; m < samples_per_block; m++) {
 1181  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
 1182  avctx->bits_per_coded_sample);
1183  }
1184  }
1185  }
1186  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1187  } else {
1188  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1189  for (int i = 0; i < channels; i++) {
1190  ADPCMChannelStatus *cs = &c->status[i];
1191  samples = &samples_p[i][1 + n * 8];
1192  for (int m = 0; m < 8; m += 2) {
1193  int v = bytestream2_get_byteu(&gb);
1194  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1195  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1196  }
1197  }
1198  }
1199  }
1200  ) /* End of CASE */
1201  CASE(ADPCM_4XM,
1202  for (int i = 0; i < channels; i++)
1203  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1204 
1205  for (int i = 0; i < channels; i++) {
1206  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1207  if (c->status[i].step_index > 88u) {
1208  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1209  i, c->status[i].step_index);
1210  return AVERROR_INVALIDDATA;
1211  }
1212  }
1213 
1214  for (int i = 0; i < channels; i++) {
1215  ADPCMChannelStatus *cs = &c->status[i];
1216  samples = (int16_t *)frame->data[i];
1217  for (int n = nb_samples >> 1; n > 0; n--) {
1218  int v = bytestream2_get_byteu(&gb);
1219  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1220  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1221  }
1222  }
1223  ) /* End of CASE */
1224  CASE(ADPCM_AGM,
1225  for (int i = 0; i < channels; i++)
1226  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1227  for (int i = 0; i < channels; i++)
1228  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1229 
1230  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1231  int v = bytestream2_get_byteu(&gb);
1232  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1233  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1234  }
1235  ) /* End of CASE */
1236  CASE(ADPCM_MS,
1237  int block_predictor;
1238 
1239  if (avctx->ch_layout.nb_channels > 2) {
1240  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1241  samples = samples_p[channel];
1242  block_predictor = bytestream2_get_byteu(&gb);
1243  if (block_predictor > 6) {
1244  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1245  channel, block_predictor);
1246  return AVERROR_INVALIDDATA;
1247  }
1248  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1249  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1250  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1251  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1252  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1253  *samples++ = c->status[channel].sample2;
1254  *samples++ = c->status[channel].sample1;
1255  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1256  int byte = bytestream2_get_byteu(&gb);
1257  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1258  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1259  }
1260  }
1261  } else {
1262  block_predictor = bytestream2_get_byteu(&gb);
1263  if (block_predictor > 6) {
1264  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1265  block_predictor);
1266  return AVERROR_INVALIDDATA;
1267  }
1268  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1269  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1270  if (st) {
1271  block_predictor = bytestream2_get_byteu(&gb);
1272  if (block_predictor > 6) {
1273  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1274  block_predictor);
1275  return AVERROR_INVALIDDATA;
1276  }
1277  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1278  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1279  }
1280  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1281  if (st){
1282  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1283  }
1284 
1285  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1286  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1287  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1288  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1289 
1290  *samples++ = c->status[0].sample2;
1291  if (st) *samples++ = c->status[1].sample2;
1292  *samples++ = c->status[0].sample1;
1293  if (st) *samples++ = c->status[1].sample1;
1294  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1295  int byte = bytestream2_get_byteu(&gb);
1296  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1297  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1298  }
1299  }
1300  ) /* End of CASE */
1301  CASE(ADPCM_MTAF,
1302  for (int channel = 0; channel < channels; channel += 2) {
1303  bytestream2_skipu(&gb, 4);
1304  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1305  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1306  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1307  bytestream2_skipu(&gb, 2);
1308  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1309  bytestream2_skipu(&gb, 2);
1310  for (int n = 0; n < nb_samples; n += 2) {
1311  int v = bytestream2_get_byteu(&gb);
1312  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1313  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1314  }
1315  for (int n = 0; n < nb_samples; n += 2) {
1316  int v = bytestream2_get_byteu(&gb);
1317  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1318  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1319  }
1320  }
1321  ) /* End of CASE */
1322  CASE(ADPCM_IMA_DK4,
1323  for (int channel = 0; channel < channels; channel++) {
1324  ADPCMChannelStatus *cs = &c->status[channel];
1325  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1326  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1327  if (cs->step_index > 88u){
1328  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1329  channel, cs->step_index);
1330  return AVERROR_INVALIDDATA;
1331  }
1332  }
1333  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1334  int v = bytestream2_get_byteu(&gb);
1335  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1336  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1337  }
1338  ) /* End of CASE */
1339 
1340  /* DK3 ADPCM support macro */
1341 #define DK3_GET_NEXT_NIBBLE() \
1342  if (decode_top_nibble_next) { \
1343  nibble = last_byte >> 4; \
1344  decode_top_nibble_next = 0; \
1345  } else { \
1346  last_byte = bytestream2_get_byteu(&gb); \
1347  nibble = last_byte & 0x0F; \
1348  decode_top_nibble_next = 1; \
1349  }
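 /* [Editorial aside, not part of the original source] DK3 stores a "sum"
  * channel in c->status[0] and a "diff" channel in c->status[1] instead of
  * left/right; the loop below reconstructs each stereo pair as
  * (sum + diff, sum - diff). */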
1350  CASE(ADPCM_IMA_DK3,
1351  int last_byte = 0;
1352  int nibble;
1353  int decode_top_nibble_next = 0;
1354  int diff_channel;
1355  const int16_t *samples_end = samples + channels * nb_samples;
1356 
1357  bytestream2_skipu(&gb, 10);
1358  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1359  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1360  c->status[0].step_index = bytestream2_get_byteu(&gb);
1361  c->status[1].step_index = bytestream2_get_byteu(&gb);
1362  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1363  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1364  c->status[0].step_index, c->status[1].step_index);
1365  return AVERROR_INVALIDDATA;
1366  }
1367  /* sign extend the predictors */
1368  diff_channel = c->status[1].predictor;
1369 
1370  while (samples < samples_end) {
1371 
1372  /* for this algorithm, c->status[0] is the sum channel and
1373  * c->status[1] is the diff channel */
1374 
 1375  /* process the first predictor of the sum channel */
 1376  DK3_GET_NEXT_NIBBLE();
 1377  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1378 
 1379  /* process the diff channel predictor */
 1380  DK3_GET_NEXT_NIBBLE();
 1381  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1382 
1383  /* process the first pair of stereo PCM samples */
1384  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1385  *samples++ = c->status[0].predictor + c->status[1].predictor;
1386  *samples++ = c->status[0].predictor - c->status[1].predictor;
1387 
 1388  /* process the second predictor of the sum channel */
 1389  DK3_GET_NEXT_NIBBLE();
 1390  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1391 
1392  /* process the second pair of stereo PCM samples */
1393  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1394  *samples++ = c->status[0].predictor + c->status[1].predictor;
1395  *samples++ = c->status[0].predictor - c->status[1].predictor;
1396  }
1397 
1398  if ((bytestream2_tell(&gb) & 1))
1399  bytestream2_skip(&gb, 1);
1400  ) /* End of CASE */
1401  CASE(ADPCM_IMA_ISS,
1402  for (int channel = 0; channel < channels; channel++) {
1403  ADPCMChannelStatus *cs = &c->status[channel];
1404  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1405  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1406  if (cs->step_index > 88u){
1407  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1408  channel, cs->step_index);
1409  return AVERROR_INVALIDDATA;
1410  }
1411  }
1412 
1413  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1414  int v1, v2;
1415  int v = bytestream2_get_byteu(&gb);
1416  /* nibbles are swapped for mono */
1417  if (st) {
1418  v1 = v >> 4;
1419  v2 = v & 0x0F;
1420  } else {
1421  v2 = v >> 4;
1422  v1 = v & 0x0F;
1423  }
1424  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1425  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1426  }
1427  ) /* End of CASE */
1428  CASE(ADPCM_IMA_MOFLEX,
1429  for (int channel = 0; channel < channels; channel++) {
1430  ADPCMChannelStatus *cs = &c->status[channel];
1431  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1432  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1433  if (cs->step_index > 88u){
1434  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1435  channel, cs->step_index);
1436  return AVERROR_INVALIDDATA;
1437  }
1438  }
1439 
1440  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1441  for (int channel = 0; channel < channels; channel++) {
1442  samples = samples_p[channel] + 256 * subframe;
1443  for (int n = 0; n < 256; n += 2) {
1444  int v = bytestream2_get_byteu(&gb);
1445  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1446  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1447  }
1448  }
1449  }
1450  ) /* End of CASE */
1451  CASE(ADPCM_IMA_DAT4,
1452  for (int channel = 0; channel < channels; channel++) {
1453  ADPCMChannelStatus *cs = &c->status[channel];
1454  samples = samples_p[channel];
1455  bytestream2_skip(&gb, 4);
1456  for (int n = 0; n < nb_samples; n += 2) {
1457  int v = bytestream2_get_byteu(&gb);
1458  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1459  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1460  }
1461  }
1462  ) /* End of CASE */
1463  CASE(ADPCM_IMA_APC,
1464  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1465  int v = bytestream2_get_byteu(&gb);
1466  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1467  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1468  }
1469  ) /* End of CASE */
1470  CASE(ADPCM_IMA_SSI,
1471  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1472  int v = bytestream2_get_byteu(&gb);
1473  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1474  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1475  }
1476  ) /* End of CASE */
1477  CASE(ADPCM_IMA_APM,
1478  for (int n = nb_samples / 2; n > 0; n--) {
1479  for (int channel = 0; channel < channels; channel++) {
1480  int v = bytestream2_get_byteu(&gb);
1481  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1482  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1483  }
1484  samples += channels;
1485  }
1486  ) /* End of CASE */
1487  CASE(ADPCM_IMA_ALP,
1488  for (int n = nb_samples / 2; n > 0; n--) {
1489  for (int channel = 0; channel < channels; channel++) {
1490  int v = bytestream2_get_byteu(&gb);
1491  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1492  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1493  }
1494  samples += channels;
1495  }
1496  ) /* End of CASE */
1497  CASE(ADPCM_IMA_CUNNING,
1498  for (int channel = 0; channel < channels; channel++) {
1499  int16_t *smp = samples_p[channel];
1500  for (int n = 0; n < nb_samples / 2; n++) {
1501  int v = bytestream2_get_byteu(&gb);
1502  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1503  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1504  }
1505  }
1506  ) /* End of CASE */
1507  CASE(ADPCM_IMA_OKI,
1508  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1509  int v = bytestream2_get_byteu(&gb);
1510  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1511  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1512  }
1513  ) /* End of CASE */
1514  CASE(ADPCM_IMA_RAD,
1515  for (int channel = 0; channel < channels; channel++) {
1516  ADPCMChannelStatus *cs = &c->status[channel];
1517  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1518  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1519  if (cs->step_index > 88u){
1520  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1521  channel, cs->step_index);
1522  return AVERROR_INVALIDDATA;
1523  }
1524  }
1525  for (int n = 0; n < nb_samples / 2; n++) {
1526  int byte[2];
1527 
1528  byte[0] = bytestream2_get_byteu(&gb);
1529  if (st)
1530  byte[1] = bytestream2_get_byteu(&gb);
1531  for (int channel = 0; channel < channels; channel++) {
1532  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1533  }
1534  for (int channel = 0; channel < channels; channel++) {
1535  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1536  }
1537  }
1538  ) /* End of CASE */
1539  CASE(ADPCM_IMA_WS,
1540  if (c->vqa_version == 3) {
1541  for (int channel = 0; channel < channels; channel++) {
1542  int16_t *smp = samples_p[channel];
1543 
1544  for (int n = nb_samples / 2; n > 0; n--) {
1545  int v = bytestream2_get_byteu(&gb);
1546  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1547  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1548  }
1549  }
1550  } else {
1551  for (int n = nb_samples / 2; n > 0; n--) {
1552  for (int channel = 0; channel < channels; channel++) {
1553  int v = bytestream2_get_byteu(&gb);
1554  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1555  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1556  }
1557  samples += channels;
1558  }
1559  }
1560  bytestream2_seek(&gb, 0, SEEK_END);
1561  ) /* End of CASE */
1562  CASE(ADPCM_XMD,
1563  int bytes_remaining, block = 0;
1564  while (bytestream2_get_bytes_left(&gb) >= 21 * channels) {
1565  for (int channel = 0; channel < channels; channel++) {
1566  int16_t *out = samples_p[channel] + block * 32;
1567  int16_t history[2];
1568  uint16_t scale;
1569 
1570  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1571  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1572  scale = bytestream2_get_le16(&gb);
1573 
1574  out[0] = history[1];
1575  out[1] = history[0];
1576 
1577  for (int n = 0; n < 15; n++) {
1578  unsigned byte = bytestream2_get_byte(&gb);
1579  int32_t nibble[2];
1580 
1581  nibble[0] = sign_extend(byte & 15, 4);
1582  nibble[1] = sign_extend(byte >> 4, 4);
1583 
1584  out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1585  history[1] = history[0];
1586  history[0] = out[2+n*2];
1587 
1588  out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1589  history[1] = history[0];
1590  history[0] = out[2+n*2+1];
1591  }
1592  }
1593 
1594  block++;
1595  }
1596  bytes_remaining = bytestream2_get_bytes_left(&gb);
1597  if (bytes_remaining > 0) {
1598  bytestream2_skip(&gb, bytes_remaining);
1599  }
1600  ) /* End of CASE */
1601  CASE(ADPCM_XA,
1602  int16_t *out0 = samples_p[0];
1603  int16_t *out1 = samples_p[1];
1604  int samples_per_block = 28 * (3 - channels) * 4;
1605  int sample_offset = 0;
1606  int bytes_remaining;
1607  while (bytestream2_get_bytes_left(&gb) >= 128) {
1608  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1609  &c->status[0], &c->status[1],
1610  channels, sample_offset)) < 0)
1611  return ret;
1612  bytestream2_skipu(&gb, 128);
1613  sample_offset += samples_per_block;
1614  }
1615  /* Less than a full block of data left, e.g. when reading from
1616  * 2324 byte per sector XA; the remainder is padding */
1617  bytes_remaining = bytestream2_get_bytes_left(&gb);
1618  if (bytes_remaining > 0) {
1619  bytestream2_skip(&gb, bytes_remaining);
1620  }
1621  ) /* End of CASE */
1622  CASE(ADPCM_IMA_EA_EACS,
1623  for (int i = 0; i <= st; i++) {
1624  c->status[i].step_index = bytestream2_get_le32u(&gb);
1625  if (c->status[i].step_index > 88u) {
1626  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1627  i, c->status[i].step_index);
1628  return AVERROR_INVALIDDATA;
1629  }
1630  }
1631  for (int i = 0; i <= st; i++) {
1632  c->status[i].predictor = bytestream2_get_le32u(&gb);
1633  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1634  return AVERROR_INVALIDDATA;
1635  }
1636 
1637  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1638  int byte = bytestream2_get_byteu(&gb);
1639  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1640  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1641  }
1642  ) /* End of CASE */
1643  CASE(ADPCM_IMA_EA_SEAD,
1644  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1645  int byte = bytestream2_get_byteu(&gb);
1646  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1647  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1648  }
1649  ) /* End of CASE */
1650  CASE(ADPCM_EA,
1651  int previous_left_sample, previous_right_sample;
1652  int current_left_sample, current_right_sample;
1653  int next_left_sample, next_right_sample;
1654  int coeff1l, coeff2l, coeff1r, coeff2r;
1655  int shift_left, shift_right;
1656 
1657  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte (stereo) or 15-byte (mono) pieces,
1658  each coding 28 stereo/mono samples. */
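 /* [Editorial aside, not part of the original source] Layout of one 28-sample
    piece as read by the loop below: stereo pieces are 1 coefficient byte
    (left index in the high nibble, right in the low) + 1 shift byte +
    28 data bytes holding one left/right nibble pair each (30 bytes total);
    mono pieces are 1 byte carrying the coefficient index (high nibble) and
    the shift (low nibble) + 14 data bytes of two samples each (15 bytes
    total). */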
1659 
1660  if (channels != 2 && channels != 1)
1661  return AVERROR_INVALIDDATA;
1662 
1663  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1664  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1665  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1666  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1667 
1668  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1669  int byte = bytestream2_get_byteu(&gb);
1670  coeff1l = ea_adpcm_table[ byte >> 4 ];
1671  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1672  coeff1r = ea_adpcm_table[ byte & 0x0F];
1673  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1674 
1675  if (channels == 2){
1676  byte = bytestream2_get_byteu(&gb);
1677  shift_left = 20 - (byte >> 4);
1678  shift_right = 20 - (byte & 0x0F);
1679  } else{
1680  /* Mono packs the shift into the coefficient byte's lower nibble instead */
1681  shift_left = 20 - (byte & 0x0F);
1682  }
1683 
1684  for (int count2 = 0; count2 < (channels == 2 ? 28 : 14); count2++) {
1685  byte = bytestream2_get_byteu(&gb);
1686  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1687  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1688 
1689  next_left_sample = (next_left_sample +
1690  (current_left_sample * coeff1l) +
1691  (previous_left_sample * coeff2l) + 0x80) >> 8;
1692  next_right_sample = (next_right_sample +
1693  (current_right_sample * coeff1r) +
1694  (previous_right_sample * coeff2r) + 0x80) >> 8;
1695 
1696  previous_left_sample = current_left_sample;
1697  current_left_sample = av_clip_int16(next_left_sample);
1698  previous_right_sample = current_right_sample;
1699  current_right_sample = av_clip_int16(next_right_sample);
1700  *samples++ = current_left_sample;
1701 
1702  if (channels == 2){
1703  *samples++ = current_right_sample;
1704  } else {
1705  next_left_sample = sign_extend(byte, 4) * (1 << shift_left);
1706 
1707  next_left_sample = (next_left_sample +
1708  (current_left_sample * coeff1l) +
1709  (previous_left_sample * coeff2l) + 0x80) >> 8;
1710 
1711  previous_left_sample = current_left_sample;
1712  current_left_sample = av_clip_int16(next_left_sample);
1713 
1714  *samples++ = current_left_sample;
1715  }
1716  }
1717  }
1718  bytestream2_skip(&gb, channels == 2 ? 2 : 3); // Skip terminating NULs
1719  ) /* End of CASE */
1720  CASE(ADPCM_EA_MAXIS_XA,
1721  int coeff[2][2], shift[2];
1722 
1723  for (int channel = 0; channel < channels; channel++) {
1724  int byte = bytestream2_get_byteu(&gb);
1725  for (int i = 0; i < 2; i++)
1726  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1727  shift[channel] = 20 - (byte & 0x0F);
1728  }
1729  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1730  int byte[2];
1731 
1732  byte[0] = bytestream2_get_byteu(&gb);
1733  if (st) byte[1] = bytestream2_get_byteu(&gb);
1734  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1735  for (int channel = 0; channel < channels; channel++) {
1736  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1737  sample = (sample +
1738  c->status[channel].sample1 * coeff[channel][0] +
1739  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1740  c->status[channel].sample2 = c->status[channel].sample1;
1741  c->status[channel].sample1 = av_clip_int16(sample);
1742  *samples++ = c->status[channel].sample1;
1743  }
1744  }
1745  }
1746  bytestream2_seek(&gb, 0, SEEK_END);
1747  ) /* End of CASE */
1748 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
1749  case AV_CODEC_ID_ADPCM_EA_R1:
1750  case AV_CODEC_ID_ADPCM_EA_R2:
1751  case AV_CODEC_ID_ADPCM_EA_R3: {
1752  /* channel numbering
1753  2chan: 0=fl, 1=fr
1754  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1755  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1756  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1757  int previous_sample, current_sample, next_sample;
1758  int coeff1, coeff2;
1759  int shift;
1760  uint16_t *samplesC;
1761  int count = 0;
1762  int offsets[6];
1763 
1764  for (unsigned channel = 0; channel < channels; channel++)
1765  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1766  bytestream2_get_le32(&gb)) +
1767  (channels + 1) * 4;
1768 
1769  for (unsigned channel = 0; channel < channels; channel++) {
1770  int count1;
1771 
1772  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1773  samplesC = samples_p[channel];
1774 
1775  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1776  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1777  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1778  } else {
1779  current_sample = c->status[channel].predictor;
1780  previous_sample = c->status[channel].prev_sample;
1781  }
1782 
1783  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1784  int byte = bytestream2_get_byte(&gb);
1785  if (byte == 0xEE) { /* only seen in R2 and R3 */
1786  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1787  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1788 
1789  for (int count2 = 0; count2 < 28; count2++)
1790  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1791  } else {
1792  coeff1 = ea_adpcm_table[ byte >> 4 ];
1793  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1794  shift = 20 - (byte & 0x0F);
1795 
1796  for (int count2 = 0; count2 < 28; count2++) {
1797  if (count2 & 1)
1798  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1799  else {
1800  byte = bytestream2_get_byte(&gb);
1801  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1802  }
1803 
1804  next_sample += (current_sample * coeff1) +
1805  (previous_sample * coeff2);
1806  next_sample = av_clip_int16(next_sample >> 8);
1807 
1808  previous_sample = current_sample;
1809  current_sample = next_sample;
1810  *samplesC++ = current_sample;
1811  }
1812  }
1813  }
1814  if (!count) {
1815  count = count1;
1816  } else if (count != count1) {
1817  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1818  count = FFMAX(count, count1);
1819  }
1820 
1821  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1822  c->status[channel].predictor = current_sample;
1823  c->status[channel].prev_sample = previous_sample;
1824  }
1825  }
1826 
1827  frame->nb_samples = count * 28;
1828  bytestream2_seek(&gb, 0, SEEK_END);
1829  break;
1830  }
1831 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
1832  CASE(ADPCM_EA_XAS,
1833  for (int channel=0; channel < channels; channel++) {
1834  int coeff[2][4], shift[4];
1835  int16_t *s = samples_p[channel];
1836  for (int n = 0; n < 4; n++, s += 32) {
1837  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1838  for (int i = 0; i < 2; i++)
1839  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1840  s[0] = val & ~0x0F;
1841 
1842  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1843  shift[n] = 20 - (val & 0x0F);
1844  s[1] = val & ~0x0F;
1845  }
1846 
1847  for (int m = 2; m < 32; m += 2) {
1848  s = &samples_p[channel][m];
1849  for (int n = 0; n < 4; n++, s += 32) {
1850  int level, pred;
1851  int byte = bytestream2_get_byteu(&gb);
1852 
1853  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1854  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1855  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1856 
1857  level = sign_extend(byte, 4) * (1 << shift[n]);
1858  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1859  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1860  }
1861  }
1862  }
1863  ) /* End of CASE */
1864  CASE(ADPCM_IMA_ACORN,
1865  for (int channel = 0; channel < channels; channel++) {
1866  ADPCMChannelStatus *cs = &c->status[channel];
1867  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1868  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1869  if (cs->step_index > 88u){
1870  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1871  channel, cs->step_index);
1872  return AVERROR_INVALIDDATA;
1873  }
1874  }
1875  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1876  int byte = bytestream2_get_byteu(&gb);
1877  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1878  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1879  }
1880  ) /* End of CASE */
1881  CASE(ADPCM_IMA_AMV,
1882  av_assert0(channels == 1);
1883 
1884  /*
1885  * Header format:
1886  * int16_t predictor;
1887  * uint8_t step_index;
1888  * uint8_t reserved;
1889  * uint32_t frame_size;
1890  *
1891  * Some implementations have step_index as 16-bits, but others
1892  * only use the lower 8 and store garbage in the upper 8.
1893  */
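 /* For illustration (hypothetical header bytes 02 00 10 xx xx xx xx xx): the
    code below would read predictor = 2 and step_index = 0x10, then skip the
    remaining 5 bytes (the reserved byte plus frame_size). */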
1894  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1895  c->status[0].step_index = bytestream2_get_byteu(&gb);
1896  bytestream2_skipu(&gb, 5);
1897  if (c->status[0].step_index > 88u) {
1898  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1899  c->status[0].step_index);
1900  return AVERROR_INVALIDDATA;
1901  }
1902 
1903  for (int n = nb_samples >> 1; n > 0; n--) {
1904  int v = bytestream2_get_byteu(&gb);
1905 
1906  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1907  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1908  }
1909 
1910  if (nb_samples & 1) {
1911  int v = bytestream2_get_byteu(&gb);
1912  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1913 
1914  if (v & 0x0F) {
1915  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1916  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1917  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1918  }
1919  }
1920  ) /* End of CASE */
1921  CASE(ADPCM_IMA_SMJPEG,
1922  for (int i = 0; i < channels; i++) {
1923  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1924  c->status[i].step_index = bytestream2_get_byteu(&gb);
1925  bytestream2_skipu(&gb, 1);
1926  if (c->status[i].step_index > 88u) {
1927  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1928  c->status[i].step_index);
1929  return AVERROR_INVALIDDATA;
1930  }
1931  }
1932 
1933  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1934  int v = bytestream2_get_byteu(&gb);
1935 
1936  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1937  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1938  }
1939  ) /* End of CASE */
1940  CASE(ADPCM_CT,
1941  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1942  int v = bytestream2_get_byteu(&gb);
1943  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1944  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1945  }
1946  ) /* End of CASE */
1947 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
1948  CONFIG_ADPCM_SBPRO_4_DECODER
1949  case AV_CODEC_ID_ADPCM_SBPRO_2:
1950  case AV_CODEC_ID_ADPCM_SBPRO_3:
1951  case AV_CODEC_ID_ADPCM_SBPRO_4:
1952  if (!c->status[0].step_index) {
1953  /* the first byte is a raw sample */
1954  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1955  if (st)
1956  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1957  c->status[0].step_index = 1;
1958  nb_samples--;
1959  }
1960  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1961  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1962  int byte = bytestream2_get_byteu(&gb);
1963  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1964  byte >> 4, 4, 0);
1965  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1966  byte & 0x0F, 4, 0);
1967  }
1968  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1969  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
1970  int byte = bytestream2_get_byteu(&gb);
1971  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1972  byte >> 5 , 3, 0);
1973  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1974  (byte >> 2) & 0x07, 3, 0);
1975  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1976  byte & 0x03, 2, 0);
1977  }
1978  } else {
1979  for (int n = nb_samples >> (2 - st); n > 0; n--) {
1980  int byte = bytestream2_get_byteu(&gb);
1981  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1982  byte >> 6 , 2, 2);
1983  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1984  (byte >> 4) & 0x03, 2, 2);
1985  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1986  (byte >> 2) & 0x03, 2, 2);
1987  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1988  byte & 0x03, 2, 2);
1989  }
1990  }
1991  break;
1992 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
1993  CASE(ADPCM_SWF,
1994  adpcm_swf_decode(avctx, buf, buf_size, samples);
1995  bytestream2_seek(&gb, 0, SEEK_END);
1996  ) /* End of CASE */
1997  CASE(ADPCM_YAMAHA,
1998  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1999  int v = bytestream2_get_byteu(&gb);
2000  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
2001  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
2002  }
2003  ) /* End of CASE */
2004  CASE(ADPCM_AICA,
2005  for (int channel = 0; channel < channels; channel++) {
2006  samples = samples_p[channel];
2007  for (int n = nb_samples >> 1; n > 0; n--) {
2008  int v = bytestream2_get_byteu(&gb);
2009  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
2010  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
2011  }
2012  }
2013  ) /* End of CASE */
2014  CASE(ADPCM_AFC,
2015  int samples_per_block;
2016  int blocks;
2017 
2018  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
2019  samples_per_block = avctx->extradata[0] / 16;
2020  blocks = nb_samples / avctx->extradata[0];
2021  } else {
2022  samples_per_block = nb_samples / 16;
2023  blocks = 1;
2024  }
2025 
2026  for (int m = 0; m < blocks; m++) {
2027  for (int channel = 0; channel < channels; channel++) {
2028  int prev1 = c->status[channel].sample1;
2029  int prev2 = c->status[channel].sample2;
2030 
2031  samples = samples_p[channel] + m * 16;
2032  /* Read in every sample for this channel. */
2033  for (int i = 0; i < samples_per_block; i++) {
2034  int byte = bytestream2_get_byteu(&gb);
2035  int scale = 1 << (byte >> 4);
2036  int index = byte & 0xf;
2037  int factor1 = afc_coeffs[0][index];
2038  int factor2 = afc_coeffs[1][index];
2039 
2040  /* Decode 16 samples. */
2041  for (int n = 0; n < 16; n++) {
2042  int32_t sampledat;
2043 
2044  if (n & 1) {
2045  sampledat = sign_extend(byte, 4);
2046  } else {
2047  byte = bytestream2_get_byteu(&gb);
2048  sampledat = sign_extend(byte >> 4, 4);
2049  }
2050 
2051  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
2052  sampledat * scale;
2053  *samples = av_clip_int16(sampledat);
2054  prev2 = prev1;
2055  prev1 = *samples++;
2056  }
2057  }
2058 
2059  c->status[channel].sample1 = prev1;
2060  c->status[channel].sample2 = prev2;
2061  }
2062  }
2063  bytestream2_seek(&gb, 0, SEEK_END);
2064  ) /* End of CASE */
2065 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2066  case AV_CODEC_ID_ADPCM_THP:
2067  case AV_CODEC_ID_ADPCM_THP_LE:
2068  {
2069  int table[14][16];
2070 
2071 #define THP_GET16(g) \
2072  sign_extend( \
2073  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2074  bytestream2_get_le16u(&(g)) : \
2075  bytestream2_get_be16u(&(g)), 16)
2076 
2077  if (avctx->extradata) {
2078  GetByteContext tb;
2079  if (avctx->extradata_size < 32 * channels) {
2080  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2081  return AVERROR_INVALIDDATA;
2082  }
2083 
2084  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2085  for (int i = 0; i < channels; i++)
2086  for (int n = 0; n < 16; n++)
2087  table[i][n] = THP_GET16(tb);
2088  } else {
2089  for (int i = 0; i < channels; i++)
2090  for (int n = 0; n < 16; n++)
2091  table[i][n] = THP_GET16(gb);
2092 
2093  if (!c->has_status) {
2094  /* Initialize the previous sample. */
2095  for (int i = 0; i < channels; i++) {
2096  c->status[i].sample1 = THP_GET16(gb);
2097  c->status[i].sample2 = THP_GET16(gb);
2098  }
2099  c->has_status = 1;
2100  } else {
2101  bytestream2_skip(&gb, channels * 4);
2102  }
2103  }
2104 
2105  for (int ch = 0; ch < channels; ch++) {
2106  samples = samples_p[ch];
2107 
2108  /* Read in every sample for this channel. */
2109  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2110  int byte = bytestream2_get_byteu(&gb);
2111  int index = (byte >> 4) & 7;
2112  unsigned int exp = byte & 0x0F;
2113  int64_t factor1 = table[ch][index * 2];
2114  int64_t factor2 = table[ch][index * 2 + 1];
2115 
2116  /* Decode 14 samples. */
2117  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2118  int32_t sampledat;
2119 
2120  if (n & 1) {
2121  sampledat = sign_extend(byte, 4);
2122  } else {
2123  byte = bytestream2_get_byteu(&gb);
2124  sampledat = sign_extend(byte >> 4, 4);
2125  }
2126 
2127  sampledat = ((c->status[ch].sample1 * factor1
2128  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2129  *samples = av_clip_int16(sampledat);
2130  c->status[ch].sample2 = c->status[ch].sample1;
2131  c->status[ch].sample1 = *samples++;
2132  }
2133  }
2134  }
2135  break;
2136  }
2137 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
2138  CASE(ADPCM_DTK,
2139  for (int channel = 0; channel < channels; channel++) {
2140  samples = samples_p[channel];
2141 
2142  /* Read in every sample for this channel. */
2143  for (int i = 0; i < nb_samples / 28; i++) {
2144  int byte, header;
2145  if (channel)
2146  bytestream2_skipu(&gb, 1);
2147  header = bytestream2_get_byteu(&gb);
2148  bytestream2_skipu(&gb, 3 - channel);
2149 
2150  /* Decode 28 samples. */
2151  for (int n = 0; n < 28; n++) {
2152  int32_t sampledat, prev;
2153 
2154  switch (header >> 4) {
2155  case 1:
2156  prev = (c->status[channel].sample1 * 0x3c);
2157  break;
2158  case 2:
2159  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2160  break;
2161  case 3:
2162  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2163  break;
2164  default:
2165  prev = 0;
2166  }
2167 
2168  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2169 
2170  byte = bytestream2_get_byteu(&gb);
2171  if (!channel)
2172  sampledat = sign_extend(byte, 4);
2173  else
2174  sampledat = sign_extend(byte >> 4, 4);
2175 
2176  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2177  *samples++ = av_clip_int16(sampledat >> 6);
2178  c->status[channel].sample2 = c->status[channel].sample1;
2179  c->status[channel].sample1 = sampledat;
2180  }
2181  }
2182  if (!channel)
2183  bytestream2_seek(&gb, 0, SEEK_SET);
2184  }
2185  ) /* End of CASE */
2186  CASE(ADPCM_PSX,
2187  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2188  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
2189  for (int channel = 0; channel < channels; channel++) {
2190  samples = samples_p[channel] + block * nb_samples_per_block;
2191  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2192 
2193  /* Read in every sample for this channel. */
2194  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2195  int filter, shift, flag, byte;
2196 
2197  filter = bytestream2_get_byteu(&gb);
2198  shift = filter & 0xf;
2199  filter = filter >> 4;
2200  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2201  return AVERROR_INVALIDDATA;
2202  flag = bytestream2_get_byteu(&gb) & 0x7;
2203 
2204  /* Decode 28 samples. */
2205  for (int n = 0; n < 28; n++) {
2206  int sample = 0, scale;
2207 
2208  if (n & 1) {
2209  scale = sign_extend(byte >> 4, 4);
2210  } else {
2211  byte = bytestream2_get_byteu(&gb);
2212  scale = sign_extend(byte, 4);
2213  }
2214 
2215  if (flag < 0x07) {
2216  scale = scale * (1 << 12);
2217  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2218  }
2219  *samples++ = av_clip_int16(sample);
2220  c->status[channel].sample2 = c->status[channel].sample1;
2221  c->status[channel].sample1 = sample;
2222  }
2223  }
2224  }
2225  }
2226  ) /* End of CASE */
2227  CASE(ADPCM_ARGO,
2228  /*
2229  * The format of each block:
2230  * uint8_t left_control;
2231  * uint4_t left_samples[nb_samples];
2232  * ---- and if stereo ----
2233  * uint8_t right_control;
2234  * uint4_t right_samples[nb_samples];
2235  *
2236  * Format of the control byte:
2237  * MSB [SSSSRDRR] LSB
2238  * S = (Shift Amount - 2)
2239  * D = Decoder flag.
2240  * R = Reserved
2241  *
2242  * Each block relies on the previous two samples of each channel.
2243  * They should be 0 initially.
2244  */
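 /* Worked example (hypothetical control byte 0xA4): shift = (0xA4 >> 4) + 2 = 12
    and the decoder flag (0xA4 & 0x04) is set, so each nibble of the following
    16 bytes is expanded with that shift. */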
2245  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2246  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2247  ADPCMChannelStatus *cs = c->status + channel;
2248  int control, shift;
2249 
2250  samples = samples_p[channel] + block * 32;
2251 
2252  /* Get the control byte and decode the samples, 2 at a time. */
2253  control = bytestream2_get_byteu(&gb);
2254  shift = (control >> 4) + 2;
2255 
2256  for (int n = 0; n < 16; n++) {
2257  int sample = bytestream2_get_byteu(&gb);
2258  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2259  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2260  }
2261  }
2262  }
2263  ) /* End of CASE */
2264  CASE(ADPCM_ZORK,
2265  for (int n = 0; n < nb_samples * channels; n++) {
2266  int v = bytestream2_get_byteu(&gb);
2267  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2268  }
2269  ) /* End of CASE */
2270  CASE(ADPCM_IMA_MTF,
2271  for (int n = nb_samples / 2; n > 0; n--) {
2272  for (int channel = 0; channel < channels; channel++) {
2273  int v = bytestream2_get_byteu(&gb);
2274  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2275  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2276  }
2277  samples += channels;
2278  }
2279  ) /* End of CASE */
2280  default:
2281  av_assert0(0); // unsupported codec_id should not happen
2282  }
2283 
2284  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2285  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2286  return AVERROR_INVALIDDATA;
2287  }
2288 
2289  *got_frame_ptr = 1;
2290 
2291  if (avpkt->size < bytestream2_tell(&gb)) {
2292  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2293  return avpkt->size;
2294  }
2295 
2296  return bytestream2_tell(&gb);
2297 }
2298 
2299 static void adpcm_flush(AVCodecContext *avctx)
2300 {
2301  ADPCMDecodeContext *c = avctx->priv_data;
2302 
2303  /* Just nuke the entire state and re-init. */
2304  memset(c, 0, sizeof(ADPCMDecodeContext));
2305 
2306  switch(avctx->codec_id) {
2307  case AV_CODEC_ID_ADPCM_CT:
2308  c->status[0].step = c->status[1].step = 511;
2309  break;
2310 
2311  case AV_CODEC_ID_ADPCM_IMA_APC:
2312  if (avctx->extradata && avctx->extradata_size >= 8) {
2313  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2314  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2315  }
2316  break;
2317 
2318  case AV_CODEC_ID_ADPCM_IMA_APM:
2319  if (avctx->extradata && avctx->extradata_size >= 28) {
2320  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2321  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2322  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2323  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2324  }
2325  break;
2326 
2327  case AV_CODEC_ID_ADPCM_IMA_WS:
2328  if (avctx->extradata && avctx->extradata_size >= 2)
2329  c->vqa_version = AV_RL16(avctx->extradata);
2330  break;
2331  default:
2332  /* Other codecs may want to handle this during decoding. */
2333  c->has_status = 0;
2334  return;
2335  }
2336 
2337  c->has_status = 1;
2338 }
2339 
2340 
2341  static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
2342  AV_SAMPLE_FMT_NONE };
2343  static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2344  AV_SAMPLE_FMT_NONE };
2345  static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2346  AV_SAMPLE_FMT_S16P,
2347  AV_SAMPLE_FMT_NONE };
2348 
2349 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2350 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2351 const FFCodec ff_ ## name_ ## _decoder = { \
2352  .p.name = #name_, \
2353  CODEC_LONG_NAME(long_name_), \
2354  .p.type = AVMEDIA_TYPE_AUDIO, \
2355  .p.id = id_, \
2356  .p.capabilities = AV_CODEC_CAP_DR1, \
2357  .p.sample_fmts = sample_fmts_, \
2358  .priv_data_size = sizeof(ADPCMDecodeContext), \
2359  .init = adpcm_decode_init, \
2360  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2361  .flush = adpcm_flush, \
2362 };
2363 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2364  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2365 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2366  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2367 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2368  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2369  name, sample_fmts, long_name)
2370 
2371 /* Note: Do not forget to add new entries to the Makefile as well. */
2372 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2373 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2374 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2375 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2376 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2377 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2378 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2379 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2380 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2381 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2382 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2383 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2384 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2385 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2386 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2387 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2388 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2389 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2390 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2391 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2392 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2393 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2394 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2395 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2396 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2397 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2398 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2399 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2400 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2401 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2402 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2403 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2404 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2405 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2406 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2407 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2408 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2409 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2410 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2411 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2412 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2413 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2414 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2415 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2416 ADPCM_DECODER(ADPCM_XMD, sample_fmts_s16p, adpcm_xmd, "ADPCM Konami XMD")
2417 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2418 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")
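The decoders registered above are not called directly; they are reached through the generic libavcodec decode API. The sketch below is not part of adpcm.c and only shows roughly how a caller might drive one of them: the codec ID, sample rate, channel count and block_align are illustrative placeholders that a demuxer would normally supply, and error handling is abbreviated.

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

/* Decode one ADPCM packet into frame (frame allocated by the caller). */
static int decode_adpcm_packet(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_ADPCM_IMA_WAV);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ctx->sample_rate = 44100;                      /* placeholder */
    av_channel_layout_default(&ctx->ch_layout, 2); /* stereo, placeholder */
    ctx->block_align = 2048;                       /* placeholder */

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, frame);   /* planar S16 samples on success */

    avcodec_free_context(&ctx);
    return ret;
}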