FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 
38 #include "config_components.h"
39 
40 #include "avcodec.h"
41 #include "get_bits.h"
42 #include "bytestream.h"
43 #include "adpcm.h"
44 #include "adpcm_data.h"
45 #include "codec_internal.h"
46 #include "internal.h"
47 
48 /**
49  * @file
50  * ADPCM decoders
51  * Features and limitations:
52  *
53  * Reference documents:
54  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
55  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
56  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
57  * http://openquicktime.sourceforge.net/
58  * XAnim sources (xa_codec.c) http://xanim.polter.net/
59  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
60  * SoX source code http://sox.sourceforge.net/
61  *
62  * CD-ROM XA:
63  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
64  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
65  * readstr http://www.geocities.co.jp/Playtown/2004/
66  */
67 
68 #define CASE_0(codec_id, ...)
69 #define CASE_1(codec_id, ...) \
70  case codec_id: \
71  { __VA_ARGS__ } \
72  break;
73 #define CASE_2(enabled, codec_id, ...) \
74  CASE_ ## enabled(codec_id, __VA_ARGS__)
75 #define CASE_3(config, codec_id, ...) \
76  CASE_2(config, codec_id, __VA_ARGS__)
77 #define CASE(codec, ...) \
78  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
79 
80 /* These are for CD-ROM XA ADPCM */
81 static const int8_t xa_adpcm_table[5][2] = {
82  { 0, 0 },
83  { 60, 0 },
84  { 115, -52 },
85  { 98, -55 },
86  { 122, -60 }
87 };
88 
89 static const int16_t afc_coeffs[2][16] = {
90  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
91  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
92 };
93 
94 static const int16_t ea_adpcm_table[] = {
95  0, 240, 460, 392,
96  0, 0, -208, -220,
97  0, 1, 3, 4,
98  7, 8, 10, 11,
99  0, -1, -3, -4
100 };
101 
102 /*
103  * Dumped from the binaries:
104  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
105  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
106  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
107  */
108 static const int8_t ima_cunning_index_table[9] = {
109  -1, -1, -1, -1, 1, 2, 3, 4, -1
110 };
111 
112 /*
113  * Dumped from the binaries:
114  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
115  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
116  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
117  */
118 static const int16_t ima_cunning_step_table[61] = {
119  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
120  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
121  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
122  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
123  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
124  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
125 };
126 
127 static const int8_t adpcm_index_table2[4] = {
128  -1, 2,
129  -1, 2,
130 };
131 
132 static const int8_t adpcm_index_table3[8] = {
133  -1, -1, 1, 2,
134  -1, -1, 1, 2,
135 };
136 
137 static const int8_t adpcm_index_table5[32] = {
138  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
139  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
140 };
141 
142 static const int8_t * const adpcm_index_tables[4] = {
143  &adpcm_index_table2[0],
144  &adpcm_index_table3[0],
146  &adpcm_index_table5[0],
147 };
148 
149 static const int16_t mtaf_stepsize[32][16] = {
150  { 1, 5, 9, 13, 16, 20, 24, 28,
151  -1, -5, -9, -13, -16, -20, -24, -28, },
152  { 2, 6, 11, 15, 20, 24, 29, 33,
153  -2, -6, -11, -15, -20, -24, -29, -33, },
154  { 2, 7, 13, 18, 23, 28, 34, 39,
155  -2, -7, -13, -18, -23, -28, -34, -39, },
156  { 3, 9, 15, 21, 28, 34, 40, 46,
157  -3, -9, -15, -21, -28, -34, -40, -46, },
158  { 3, 11, 18, 26, 33, 41, 48, 56,
159  -3, -11, -18, -26, -33, -41, -48, -56, },
160  { 4, 13, 22, 31, 40, 49, 58, 67,
161  -4, -13, -22, -31, -40, -49, -58, -67, },
162  { 5, 16, 26, 37, 48, 59, 69, 80,
163  -5, -16, -26, -37, -48, -59, -69, -80, },
164  { 6, 19, 31, 44, 57, 70, 82, 95,
165  -6, -19, -31, -44, -57, -70, -82, -95, },
166  { 7, 22, 38, 53, 68, 83, 99, 114,
167  -7, -22, -38, -53, -68, -83, -99, -114, },
168  { 9, 27, 45, 63, 81, 99, 117, 135,
169  -9, -27, -45, -63, -81, -99, -117, -135, },
170  { 10, 32, 53, 75, 96, 118, 139, 161,
171  -10, -32, -53, -75, -96, -118, -139, -161, },
172  { 12, 38, 64, 90, 115, 141, 167, 193,
173  -12, -38, -64, -90, -115, -141, -167, -193, },
174  { 15, 45, 76, 106, 137, 167, 198, 228,
175  -15, -45, -76, -106, -137, -167, -198, -228, },
176  { 18, 54, 91, 127, 164, 200, 237, 273,
177  -18, -54, -91, -127, -164, -200, -237, -273, },
178  { 21, 65, 108, 152, 195, 239, 282, 326,
179  -21, -65, -108, -152, -195, -239, -282, -326, },
180  { 25, 77, 129, 181, 232, 284, 336, 388,
181  -25, -77, -129, -181, -232, -284, -336, -388, },
182  { 30, 92, 153, 215, 276, 338, 399, 461,
183  -30, -92, -153, -215, -276, -338, -399, -461, },
184  { 36, 109, 183, 256, 329, 402, 476, 549,
185  -36, -109, -183, -256, -329, -402, -476, -549, },
186  { 43, 130, 218, 305, 392, 479, 567, 654,
187  -43, -130, -218, -305, -392, -479, -567, -654, },
188  { 52, 156, 260, 364, 468, 572, 676, 780,
189  -52, -156, -260, -364, -468, -572, -676, -780, },
190  { 62, 186, 310, 434, 558, 682, 806, 930,
191  -62, -186, -310, -434, -558, -682, -806, -930, },
192  { 73, 221, 368, 516, 663, 811, 958, 1106,
193  -73, -221, -368, -516, -663, -811, -958, -1106, },
194  { 87, 263, 439, 615, 790, 966, 1142, 1318,
195  -87, -263, -439, -615, -790, -966, -1142, -1318, },
196  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
197  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
198  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
199  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
200  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
201  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
202  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
203  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
204  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
205  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
206  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
207  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
208  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
209  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
210  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
211  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
212  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
213  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
214 };
215 
216 static const int16_t oki_step_table[49] = {
217  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
218  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
219  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
220  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
221  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
222 };
223 
224 // padded to zero where table size is less then 16
225 static const int8_t swf_index_tables[4][16] = {
226  /*2*/ { -1, 2 },
227  /*3*/ { -1, -1, 2, 4 },
228  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
229  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
230 };
231 
232 static const int8_t zork_index_table[8] = {
233  -1, -1, -1, 1, 4, 7, 10, 12,
234 };
235 
236 static const int8_t mtf_index_table[16] = {
237  8, 6, 4, 2, -1, -1, -1, -1,
238  -1, -1, -1, -1, 2, 4, 6, 8,
239 };
240 
241 /* end of tables */
242 
243 typedef struct ADPCMDecodeContext {
245  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
246  int has_status; /**< Status flag. Reset to 0 after a flush. */
248 
249 static void adpcm_flush(AVCodecContext *avctx);
250 
252 {
253  ADPCMDecodeContext *c = avctx->priv_data;
254  unsigned int min_channels = 1;
255  unsigned int max_channels = 2;
256 
257  adpcm_flush(avctx);
258 
259  switch(avctx->codec->id) {
261  max_channels = 1;
262  break;
265  min_channels = 2;
266  break;
273  max_channels = 6;
274  break;
276  min_channels = 2;
277  max_channels = 8;
278  if (avctx->ch_layout.nb_channels & 1) {
279  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
280  return AVERROR_PATCHWELCOME;
281  }
282  break;
284  max_channels = 8;
285  if (avctx->ch_layout.nb_channels <= 0 ||
286  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
287  return AVERROR_INVALIDDATA;
288  break;
292  max_channels = 14;
293  break;
294  }
295  if (avctx->ch_layout.nb_channels < min_channels ||
296  avctx->ch_layout.nb_channels > max_channels) {
297  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
298  return AVERROR(EINVAL);
299  }
300 
301  switch(avctx->codec->id) {
303  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
304  return AVERROR_INVALIDDATA;
305  break;
307  if (avctx->bits_per_coded_sample != 4 ||
308  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
309  return AVERROR_INVALIDDATA;
310  break;
312  if (avctx->bits_per_coded_sample != 8)
313  return AVERROR_INVALIDDATA;
314  break;
315  default:
316  break;
317  }
318 
319  switch (avctx->codec->id) {
340  break;
342  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
344  break;
346  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
348  break;
349  default:
350  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
351  }
352  return 0;
353 }
354 
355 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
356 {
357  int delta, pred, step, add;
358 
359  pred = c->predictor;
360  delta = nibble & 7;
361  step = c->step;
362  add = (delta * 2 + 1) * step;
363  if (add < 0)
364  add = add + 7;
365 
366  if ((nibble & 8) == 0)
367  pred = av_clip(pred + (add >> 3), -32767, 32767);
368  else
369  pred = av_clip(pred - (add >> 3), -32767, 32767);
370 
371  switch (delta) {
372  case 7:
373  step *= 0x99;
374  break;
375  case 6:
376  c->step = av_clip(c->step * 2, 127, 24576);
377  c->predictor = pred;
378  return pred;
379  case 5:
380  step *= 0x66;
381  break;
382  case 4:
383  step *= 0x4d;
384  break;
385  default:
386  step *= 0x39;
387  break;
388  }
389 
390  if (step < 0)
391  step += 0x3f;
392 
393  c->step = step >> 6;
394  c->step = av_clip(c->step, 127, 24576);
395  c->predictor = pred;
396  return pred;
397 }
398 
399 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
400 {
401  int step_index;
402  int predictor;
403  int sign, delta, diff, step;
404 
405  step = ff_adpcm_step_table[c->step_index];
406  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
407  step_index = av_clip(step_index, 0, 88);
408 
409  sign = nibble & 8;
410  delta = nibble & 7;
411  /* perform direct multiplication instead of series of jumps proposed by
412  * the reference ADPCM implementation since modern CPUs can do the mults
413  * quickly enough */
414  diff = ((2 * delta + 1) * step) >> shift;
415  predictor = c->predictor;
416  if (sign) predictor -= diff;
417  else predictor += diff;
418 
419  c->predictor = av_clip_int16(predictor);
420  c->step_index = step_index;
421 
422  return (int16_t)c->predictor;
423 }
424 
425 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
426 {
427  int step_index;
428  int predictor;
429  int sign, delta, diff, step;
430 
431  step = ff_adpcm_step_table[c->step_index];
432  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
433  step_index = av_clip(step_index, 0, 88);
434 
435  sign = nibble & 8;
436  delta = nibble & 7;
437  diff = (delta * step) >> shift;
438  predictor = c->predictor;
439  if (sign) predictor -= diff;
440  else predictor += diff;
441 
442  c->predictor = av_clip_int16(predictor);
443  c->step_index = step_index;
444 
445  return (int16_t)c->predictor;
446 }
447 
448 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
449 {
450  int step_index, step, delta, predictor;
451 
452  step = ff_adpcm_step_table[c->step_index];
453 
454  delta = step * (2 * nibble - 15);
455  predictor = c->predictor + delta;
456 
457  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
458  c->predictor = av_clip_int16(predictor >> 4);
459  c->step_index = av_clip(step_index, 0, 88);
460 
461  return (int16_t)c->predictor;
462 }
463 
464 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
465 {
466  int step_index;
467  int predictor;
468  int step;
469 
470  nibble = sign_extend(nibble & 0xF, 4);
471 
472  step = ima_cunning_step_table[c->step_index];
473  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
474  step_index = av_clip(step_index, 0, 60);
475 
476  predictor = c->predictor + step * nibble;
477 
478  c->predictor = av_clip_int16(predictor);
479  c->step_index = step_index;
480 
481  return c->predictor;
482 }
483 
485 {
486  int nibble, step_index, predictor, sign, delta, diff, step, shift;
487 
488  shift = bps - 1;
489  nibble = get_bits_le(gb, bps),
490  step = ff_adpcm_step_table[c->step_index];
491  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
492  step_index = av_clip(step_index, 0, 88);
493 
494  sign = nibble & (1 << shift);
495  delta = av_mod_uintp2(nibble, shift);
496  diff = ((2 * delta + 1) * step) >> shift;
497  predictor = c->predictor;
498  if (sign) predictor -= diff;
499  else predictor += diff;
500 
501  c->predictor = av_clip_int16(predictor);
502  c->step_index = step_index;
503 
504  return (int16_t)c->predictor;
505 }
506 
507 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
508 {
509  int step_index;
510  int predictor;
511  int diff, step;
512 
513  step = ff_adpcm_step_table[c->step_index];
514  step_index = c->step_index + ff_adpcm_index_table[nibble];
515  step_index = av_clip(step_index, 0, 88);
516 
517  diff = step >> 3;
518  if (nibble & 4) diff += step;
519  if (nibble & 2) diff += step >> 1;
520  if (nibble & 1) diff += step >> 2;
521 
522  if (nibble & 8)
523  predictor = c->predictor - diff;
524  else
525  predictor = c->predictor + diff;
526 
527  c->predictor = av_clip_int16(predictor);
528  c->step_index = step_index;
529 
530  return c->predictor;
531 }
532 
533 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
534 {
535  int predictor;
536 
537  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
538  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
539 
540  c->sample2 = c->sample1;
541  c->sample1 = av_clip_int16(predictor);
542  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
543  if (c->idelta < 16) c->idelta = 16;
544  if (c->idelta > INT_MAX/768) {
545  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
546  c->idelta = INT_MAX/768;
547  }
548 
549  return c->sample1;
550 }
551 
552 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
553 {
554  int step_index, predictor, sign, delta, diff, step;
555 
556  step = oki_step_table[c->step_index];
557  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
558  step_index = av_clip(step_index, 0, 48);
559 
560  sign = nibble & 8;
561  delta = nibble & 7;
562  diff = ((2 * delta + 1) * step) >> 3;
563  predictor = c->predictor;
564  if (sign) predictor -= diff;
565  else predictor += diff;
566 
567  c->predictor = av_clip_intp2(predictor, 11);
568  c->step_index = step_index;
569 
570  return c->predictor * 16;
571 }
572 
573 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
574 {
575  int sign, delta, diff;
576  int new_step;
577 
578  sign = nibble & 8;
579  delta = nibble & 7;
580  /* perform direct multiplication instead of series of jumps proposed by
581  * the reference ADPCM implementation since modern CPUs can do the mults
582  * quickly enough */
583  diff = ((2 * delta + 1) * c->step) >> 3;
584  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
585  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
586  c->predictor = av_clip_int16(c->predictor);
587  /* calculate new step and clamp it to range 511..32767 */
588  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
589  c->step = av_clip(new_step, 511, 32767);
590 
591  return (int16_t)c->predictor;
592 }
593 
594 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
595 {
596  int sign, delta, diff;
597 
598  sign = nibble & (1<<(size-1));
599  delta = nibble & ((1<<(size-1))-1);
600  diff = delta << (7 + c->step + shift);
601 
602  /* clamp result */
603  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
604 
605  /* calculate new step */
606  if (delta >= (2*size - 3) && c->step < 3)
607  c->step++;
608  else if (delta == 0 && c->step > 0)
609  c->step--;
610 
611  return (int16_t) c->predictor;
612 }
613 
614 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
615 {
616  if(!c->step) {
617  c->predictor = 0;
618  c->step = 127;
619  }
620 
621  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
622  c->predictor = av_clip_int16(c->predictor);
623  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
624  c->step = av_clip(c->step, 127, 24576);
625  return c->predictor;
626 }
627 
628 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
629 {
630  c->predictor += mtaf_stepsize[c->step][nibble];
631  c->predictor = av_clip_int16(c->predictor);
632  c->step += ff_adpcm_index_table[nibble];
633  c->step = av_clip_uintp2(c->step, 5);
634  return c->predictor;
635 }
636 
637 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
638 {
639  int16_t index = c->step_index;
640  uint32_t lookup_sample = ff_adpcm_step_table[index];
641  int32_t sample = 0;
642 
643  if (nibble & 0x40)
644  sample += lookup_sample;
645  if (nibble & 0x20)
646  sample += lookup_sample >> 1;
647  if (nibble & 0x10)
648  sample += lookup_sample >> 2;
649  if (nibble & 0x08)
650  sample += lookup_sample >> 3;
651  if (nibble & 0x04)
652  sample += lookup_sample >> 4;
653  if (nibble & 0x02)
654  sample += lookup_sample >> 5;
655  if (nibble & 0x01)
656  sample += lookup_sample >> 6;
657  if (nibble & 0x80)
658  sample = -sample;
659 
660  sample += c->predictor;
662 
663  index += zork_index_table[(nibble >> 4) & 7];
664  index = av_clip(index, 0, 88);
665 
666  c->predictor = sample;
667  c->step_index = index;
668 
669  return sample;
670 }
671 
672 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
673  const uint8_t *in, ADPCMChannelStatus *left,
674  ADPCMChannelStatus *right, int channels, int sample_offset)
675 {
676  int i, j;
677  int shift,filter,f0,f1;
678  int s_1,s_2;
679  int d,s,t;
680 
681  out0 += sample_offset;
682  if (channels == 1)
683  out1 = out0 + 28;
684  else
685  out1 += sample_offset;
686 
687  for(i=0;i<4;i++) {
688  shift = 12 - (in[4+i*2] & 15);
689  filter = in[4+i*2] >> 4;
691  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
692  filter=0;
693  }
694  if (shift < 0) {
695  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
696  shift = 0;
697  }
698  f0 = xa_adpcm_table[filter][0];
699  f1 = xa_adpcm_table[filter][1];
700 
701  s_1 = left->sample1;
702  s_2 = left->sample2;
703 
704  for(j=0;j<28;j++) {
705  d = in[16+i+j*4];
706 
707  t = sign_extend(d, 4);
708  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
709  s_2 = s_1;
710  s_1 = av_clip_int16(s);
711  out0[j] = s_1;
712  }
713 
714  if (channels == 2) {
715  left->sample1 = s_1;
716  left->sample2 = s_2;
717  s_1 = right->sample1;
718  s_2 = right->sample2;
719  }
720 
721  shift = 12 - (in[5+i*2] & 15);
722  filter = in[5+i*2] >> 4;
723  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
724  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
725  filter=0;
726  }
727  if (shift < 0) {
728  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
729  shift = 0;
730  }
731 
732  f0 = xa_adpcm_table[filter][0];
733  f1 = xa_adpcm_table[filter][1];
734 
735  for(j=0;j<28;j++) {
736  d = in[16+i+j*4];
737 
738  t = sign_extend(d >> 4, 4);
739  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
740  s_2 = s_1;
741  s_1 = av_clip_int16(s);
742  out1[j] = s_1;
743  }
744 
745  if (channels == 2) {
746  right->sample1 = s_1;
747  right->sample2 = s_2;
748  } else {
749  left->sample1 = s_1;
750  left->sample2 = s_2;
751  }
752 
753  out0 += 28 * (3 - channels);
754  out1 += 28 * (3 - channels);
755  }
756 
757  return 0;
758 }
759 
760 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
761 {
762  ADPCMDecodeContext *c = avctx->priv_data;
763  GetBitContext gb;
764  const int8_t *table;
765  int channels = avctx->ch_layout.nb_channels;
766  int k0, signmask, nb_bits, count;
767  int size = buf_size*8;
768  int i;
769 
770  init_get_bits(&gb, buf, size);
771 
772  //read bits & initial values
773  nb_bits = get_bits(&gb, 2)+2;
774  table = swf_index_tables[nb_bits-2];
775  k0 = 1 << (nb_bits-2);
776  signmask = 1 << (nb_bits-1);
777 
778  while (get_bits_count(&gb) <= size - 22 * channels) {
779  for (i = 0; i < channels; i++) {
780  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
781  c->status[i].step_index = get_bits(&gb, 6);
782  }
783 
784  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
785  int i;
786 
787  for (i = 0; i < channels; i++) {
788  // similar to IMA adpcm
789  int delta = get_bits(&gb, nb_bits);
790  int step = ff_adpcm_step_table[c->status[i].step_index];
791  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
792  int k = k0;
793 
794  do {
795  if (delta & k)
796  vpdiff += step;
797  step >>= 1;
798  k >>= 1;
799  } while(k);
800  vpdiff += step;
801 
802  if (delta & signmask)
803  c->status[i].predictor -= vpdiff;
804  else
805  c->status[i].predictor += vpdiff;
806 
807  c->status[i].step_index += table[delta & (~signmask)];
808 
809  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
810  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
811 
812  *samples++ = c->status[i].predictor;
813  }
814  }
815  }
816 }
817 
818 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
819 {
820  int sample = sign_extend(nibble, 4) * (1 << shift);
821 
822  if (flag)
823  sample += (8 * cs->sample1) - (4 * cs->sample2);
824  else
825  sample += 4 * cs->sample1;
826 
827  sample = av_clip_int16(sample >> 2);
828 
829  cs->sample2 = cs->sample1;
830  cs->sample1 = sample;
831 
832  return sample;
833 }
834 
835 /**
836  * Get the number of samples (per channel) that will be decoded from the packet.
837  * In one case, this is actually the maximum number of samples possible to
838  * decode with the given buf_size.
839  *
840  * @param[out] coded_samples set to the number of samples as coded in the
841  * packet, or 0 if the codec does not encode the
842  * number of samples in each frame.
843  * @param[out] approx_nb_samples set to non-zero if the number of samples
844  * returned is an approximation.
845  */
847  int buf_size, int *coded_samples, int *approx_nb_samples)
848 {
849  ADPCMDecodeContext *s = avctx->priv_data;
850  int nb_samples = 0;
851  int ch = avctx->ch_layout.nb_channels;
852  int has_coded_samples = 0;
853  int header_size;
854 
855  *coded_samples = 0;
856  *approx_nb_samples = 0;
857 
858  if(ch <= 0)
859  return 0;
860 
861  switch (avctx->codec->id) {
862  /* constant, only check buf_size */
864  if (buf_size < 76 * ch)
865  return 0;
866  nb_samples = 128;
867  break;
869  if (buf_size < 34 * ch)
870  return 0;
871  nb_samples = 64;
872  break;
873  /* simple 4-bit adpcm */
886  nb_samples = buf_size * 2 / ch;
887  break;
888  }
889  if (nb_samples)
890  return nb_samples;
891 
892  /* simple 4-bit adpcm, with header */
893  header_size = 0;
894  switch (avctx->codec->id) {
900  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
901  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
902  }
903  if (header_size > 0)
904  return (buf_size - header_size) * 2 / ch;
905 
906  /* more complex formats */
907  switch (avctx->codec->id) {
909  bytestream2_skip(gb, 4);
910  has_coded_samples = 1;
911  *coded_samples = bytestream2_get_le32u(gb);
912  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
913  bytestream2_seek(gb, -8, SEEK_CUR);
914  break;
916  has_coded_samples = 1;
917  *coded_samples = bytestream2_get_le32(gb);
918  *coded_samples -= *coded_samples % 28;
919  nb_samples = (buf_size - 12) / 30 * 28;
920  break;
922  has_coded_samples = 1;
923  *coded_samples = bytestream2_get_le32(gb);
924  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
925  break;
927  nb_samples = (buf_size - ch) / ch * 2;
928  break;
932  /* maximum number of samples */
933  /* has internal offsets and a per-frame switch to signal raw 16-bit */
934  has_coded_samples = 1;
935  switch (avctx->codec->id) {
937  header_size = 4 + 9 * ch;
938  *coded_samples = bytestream2_get_le32(gb);
939  break;
941  header_size = 4 + 5 * ch;
942  *coded_samples = bytestream2_get_le32(gb);
943  break;
945  header_size = 4 + 5 * ch;
946  *coded_samples = bytestream2_get_be32(gb);
947  break;
948  }
949  *coded_samples -= *coded_samples % 28;
950  nb_samples = (buf_size - header_size) * 2 / ch;
951  nb_samples -= nb_samples % 28;
952  *approx_nb_samples = 1;
953  break;
955  if (avctx->block_align > 0)
956  buf_size = FFMIN(buf_size, avctx->block_align);
957  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
958  break;
960  if (avctx->block_align > 0)
961  buf_size = FFMIN(buf_size, avctx->block_align);
962  if (buf_size < 4 * ch)
963  return AVERROR_INVALIDDATA;
964  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
965  break;
967  if (avctx->block_align > 0)
968  buf_size = FFMIN(buf_size, avctx->block_align);
969  nb_samples = (buf_size - 4 * ch) * 2 / ch;
970  break;
971  CASE(ADPCM_IMA_WAV,
972  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
973  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
974  if (avctx->block_align > 0)
975  buf_size = FFMIN(buf_size, avctx->block_align);
976  if (buf_size < 4 * ch)
977  return AVERROR_INVALIDDATA;
978  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
979  ) /* End of CASE */
981  if (avctx->block_align > 0)
982  buf_size = FFMIN(buf_size, avctx->block_align);
983  nb_samples = (buf_size - 6 * ch) * 2 / ch;
984  break;
986  if (avctx->block_align > 0)
987  buf_size = FFMIN(buf_size, avctx->block_align);
988  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
989  break;
993  {
994  int samples_per_byte;
995  switch (avctx->codec->id) {
996  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
997  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
998  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
999  }
1000  if (!s->status[0].step_index) {
1001  if (buf_size < ch)
1002  return AVERROR_INVALIDDATA;
1003  nb_samples++;
1004  buf_size -= ch;
1005  }
1006  nb_samples += buf_size * samples_per_byte / ch;
1007  break;
1008  }
1009  case AV_CODEC_ID_ADPCM_SWF:
1010  {
1011  int buf_bits = buf_size * 8 - 2;
1012  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1013  int block_hdr_size = 22 * ch;
1014  int block_size = block_hdr_size + nbits * ch * 4095;
1015  int nblocks = buf_bits / block_size;
1016  int bits_left = buf_bits - nblocks * block_size;
1017  nb_samples = nblocks * 4096;
1018  if (bits_left >= block_hdr_size)
1019  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1020  break;
1021  }
1022  case AV_CODEC_ID_ADPCM_THP:
1024  if (avctx->extradata) {
1025  nb_samples = buf_size * 14 / (8 * ch);
1026  break;
1027  }
1028  has_coded_samples = 1;
1029  bytestream2_skip(gb, 4); // channel size
1030  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1031  bytestream2_get_le32(gb) :
1032  bytestream2_get_be32(gb);
1033  buf_size -= 8 + 36 * ch;
1034  buf_size /= ch;
1035  nb_samples = buf_size / 8 * 14;
1036  if (buf_size % 8 > 1)
1037  nb_samples += (buf_size % 8 - 1) * 2;
1038  *approx_nb_samples = 1;
1039  break;
1040  case AV_CODEC_ID_ADPCM_AFC:
1041  nb_samples = buf_size / (9 * ch) * 16;
1042  break;
1043  case AV_CODEC_ID_ADPCM_XA:
1044  nb_samples = (buf_size / 128) * 224 / ch;
1045  break;
1046  case AV_CODEC_ID_ADPCM_DTK:
1047  case AV_CODEC_ID_ADPCM_PSX:
1048  nb_samples = buf_size / (16 * ch) * 28;
1049  break;
1051  nb_samples = buf_size / avctx->block_align * 32;
1052  break;
1054  nb_samples = buf_size / ch;
1055  break;
1056  }
1057 
1058  /* validate coded sample count */
1059  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1060  return AVERROR_INVALIDDATA;
1061 
1062  return nb_samples;
1063 }
1064 
1066  int *got_frame_ptr, AVPacket *avpkt)
1067 {
1068  const uint8_t *buf = avpkt->data;
1069  int buf_size = avpkt->size;
1070  ADPCMDecodeContext *c = avctx->priv_data;
1071  int channels = avctx->ch_layout.nb_channels;
1072  int16_t *samples;
1073  int16_t **samples_p;
1074  int st; /* stereo */
1075  int nb_samples, coded_samples, approx_nb_samples, ret;
1076  GetByteContext gb;
1077 
1078  bytestream2_init(&gb, buf, buf_size);
1079  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1080  if (nb_samples <= 0) {
1081  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1082  return AVERROR_INVALIDDATA;
1083  }
1084 
1085  /* get output buffer */
1086  frame->nb_samples = nb_samples;
1087  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1088  return ret;
1089  samples = (int16_t *)frame->data[0];
1090  samples_p = (int16_t **)frame->extended_data;
1091 
1092  /* use coded_samples when applicable */
1093  /* it is always <= nb_samples, so the output buffer will be large enough */
1094  if (coded_samples) {
1095  if (!approx_nb_samples && coded_samples != nb_samples)
1096  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1097  frame->nb_samples = nb_samples = coded_samples;
1098  }
1099 
1100  st = channels == 2 ? 1 : 0;
1101 
1102  switch(avctx->codec->id) {
1103  CASE(ADPCM_IMA_QT,
1104  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1105  Channel data is interleaved per-chunk. */
1106  for (int channel = 0; channel < channels; channel++) {
1107  ADPCMChannelStatus *cs = &c->status[channel];
1108  int predictor;
1109  int step_index;
1110  /* (pppppp) (piiiiiii) */
1111 
1112  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1113  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1114  step_index = predictor & 0x7F;
1115  predictor &= ~0x7F;
1116 
1117  if (cs->step_index == step_index) {
1118  int diff = predictor - cs->predictor;
1119  if (diff < 0)
1120  diff = - diff;
1121  if (diff > 0x7f)
1122  goto update;
1123  } else {
1124  update:
1125  cs->step_index = step_index;
1126  cs->predictor = predictor;
1127  }
1128 
1129  if (cs->step_index > 88u){
1130  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1131  channel, cs->step_index);
1132  return AVERROR_INVALIDDATA;
1133  }
1134 
1135  samples = samples_p[channel];
1136 
1137  for (int m = 0; m < 64; m += 2) {
1138  int byte = bytestream2_get_byteu(&gb);
1139  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1140  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1141  }
1142  }
1143  ) /* End of CASE */
1144  CASE(ADPCM_IMA_WAV,
1145  for (int i = 0; i < channels; i++) {
1146  ADPCMChannelStatus *cs = &c->status[i];
1147  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1148 
1149  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1150  if (cs->step_index > 88u){
1151  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1152  i, cs->step_index);
1153  return AVERROR_INVALIDDATA;
1154  }
1155  }
1156 
1157  if (avctx->bits_per_coded_sample != 4) {
1158  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1159  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1160  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1161  GetBitContext g;
1162 
1163  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1164  for (int i = 0; i < channels; i++) {
1165  ADPCMChannelStatus *cs = &c->status[i];
1166  samples = &samples_p[i][1 + n * samples_per_block];
1167  for (int j = 0; j < block_size; j++) {
1168  temp[j] = buf[4 * channels + block_size * n * channels +
1169  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1170  }
1171  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1172  if (ret < 0)
1173  return ret;
1174  for (int m = 0; m < samples_per_block; m++) {
1176  avctx->bits_per_coded_sample);
1177  }
1178  }
1179  }
1180  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1181  } else {
1182  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1183  for (int i = 0; i < channels; i++) {
1184  ADPCMChannelStatus *cs = &c->status[i];
1185  samples = &samples_p[i][1 + n * 8];
1186  for (int m = 0; m < 8; m += 2) {
1187  int v = bytestream2_get_byteu(&gb);
1188  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1189  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1190  }
1191  }
1192  }
1193  }
1194  ) /* End of CASE */
1195  CASE(ADPCM_4XM,
1196  for (int i = 0; i < channels; i++)
1197  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1198 
1199  for (int i = 0; i < channels; i++) {
1200  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1201  if (c->status[i].step_index > 88u) {
1202  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1203  i, c->status[i].step_index);
1204  return AVERROR_INVALIDDATA;
1205  }
1206  }
1207 
1208  for (int i = 0; i < channels; i++) {
1209  ADPCMChannelStatus *cs = &c->status[i];
1210  samples = (int16_t *)frame->data[i];
1211  for (int n = nb_samples >> 1; n > 0; n--) {
1212  int v = bytestream2_get_byteu(&gb);
1213  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1214  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1215  }
1216  }
1217  ) /* End of CASE */
1218  CASE(ADPCM_AGM,
1219  for (int i = 0; i < channels; i++)
1220  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1221  for (int i = 0; i < channels; i++)
1222  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1223 
1224  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1225  int v = bytestream2_get_byteu(&gb);
1226  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1227  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1228  }
1229  ) /* End of CASE */
1230  CASE(ADPCM_MS,
1231  int block_predictor;
1232 
1233  if (avctx->ch_layout.nb_channels > 2) {
1234  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1235  samples = samples_p[channel];
1236  block_predictor = bytestream2_get_byteu(&gb);
1237  if (block_predictor > 6) {
1238  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1239  channel, block_predictor);
1240  return AVERROR_INVALIDDATA;
1241  }
1242  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1243  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1244  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1245  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1246  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1247  *samples++ = c->status[channel].sample2;
1248  *samples++ = c->status[channel].sample1;
1249  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1250  int byte = bytestream2_get_byteu(&gb);
1251  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1252  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1253  }
1254  }
1255  } else {
1256  block_predictor = bytestream2_get_byteu(&gb);
1257  if (block_predictor > 6) {
1258  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1259  block_predictor);
1260  return AVERROR_INVALIDDATA;
1261  }
1262  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1263  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1264  if (st) {
1265  block_predictor = bytestream2_get_byteu(&gb);
1266  if (block_predictor > 6) {
1267  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1268  block_predictor);
1269  return AVERROR_INVALIDDATA;
1270  }
1271  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1272  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1273  }
1274  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1275  if (st){
1276  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1277  }
1278 
1279  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1280  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1281  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1282  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1283 
1284  *samples++ = c->status[0].sample2;
1285  if (st) *samples++ = c->status[1].sample2;
1286  *samples++ = c->status[0].sample1;
1287  if (st) *samples++ = c->status[1].sample1;
1288  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1289  int byte = bytestream2_get_byteu(&gb);
1290  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1291  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1292  }
1293  }
1294  ) /* End of CASE */
1295  CASE(ADPCM_MTAF,
1296  for (int channel = 0; channel < channels; channel += 2) {
1297  bytestream2_skipu(&gb, 4);
1298  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1299  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1300  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1301  bytestream2_skipu(&gb, 2);
1302  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1303  bytestream2_skipu(&gb, 2);
1304  for (int n = 0; n < nb_samples; n += 2) {
1305  int v = bytestream2_get_byteu(&gb);
1306  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1307  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1308  }
1309  for (int n = 0; n < nb_samples; n += 2) {
1310  int v = bytestream2_get_byteu(&gb);
1311  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1312  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1313  }
1314  }
1315  ) /* End of CASE */
1316  CASE(ADPCM_IMA_DK4,
1317  for (int channel = 0; channel < channels; channel++) {
1318  ADPCMChannelStatus *cs = &c->status[channel];
1319  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1320  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1321  if (cs->step_index > 88u){
1322  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1323  channel, cs->step_index);
1324  return AVERROR_INVALIDDATA;
1325  }
1326  }
1327  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1328  int v = bytestream2_get_byteu(&gb);
1329  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1330  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1331  }
1332  ) /* End of CASE */
1333 
1334  /* DK3 ADPCM support macro */
1335 #define DK3_GET_NEXT_NIBBLE() \
1336  if (decode_top_nibble_next) { \
1337  nibble = last_byte >> 4; \
1338  decode_top_nibble_next = 0; \
1339  } else { \
1340  last_byte = bytestream2_get_byteu(&gb); \
1341  nibble = last_byte & 0x0F; \
1342  decode_top_nibble_next = 1; \
1343  }
1344  CASE(ADPCM_IMA_DK3,
1345  int last_byte = 0;
1346  int nibble;
1347  int decode_top_nibble_next = 0;
1348  int diff_channel;
1349  const int16_t *samples_end = samples + channels * nb_samples;
1350 
1351  bytestream2_skipu(&gb, 10);
1352  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1353  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1354  c->status[0].step_index = bytestream2_get_byteu(&gb);
1355  c->status[1].step_index = bytestream2_get_byteu(&gb);
1356  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1357  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1358  c->status[0].step_index, c->status[1].step_index);
1359  return AVERROR_INVALIDDATA;
1360  }
1361  /* sign extend the predictors */
1362  diff_channel = c->status[1].predictor;
1363 
1364  while (samples < samples_end) {
1365 
1366  /* for this algorithm, c->status[0] is the sum channel and
1367  * c->status[1] is the diff channel */
1368 
1369  /* process the first predictor of the sum channel */
1371  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1372 
1373  /* process the diff channel predictor */
1375  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1376 
1377  /* process the first pair of stereo PCM samples */
1378  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1379  *samples++ = c->status[0].predictor + c->status[1].predictor;
1380  *samples++ = c->status[0].predictor - c->status[1].predictor;
1381 
1382  /* process the second predictor of the sum channel */
1384  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1385 
1386  /* process the second pair of stereo PCM samples */
1387  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1388  *samples++ = c->status[0].predictor + c->status[1].predictor;
1389  *samples++ = c->status[0].predictor - c->status[1].predictor;
1390  }
1391 
1392  if ((bytestream2_tell(&gb) & 1))
1393  bytestream2_skip(&gb, 1);
1394  ) /* End of CASE */
1395  CASE(ADPCM_IMA_ISS,
1396  for (int channel = 0; channel < channels; channel++) {
1397  ADPCMChannelStatus *cs = &c->status[channel];
1398  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1399  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1400  if (cs->step_index > 88u){
1401  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1402  channel, cs->step_index);
1403  return AVERROR_INVALIDDATA;
1404  }
1405  }
1406 
1407  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1408  int v1, v2;
1409  int v = bytestream2_get_byteu(&gb);
1410  /* nibbles are swapped for mono */
1411  if (st) {
1412  v1 = v >> 4;
1413  v2 = v & 0x0F;
1414  } else {
1415  v2 = v >> 4;
1416  v1 = v & 0x0F;
1417  }
1418  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1419  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1420  }
1421  ) /* End of CASE */
1422  CASE(ADPCM_IMA_MOFLEX,
1423  for (int channel = 0; channel < channels; channel++) {
1424  ADPCMChannelStatus *cs = &c->status[channel];
1425  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1426  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1427  if (cs->step_index > 88u){
1428  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1429  channel, cs->step_index);
1430  return AVERROR_INVALIDDATA;
1431  }
1432  }
1433 
1434  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1435  for (int channel = 0; channel < channels; channel++) {
1436  samples = samples_p[channel] + 256 * subframe;
1437  for (int n = 0; n < 256; n += 2) {
1438  int v = bytestream2_get_byteu(&gb);
1439  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1440  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1441  }
1442  }
1443  }
1444  ) /* End of CASE */
1445  CASE(ADPCM_IMA_DAT4,
1446  for (int channel = 0; channel < channels; channel++) {
1447  ADPCMChannelStatus *cs = &c->status[channel];
1448  samples = samples_p[channel];
1449  bytestream2_skip(&gb, 4);
1450  for (int n = 0; n < nb_samples; n += 2) {
1451  int v = bytestream2_get_byteu(&gb);
1452  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1453  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1454  }
1455  }
1456  ) /* End of CASE */
1457  CASE(ADPCM_IMA_APC,
1458  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1459  int v = bytestream2_get_byteu(&gb);
1460  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1461  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1462  }
1463  ) /* End of CASE */
1464  CASE(ADPCM_IMA_SSI,
1465  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1466  int v = bytestream2_get_byteu(&gb);
1467  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1468  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1469  }
1470  ) /* End of CASE */
1471  CASE(ADPCM_IMA_APM,
1472  for (int n = nb_samples / 2; n > 0; n--) {
1473  for (int channel = 0; channel < channels; channel++) {
1474  int v = bytestream2_get_byteu(&gb);
1475  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1476  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1477  }
1478  samples += channels;
1479  }
1480  ) /* End of CASE */
1481  CASE(ADPCM_IMA_ALP,
1482  for (int n = nb_samples / 2; n > 0; n--) {
1483  for (int channel = 0; channel < channels; channel++) {
1484  int v = bytestream2_get_byteu(&gb);
1485  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1486  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1487  }
1488  samples += channels;
1489  }
1490  ) /* End of CASE */
1491  CASE(ADPCM_IMA_CUNNING,
1492  for (int channel = 0; channel < channels; channel++) {
1493  int16_t *smp = samples_p[channel];
1494  for (int n = 0; n < nb_samples / 2; n++) {
1495  int v = bytestream2_get_byteu(&gb);
1496  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1497  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1498  }
1499  }
1500  ) /* End of CASE */
1501  CASE(ADPCM_IMA_OKI,
1502  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1503  int v = bytestream2_get_byteu(&gb);
1504  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1505  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1506  }
1507  ) /* End of CASE */
1508  CASE(ADPCM_IMA_RAD,
1509  for (int channel = 0; channel < channels; channel++) {
1510  ADPCMChannelStatus *cs = &c->status[channel];
1511  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1512  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1513  if (cs->step_index > 88u){
1514  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1515  channel, cs->step_index);
1516  return AVERROR_INVALIDDATA;
1517  }
1518  }
1519  for (int n = 0; n < nb_samples / 2; n++) {
1520  int byte[2];
1521 
1522  byte[0] = bytestream2_get_byteu(&gb);
1523  if (st)
1524  byte[1] = bytestream2_get_byteu(&gb);
1525  for (int channel = 0; channel < channels; channel++) {
1526  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1527  }
1528  for (int channel = 0; channel < channels; channel++) {
1529  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1530  }
1531  }
1532  ) /* End of CASE */
1533  CASE(ADPCM_IMA_WS,
1534  if (c->vqa_version == 3) {
1535  for (int channel = 0; channel < channels; channel++) {
1536  int16_t *smp = samples_p[channel];
1537 
1538  for (int n = nb_samples / 2; n > 0; n--) {
1539  int v = bytestream2_get_byteu(&gb);
1540  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1541  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1542  }
1543  }
1544  } else {
1545  for (int n = nb_samples / 2; n > 0; n--) {
1546  for (int channel = 0; channel < channels; channel++) {
1547  int v = bytestream2_get_byteu(&gb);
1548  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1549  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1550  }
1551  samples += channels;
1552  }
1553  }
1554  bytestream2_seek(&gb, 0, SEEK_END);
1555  ) /* End of CASE */
1556  CASE(ADPCM_XA,
1557  int16_t *out0 = samples_p[0];
1558  int16_t *out1 = samples_p[1];
1559  int samples_per_block = 28 * (3 - channels) * 4;
1560  int sample_offset = 0;
1561  int bytes_remaining;
1562  while (bytestream2_get_bytes_left(&gb) >= 128) {
1563  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1564  &c->status[0], &c->status[1],
1565  channels, sample_offset)) < 0)
1566  return ret;
1567  bytestream2_skipu(&gb, 128);
1568  sample_offset += samples_per_block;
1569  }
1570  /* Less than a full block of data left, e.g. when reading from
1571  * 2324 byte per sector XA; the remainder is padding */
1572  bytes_remaining = bytestream2_get_bytes_left(&gb);
1573  if (bytes_remaining > 0) {
1574  bytestream2_skip(&gb, bytes_remaining);
1575  }
1576  ) /* End of CASE */
1577  CASE(ADPCM_IMA_EA_EACS,
1578  for (int i = 0; i <= st; i++) {
1579  c->status[i].step_index = bytestream2_get_le32u(&gb);
1580  if (c->status[i].step_index > 88u) {
1581  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1582  i, c->status[i].step_index);
1583  return AVERROR_INVALIDDATA;
1584  }
1585  }
1586  for (int i = 0; i <= st; i++) {
1587  c->status[i].predictor = bytestream2_get_le32u(&gb);
1588  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1589  return AVERROR_INVALIDDATA;
1590  }
1591 
1592  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1593  int byte = bytestream2_get_byteu(&gb);
1594  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1595  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1596  }
1597  ) /* End of CASE */
1598  CASE(ADPCM_IMA_EA_SEAD,
1599  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1600  int byte = bytestream2_get_byteu(&gb);
1601  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1602  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1603  }
1604  ) /* End of CASE */
1605  CASE(ADPCM_EA,
1606  int previous_left_sample, previous_right_sample;
1607  int current_left_sample, current_right_sample;
1608  int next_left_sample, next_right_sample;
1609  int coeff1l, coeff2l, coeff1r, coeff2r;
1610  int shift_left, shift_right;
1611 
1612  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1613  each coding 28 stereo samples. */
1614 
1615  if (channels != 2)
1616  return AVERROR_INVALIDDATA;
1617 
1618  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1619  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1620  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1621  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1622 
1623  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1624  int byte = bytestream2_get_byteu(&gb);
1625  coeff1l = ea_adpcm_table[ byte >> 4 ];
1626  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1627  coeff1r = ea_adpcm_table[ byte & 0x0F];
1628  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1629 
1630  byte = bytestream2_get_byteu(&gb);
1631  shift_left = 20 - (byte >> 4);
1632  shift_right = 20 - (byte & 0x0F);
1633 
1634  for (int count2 = 0; count2 < 28; count2++) {
1635  byte = bytestream2_get_byteu(&gb);
1636  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1637  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1638 
1639  next_left_sample = (next_left_sample +
1640  (current_left_sample * coeff1l) +
1641  (previous_left_sample * coeff2l) + 0x80) >> 8;
1642  next_right_sample = (next_right_sample +
1643  (current_right_sample * coeff1r) +
1644  (previous_right_sample * coeff2r) + 0x80) >> 8;
1645 
1646  previous_left_sample = current_left_sample;
1647  current_left_sample = av_clip_int16(next_left_sample);
1648  previous_right_sample = current_right_sample;
1649  current_right_sample = av_clip_int16(next_right_sample);
1650  *samples++ = current_left_sample;
1651  *samples++ = current_right_sample;
1652  }
1653  }
1654 
1655  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1656  ) /* End of CASE */
1657  CASE(ADPCM_EA_MAXIS_XA,
1658  int coeff[2][2], shift[2];
1659 
1660  for (int channel = 0; channel < channels; channel++) {
1661  int byte = bytestream2_get_byteu(&gb);
1662  for (int i = 0; i < 2; i++)
1663  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1664  shift[channel] = 20 - (byte & 0x0F);
1665  }
1666  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1667  int byte[2];
1668 
1669  byte[0] = bytestream2_get_byteu(&gb);
1670  if (st) byte[1] = bytestream2_get_byteu(&gb);
1671  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1672  for (int channel = 0; channel < channels; channel++) {
1673  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1674  sample = (sample +
1675  c->status[channel].sample1 * coeff[channel][0] +
1676  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1677  c->status[channel].sample2 = c->status[channel].sample1;
1678  c->status[channel].sample1 = av_clip_int16(sample);
1679  *samples++ = c->status[channel].sample1;
1680  }
1681  }
1682  }
1683  bytestream2_seek(&gb, 0, SEEK_END);
1684  ) /* End of CASE */
1685 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
1688  case AV_CODEC_ID_ADPCM_EA_R3: {
1689  /* channel numbering
1690  2chan: 0=fl, 1=fr
1691  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1692  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1693  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1694  int previous_sample, current_sample, next_sample;
1695  int coeff1, coeff2;
1696  int shift;
1697  uint16_t *samplesC;
1698  int count = 0;
1699  int offsets[6];
1700 
1701  for (unsigned channel = 0; channel < channels; channel++)
1702  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1703  bytestream2_get_le32(&gb)) +
1704  (channels + 1) * 4;
1705 
1706  for (unsigned channel = 0; channel < channels; channel++) {
1707  int count1;
1708 
1709  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1710  samplesC = samples_p[channel];
1711 
1712  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1713  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1714  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1715  } else {
1716  current_sample = c->status[channel].predictor;
1717  previous_sample = c->status[channel].prev_sample;
1718  }
1719 
1720  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1721  int byte = bytestream2_get_byte(&gb);
1722  if (byte == 0xEE) { /* only seen in R2 and R3 */
1723  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1724  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1725 
1726  for (int count2 = 0; count2 < 28; count2++)
1727  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1728  } else {
1729  coeff1 = ea_adpcm_table[ byte >> 4 ];
1730  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1731  shift = 20 - (byte & 0x0F);
1732 
1733  for (int count2 = 0; count2 < 28; count2++) {
1734  if (count2 & 1)
1735  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1736  else {
1737  byte = bytestream2_get_byte(&gb);
1738  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1739  }
1740 
1741  next_sample += (current_sample * coeff1) +
1742  (previous_sample * coeff2);
1743  next_sample = av_clip_int16(next_sample >> 8);
1744 
1745  previous_sample = current_sample;
1746  current_sample = next_sample;
1747  *samplesC++ = current_sample;
1748  }
1749  }
1750  }
1751  if (!count) {
1752  count = count1;
1753  } else if (count != count1) {
1754  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1755  count = FFMAX(count, count1);
1756  }
1757 
1758  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1759  c->status[channel].predictor = current_sample;
1760  c->status[channel].prev_sample = previous_sample;
1761  }
1762  }
1763 
1764  frame->nb_samples = count * 28;
1765  bytestream2_seek(&gb, 0, SEEK_END);
1766  break;
1767  }
1768 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
1769  CASE(ADPCM_EA_XAS,
1770  for (int channel=0; channel < channels; channel++) {
1771  int coeff[2][4], shift[4];
1772  int16_t *s = samples_p[channel];
1773  for (int n = 0; n < 4; n++, s += 32) {
1774  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1775  for (int i = 0; i < 2; i++)
1776  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1777  s[0] = val & ~0x0F;
1778 
1779  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1780  shift[n] = 20 - (val & 0x0F);
1781  s[1] = val & ~0x0F;
1782  }
1783 
1784  for (int m = 2; m < 32; m += 2) {
1785  s = &samples_p[channel][m];
1786  for (int n = 0; n < 4; n++, s += 32) {
1787  int level, pred;
1788  int byte = bytestream2_get_byteu(&gb);
1789 
1790  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1791  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1792  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1793 
1794  level = sign_extend(byte, 4) * (1 << shift[n]);
1795  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1796  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1797  }
1798  }
1799  }
1800  ) /* End of CASE */
1801  CASE(ADPCM_IMA_ACORN,
1802  for (int channel = 0; channel < channels; channel++) {
1803  ADPCMChannelStatus *cs = &c->status[channel];
1804  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1805  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1806  if (cs->step_index > 88u){
1807  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1808  channel, cs->step_index);
1809  return AVERROR_INVALIDDATA;
1810  }
1811  }
1812  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1813  int byte = bytestream2_get_byteu(&gb);
1814  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1815  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1816  }
1817  ) /* End of CASE */
1818  CASE(ADPCM_IMA_AMV,
1819  av_assert0(channels == 1);
1820 
1821  /*
1822  * Header format:
1823  * int16_t predictor;
1824  * uint8_t step_index;
1825  * uint8_t reserved;
1826  * uint32_t frame_size;
1827  *
1828  * Some implementations have step_index as 16-bits, but others
1829  * only use the lower 8 and store garbage in the upper 8.
1830  */
1831  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1832  c->status[0].step_index = bytestream2_get_byteu(&gb);
1833  bytestream2_skipu(&gb, 5);
1834  if (c->status[0].step_index > 88u) {
1835  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1836  c->status[0].step_index);
1837  return AVERROR_INVALIDDATA;
1838  }
1839 
1840  for (int n = nb_samples >> 1; n > 0; n--) {
1841  int v = bytestream2_get_byteu(&gb);
1842 
1843  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1844  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1845  }
1846 
1847  if (nb_samples & 1) {
1848  int v = bytestream2_get_byteu(&gb);
1849  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1850 
1851  if (v & 0x0F) {
1852  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1853  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1854  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1855  }
1856  }
1857  ) /* End of CASE */
1858  CASE(ADPCM_IMA_SMJPEG,
1859  for (int i = 0; i < channels; i++) {
1860  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1861  c->status[i].step_index = bytestream2_get_byteu(&gb);
1862  bytestream2_skipu(&gb, 1);
1863  if (c->status[i].step_index > 88u) {
1864  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1865  c->status[i].step_index);
1866  return AVERROR_INVALIDDATA;
1867  }
1868  }
1869 
1870  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1871  int v = bytestream2_get_byteu(&gb);
1872 
1873  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1874  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1875  }
1876  ) /* End of CASE */
1877  CASE(ADPCM_CT,
1878  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1879  int v = bytestream2_get_byteu(&gb);
1880  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1881  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1882  }
1883  ) /* End of CASE */
1884 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
1885  CONFIG_ADPCM_SBPRO_4_DECODER
1889  if (!c->status[0].step_index) {
1890  /* the first byte is a raw sample */
1891  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1892  if (st)
1893  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1894  c->status[0].step_index = 1;
1895  nb_samples--;
1896  }
1897  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1898  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1899  int byte = bytestream2_get_byteu(&gb);
1900  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1901  byte >> 4, 4, 0);
1902  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1903  byte & 0x0F, 4, 0);
1904  }
1905  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1906  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
1907  int byte = bytestream2_get_byteu(&gb);
1908  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1909  byte >> 5 , 3, 0);
1910  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1911  (byte >> 2) & 0x07, 3, 0);
1912  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1913  byte & 0x03, 2, 0);
1914  }
1915  } else {
1916  for (int n = nb_samples >> (2 - st); n > 0; n--) {
1917  int byte = bytestream2_get_byteu(&gb);
1918  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1919  byte >> 6 , 2, 2);
1920  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1921  (byte >> 4) & 0x03, 2, 2);
1922  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1923  (byte >> 2) & 0x03, 2, 2);
1924  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1925  byte & 0x03, 2, 2);
1926  }
1927  }
1928  break;
1929 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
1930  CASE(ADPCM_SWF,
1931  adpcm_swf_decode(avctx, buf, buf_size, samples);
1932  bytestream2_seek(&gb, 0, SEEK_END);
1933  ) /* End of CASE */
1934  CASE(ADPCM_YAMAHA,
1935  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1936  int v = bytestream2_get_byteu(&gb);
1937  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1938  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1939  }
1940  ) /* End of CASE */
1941  CASE(ADPCM_AICA,
1942  for (int channel = 0; channel < channels; channel++) {
1943  samples = samples_p[channel];
1944  for (int n = nb_samples >> 1; n > 0; n--) {
1945  int v = bytestream2_get_byteu(&gb);
1946  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1947  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1948  }
1949  }
1950  ) /* End of CASE */
1951  CASE(ADPCM_AFC,
1952  int samples_per_block;
1953  int blocks;
1954 
1955  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1956  samples_per_block = avctx->extradata[0] / 16;
1957  blocks = nb_samples / avctx->extradata[0];
1958  } else {
1959  samples_per_block = nb_samples / 16;
1960  blocks = 1;
1961  }
1962 
1963  for (int m = 0; m < blocks; m++) {
1964  for (int channel = 0; channel < channels; channel++) {
1965  int prev1 = c->status[channel].sample1;
1966  int prev2 = c->status[channel].sample2;
1967 
1968  samples = samples_p[channel] + m * 16;
1969  /* Read in every sample for this channel. */
1970  for (int i = 0; i < samples_per_block; i++) {
1971  int byte = bytestream2_get_byteu(&gb);
1972  int scale = 1 << (byte >> 4);
1973  int index = byte & 0xf;
1974  int factor1 = afc_coeffs[0][index];
1975  int factor2 = afc_coeffs[1][index];
1976 
1977  /* Decode 16 samples. */
1978  for (int n = 0; n < 16; n++) {
1979  int32_t sampledat;
1980 
1981  if (n & 1) {
1982  sampledat = sign_extend(byte, 4);
1983  } else {
1984  byte = bytestream2_get_byteu(&gb);
1985  sampledat = sign_extend(byte >> 4, 4);
1986  }
1987 
1988  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1989  sampledat * scale;
1990  *samples = av_clip_int16(sampledat);
1991  prev2 = prev1;
1992  prev1 = *samples++;
1993  }
1994  }
1995 
1996  c->status[channel].sample1 = prev1;
1997  c->status[channel].sample2 = prev2;
1998  }
1999  }
2000  bytestream2_seek(&gb, 0, SEEK_END);
2001  ) /* End of CASE */
2002 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2003  case AV_CODEC_ID_ADPCM_THP:
2005  {
2006  int table[14][16];
2007 
2008 #define THP_GET16(g) \
2009  sign_extend( \
2010  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2011  bytestream2_get_le16u(&(g)) : \
2012  bytestream2_get_be16u(&(g)), 16)
2013 
2014  if (avctx->extradata) {
2016  if (avctx->extradata_size < 32 * channels) {
2017  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2018  return AVERROR_INVALIDDATA;
2019  }
2020 
2021  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2022  for (int i = 0; i < channels; i++)
2023  for (int n = 0; n < 16; n++)
2024  table[i][n] = THP_GET16(tb);
2025  } else {
2026  for (int i = 0; i < channels; i++)
2027  for (int n = 0; n < 16; n++)
2028  table[i][n] = THP_GET16(gb);
2029 
2030  if (!c->has_status) {
2031  /* Initialize the previous sample. */
2032  for (int i = 0; i < channels; i++) {
2033  c->status[i].sample1 = THP_GET16(gb);
2034  c->status[i].sample2 = THP_GET16(gb);
2035  }
2036  c->has_status = 1;
2037  } else {
2038  bytestream2_skip(&gb, channels * 4);
2039  }
2040  }
2041 
2042  for (int ch = 0; ch < channels; ch++) {
2043  samples = samples_p[ch];
2044 
2045  /* Read in every sample for this channel. */
2046  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2047  int byte = bytestream2_get_byteu(&gb);
2048  int index = (byte >> 4) & 7;
2049  unsigned int exp = byte & 0x0F;
2050  int64_t factor1 = table[ch][index * 2];
2051  int64_t factor2 = table[ch][index * 2 + 1];
2052 
2053  /* Decode 14 samples. */
2054  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2055  int32_t sampledat;
2056 
2057  if (n & 1) {
2058  sampledat = sign_extend(byte, 4);
2059  } else {
2060  byte = bytestream2_get_byteu(&gb);
2061  sampledat = sign_extend(byte >> 4, 4);
2062  }
2063 
2064  sampledat = ((c->status[ch].sample1 * factor1
2065  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2066  *samples = av_clip_int16(sampledat);
2067  c->status[ch].sample2 = c->status[ch].sample1;
2068  c->status[ch].sample1 = *samples++;
2069  }
2070  }
2071  }
2072  break;
2073  }
2074 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
2075  CASE(ADPCM_DTK,
2076  for (int channel = 0; channel < channels; channel++) {
2077  samples = samples_p[channel];
2078 
2079  /* Read in every sample for this channel. */
2080  for (int i = 0; i < nb_samples / 28; i++) {
2081  int byte, header;
2082  if (channel)
2083  bytestream2_skipu(&gb, 1);
2084  header = bytestream2_get_byteu(&gb);
2085  bytestream2_skipu(&gb, 3 - channel);
2086 
2087  /* Decode 28 samples. */
2088  for (int n = 0; n < 28; n++) {
2089  int32_t sampledat, prev;
2090 
2091  switch (header >> 4) {
2092  case 1:
2093  prev = (c->status[channel].sample1 * 0x3c);
2094  break;
2095  case 2:
2096  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2097  break;
2098  case 3:
2099  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2100  break;
2101  default:
2102  prev = 0;
2103  }
2104 
2105  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2106 
2107  byte = bytestream2_get_byteu(&gb);
2108  if (!channel)
2109  sampledat = sign_extend(byte, 4);
2110  else
2111  sampledat = sign_extend(byte >> 4, 4);
2112 
2113  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2114  *samples++ = av_clip_int16(sampledat >> 6);
2115  c->status[channel].sample2 = c->status[channel].sample1;
2116  c->status[channel].sample1 = sampledat;
2117  }
2118  }
2119  if (!channel)
2120  bytestream2_seek(&gb, 0, SEEK_SET);
2121  }
2122  ) /* End of CASE */
2123  CASE(ADPCM_PSX,
2124  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2125  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
2126  for (int channel = 0; channel < channels; channel++) {
2127  samples = samples_p[channel] + block * nb_samples_per_block;
2128  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2129 
2130  /* Read in every sample for this channel. */
2131  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2132  int filter, shift, flag, byte;
2133 
2134  filter = bytestream2_get_byteu(&gb);
2135  shift = filter & 0xf;
2136  filter = filter >> 4;
2138  return AVERROR_INVALIDDATA;
2139  flag = bytestream2_get_byteu(&gb) & 0x7;
2140 
2141  /* Decode 28 samples. */
2142  for (int n = 0; n < 28; n++) {
2143  int sample = 0, scale;
2144 
2145  if (n & 1) {
2146  scale = sign_extend(byte >> 4, 4);
2147  } else {
2148  byte = bytestream2_get_byteu(&gb);
2149  scale = sign_extend(byte, 4);
2150  }
2151 
2152  if (flag < 0x07) {
2153  scale = scale * (1 << 12);
2154  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2155  }
2157  c->status[channel].sample2 = c->status[channel].sample1;
2158  c->status[channel].sample1 = sample;
2159  }
2160  }
2161  }
2162  }
2163  ) /* End of CASE */
2164  CASE(ADPCM_ARGO,
2165  /*
2166  * The format of each block:
2167  * uint8_t left_control;
2168  * uint4_t left_samples[nb_samples];
2169  * ---- and if stereo ----
2170  * uint8_t right_control;
2171  * uint4_t right_samples[nb_samples];
2172  *
2173  * Format of the control byte:
2174  * MSB [SSSSRDRR] LSB
2175  * S = (Shift Amount - 2)
2176  * D = Decoder flag.
2177  * R = Reserved
2178  *
2179  * Each block relies on the previous two samples of each channel.
2180  * They should be 0 initially.
2181  */
2182  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2183  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2184  ADPCMChannelStatus *cs = c->status + channel;
2185  int control, shift;
2186 
2187  samples = samples_p[channel] + block * 32;
2188 
2189  /* Get the control byte and decode the samples, 2 at a time. */
2190  control = bytestream2_get_byteu(&gb);
2191  shift = (control >> 4) + 2;
2192 
2193  for (int n = 0; n < 16; n++) {
2194  int sample = bytestream2_get_byteu(&gb);
2195  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2196  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2197  }
2198  }
2199  }
2200  ) /* End of CASE */
2201  CASE(ADPCM_ZORK,
2202  for (int n = 0; n < nb_samples * channels; n++) {
2203  int v = bytestream2_get_byteu(&gb);
2204  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2205  }
2206  ) /* End of CASE */
2207  CASE(ADPCM_IMA_MTF,
2208  for (int n = nb_samples / 2; n > 0; n--) {
2209  for (int channel = 0; channel < channels; channel++) {
2210  int v = bytestream2_get_byteu(&gb);
2211  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2212  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2213  }
2214  samples += channels;
2215  }
2216  ) /* End of CASE */
2217  default:
2218  av_assert0(0); // unsupported codec_id should not happen
2219  }
2220 
2221  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2222  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2223  return AVERROR_INVALIDDATA;
2224  }
2225 
2226  *got_frame_ptr = 1;
2227 
2228  if (avpkt->size < bytestream2_tell(&gb)) {
2229  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2230  return avpkt->size;
2231  }
2232 
2233  return bytestream2_tell(&gb);
2234 }
2235 
2236 static void adpcm_flush(AVCodecContext *avctx)
2237 {
2238  ADPCMDecodeContext *c = avctx->priv_data;
2239 
2240  /* Just nuke the entire state and re-init. */
2241  memset(c, 0, sizeof(ADPCMDecodeContext));
2242 
2243  switch(avctx->codec_id) {
2244  case AV_CODEC_ID_ADPCM_CT:
2245  c->status[0].step = c->status[1].step = 511;
2246  break;
2247 
2249  if (avctx->extradata && avctx->extradata_size >= 8) {
2250  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2251  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2252  }
2253  break;
2254 
2256  if (avctx->extradata && avctx->extradata_size >= 28) {
2257  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2258  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2259  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2260  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2261  }
2262  break;
2263 
2265  if (avctx->extradata && avctx->extradata_size >= 2)
2266  c->vqa_version = AV_RL16(avctx->extradata);
2267  break;
2268  default:
2269  /* Other codecs may want to handle this during decoding. */
2270  c->has_status = 0;
2271  return;
2272  }
2273 
2274  c->has_status = 1;
2275 }
2276 
2277 
2285 
2286 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2287 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2288 const FFCodec ff_ ## name_ ## _decoder = { \
2289  .p.name = #name_, \
2290  .p.long_name = NULL_IF_CONFIG_SMALL(long_name_), \
2291  .p.type = AVMEDIA_TYPE_AUDIO, \
2292  .p.id = id_, \
2293  .p.capabilities = AV_CODEC_CAP_DR1, \
2294  .p.sample_fmts = sample_fmts_, \
2295  .priv_data_size = sizeof(ADPCMDecodeContext), \
2296  .init = adpcm_decode_init, \
2297  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2298  .flush = adpcm_flush, \
2299  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
2300 };
2301 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2302  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2303 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2304  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2305 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2306  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2307  name, sample_fmts, long_name)
2308 
2309 /* Note: Do not forget to add new entries to the Makefile as well. */
2310 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2311 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2312 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2313 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2314 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2315 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2316 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2317 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2318 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2319 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2320 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2321 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2322 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2323 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2324 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2325 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2326 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2327 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2328 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2329 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2330 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2331 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2332 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2333 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2334 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2335 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2336 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2337 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2338 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2339 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2340 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2341 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2342 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2343 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2344 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2345 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2346 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2347 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2348 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2349 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2350 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2351 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2352 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2353 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2354 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2355 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:363
adpcm_index_table5
static const int8_t adpcm_index_table5[32]
Definition: adpcm.c:137
DK3_GET_NEXT_NIBBLE
#define DK3_GET_NEXT_NIBBLE()
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:357
level
uint8_t level
Definition: svq3.c:206
av_clip
#define av_clip
Definition: common.h:95
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_ADPCM_DTK
@ AV_CODEC_ID_ADPCM_DTK
Definition: codec_id.h:390
ADPCMChannelStatus::step_index
int16_t step_index
Definition: adpcm.h:33
GetByteContext
Definition: bytestream.h:33
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
R3
#define R3
Definition: simple_idct.c:173
zork_index_table
static const int8_t zork_index_table[8]
Definition: adpcm.c:232
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:119
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:54
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:122
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
internal.h
AV_CODEC_ID_ADPCM_IMA_CUNNING
@ AV_CODEC_ID_ADPCM_IMA_CUNNING
Definition: codec_id.h:405
AVPacket::data
uint8_t * data
Definition: packet.h:374
table
static const uint16_t table[]
Definition: prosumer.c:206
AV_CODEC_ID_ADPCM_EA_R3
@ AV_CODEC_ID_ADPCM_EA_R3
Definition: codec_id.h:378
AV_CODEC_ID_ADPCM_AICA
@ AV_CODEC_ID_ADPCM_AICA
Definition: codec_id.h:395
AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_CODEC_ID_ADPCM_IMA_OKI
Definition: codec_id.h:389
adpcm_ima_qt_expand_nibble
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:507
R1
#define R1
Definition: simple_idct.c:171
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:300
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
AV_CODEC_ID_ADPCM_THP_LE
@ AV_CODEC_ID_ADPCM_THP_LE
Definition: codec_id.h:393
adpcm_sbpro_expand_nibble
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:594
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:58
AV_CODEC_ID_ADPCM_CT
@ AV_CODEC_ID_ADPCM_CT
Definition: codec_id.h:369
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:398
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2056
GetBitContext
Definition: get_bits.h:61
adpcm_ima_mtf_expand_nibble
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:448
adpcm_ima_expand_nibble
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:399
val
static double val(void *priv, double ch)
Definition: aeval.c:77
adpcm_flush
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2236
update
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
Definition: af_silencedetect.c:78
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
ff_adpcm_ima_block_sizes
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_CODEC_ID_ADPCM_SBPRO_2
Definition: codec_id.h:374
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
sample_fmts_s16p
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2280
adpcm_ima_alp_expand_nibble
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:425
adpcm_yamaha_expand_nibble
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:614
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:491
AV_CODEC_ID_ADPCM_IMA_ACORN
@ AV_CODEC_ID_ADPCM_IMA_ACORN
Definition: codec_id.h:407
adpcm_zork_expand_nibble
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:637
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:256
offsets
static const int offsets[]
Definition: hevc_pel.c:34
AV_CODEC_ID_ADPCM_AFC
@ AV_CODEC_ID_ADPCM_AFC
Definition: codec_id.h:388
AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
Definition: codec_id.h:380
g
const char * g
Definition: vf_curves.c:117
AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_CODEC_ID_ADPCM_IMA_DK3
Definition: codec_id.h:359
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_CODEC_ID_ADPCM_IMA_APC
@ AV_CODEC_ID_ADPCM_IMA_APC
Definition: codec_id.h:386
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_ADPCM_IMA_ISS
Definition: codec_id.h:384
channels
channels
Definition: aptx.h:32
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
Definition: codec_id.h:362
adpcm_ms_expand_nibble
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:533
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
if
if(ret)
Definition: filter_design.txt:179
ff_adpcm_ima_block_samples
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
sample_fmts_s16
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2278
AV_CODEC_ID_ADPCM_EA_XAS
@ AV_CODEC_ID_ADPCM_EA_XAS
Definition: codec_id.h:382
av_clip_int16
#define av_clip_int16
Definition: common.h:110
NULL
#define NULL
Definition: coverity.c:32
ADPCM_DECODER
#define ADPCM_DECODER(codec, name, sample_fmts, long_name)
Definition: adpcm.c:2305
av_clip_intp2
#define av_clip_intp2
Definition: common.h:116
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:371
oki_step_table
static const int16_t oki_step_table[49]
Definition: adpcm.c:216
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:361
AV_CODEC_ID_ADPCM_IMA_EA_EACS
@ AV_CODEC_ID_ADPCM_IMA_EA_EACS
Definition: codec_id.h:381
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:399
AV_CODEC_ID_ADPCM_IMA_DK4
@ AV_CODEC_ID_ADPCM_IMA_DK4
Definition: codec_id.h:360
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:376
abs
#define abs(x)
Definition: cuda_runtime.h:35
ea_adpcm_table
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:94
ima_cunning_index_table
static const int8_t ima_cunning_index_table[9]
Definition: adpcm.c:108
exp
int8_t exp
Definition: eval.c:72
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_ADPCM_XA
@ AV_CODEC_ID_ADPCM_XA
Definition: codec_id.h:365
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
adpcm_ct_expand_nibble
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:573
adpcm.h
adpcm_ima_oki_expand_nibble
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:552
adpcm_decode_frame
static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:1065
AV_CODEC_ID_ADPCM_ZORK
@ AV_CODEC_ID_ADPCM_ZORK
Definition: codec_id.h:401
afc_coeffs
static const int16_t afc_coeffs[2][16]
Definition: adpcm.c:89
ADPCMDecodeContext
Definition: adpcm.c:243
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:74
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1403
AVPacket::size
int size
Definition: packet.h:375
byte
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:99
codec_internal.h
AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_CODEC_ID_ADPCM_IMA_RAD
Definition: codec_id.h:391
adpcm_ima_cunning_expand_nibble
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:464
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:403
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
bps
unsigned bps
Definition: movenc.c:1647
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:39
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1014
get_nb_samples
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:846
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
sample
#define sample
Definition: flacdsp_template.c:44
R2
#define R2
Definition: simple_idct.c:172
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:370
size
int size
Definition: twinvq_data.h:10344
header
static const uint8_t header[24]
Definition: sdr2.c:67
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:171
xa_decode
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:672
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
adpcm_index_table3
static const int8_t adpcm_index_table3[8]
Definition: adpcm.c:132
AVCodec::id
enum AVCodecID id
Definition: codec.h:210
flag
#define flag(name)
Definition: cbs_av1.c:553
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1441
sample_fmts_both
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2282
AV_CODEC_ID_ADPCM_MTAF
@ AV_CODEC_ID_ADPCM_MTAF
Definition: codec_id.h:397
AV_CODEC_ID_ADPCM_EA_MAXIS_XA
@ AV_CODEC_ID_ADPCM_EA_MAXIS_XA
Definition: codec_id.h:383
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:60
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:65
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:490
adpcm_index_tables
static const int8_t *const adpcm_index_tables[4]
Definition: adpcm.c:142
MT
#define MT(...)
Definition: codec_desc.c:32
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
xf
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:664
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:402
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
tb
#define tb
Definition: regdef.h:68
ADPCMDecodeContext::vqa_version
int vqa_version
VQA version.
Definition: adpcm.c:245
AV_CODEC_ID_ADPCM_IMA_DAT4
@ AV_CODEC_ID_ADPCM_IMA_DAT4
Definition: codec_id.h:396
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:818
xa_adpcm_table
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:81
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:30
avcodec.h
AV_CODEC_ID_ADPCM_EA
@ AV_CODEC_ID_ADPCM_EA
Definition: codec_id.h:367
AV_CODEC_ID_ADPCM_IMA_MTF
@ AV_CODEC_ID_ADPCM_IMA_MTF
Definition: codec_id.h:404
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1043
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
adpcm_ima_wav_expand_nibble
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:484
AVCodecContext
main external API structure.
Definition: avcodec.h:389
AV_CODEC_ID_ADPCM_AGM
@ AV_CODEC_ID_ADPCM_AGM
Definition: codec_id.h:398
mtaf_stepsize
static const int16_t mtaf_stepsize[32][16]
Definition: adpcm.c:149
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:69
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:132
AV_CODEC_ID_ADPCM_EA_R1
@ AV_CODEC_ID_ADPCM_EA_R1
Definition: codec_id.h:377
AV_CODEC_ID_ADPCM_EA_R2
@ AV_CODEC_ID_ADPCM_EA_R2
Definition: codec_id.h:379
temp
else temp
Definition: vf_mcdeint.c:248
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
shift
static int shift(int a, int b)
Definition: sonic.c:88
AV_CODEC_ID_ADPCM_THP
@ AV_CODEC_ID_ADPCM_THP
Definition: codec_id.h:375
add
static float add(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:35
adpcm_index_table2
static const int8_t adpcm_index_table2[4]
Definition: adpcm.c:127
AV_CODEC_ID_ADPCM_SBPRO_4
@ AV_CODEC_ID_ADPCM_SBPRO_4
Definition: codec_id.h:372
adpcm_swf_decode
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:760
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:400
adpcm_decode_init
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:251
ADPCMDecodeContext::has_status
int has_status
Status flag.
Definition: adpcm.c:246
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
AV_CODEC_ID_ADPCM_IMA_MOFLEX
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
Definition: codec_id.h:406
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:358
d
d
Definition: ffmpeg_filter.c:153
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
ADPCMChannelStatus::predictor
int predictor
Definition: adpcm.h:32
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
AV_CODEC_ID_ADPCM_4XM
@ AV_CODEC_ID_ADPCM_4XM
Definition: codec_id.h:364
adpcm_agm_expand_nibble
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:355
AV_CODEC_ID_ADPCM_PSX
@ AV_CODEC_ID_ADPCM_PSX
Definition: codec_id.h:394
adpcm_mtaf_expand_nibble
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:628
CASE
#define CASE(codec,...)
Definition: adpcm.c:77
ima_cunning_step_table
static const int16_t ima_cunning_step_table[61]
Definition: adpcm.c:118
int
int
Definition: ffmpeg_filter.c:153
ADPCMChannelStatus
Definition: adpcm.h:31
mtf_index_table
static const int8_t mtf_index_table[16]
Definition: adpcm.c:236
channel
channel
Definition: ebur128.h:39
AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_CODEC_ID_ADPCM_SBPRO_3
Definition: codec_id.h:373
ADPCMDecodeContext::status
ADPCMChannelStatus status[14]
Definition: adpcm.c:244
swf_index_tables
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:225