FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
/* These are for CD-ROM XA ADPCM */
/* Per-filter prediction coefficients {f0, f1}; xa_decode() applies them as
 * (s_1*f0 + s_2*f1 + 32) >> 6, i.e. the values are in 1/64 units. */
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};
72 
/* Coefficient/adjustment table for the EA ADPCM decoder family.
 * NOTE(review): the code that indexes this table is not visible in this
 * chunk; verify the row layout (coefficients vs. shift adjustments) against
 * the EA decode paths in the full file. */
static const int16_t ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
    7,    8,   10,   11,
    0,   -1,   -3,   -4
};
80 
// padded to zero where table size is less than 16
/* Step-index adjustment tables for 2- to 5-bit SWF ADPCM codes; the active
 * row is selected as swf_index_tables[nb_bits - 2] in adpcm_swf_decode(). */
static const int8_t swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};
88 
/* Step-index adjustment for Zork ADPCM, indexed by the top three magnitude
 * bits of the code ((nibble >> 4) & 7 in adpcm_zork_expand_nibble()). */
static const int8_t zork_index_table[8] = {
    -1, -1, -1, 1, 4, 7, 10, 12,
};
92 
/* Step-index adjustment for the MTF IMA-ADPCM variant, indexed by the full
 * 4-bit code; symmetric around the midpoint (large codes grow the step). */
static const int8_t mtf_index_table[16] = {
     8,  6,  4,  2, -1, -1, -1, -1,
    -1, -1, -1, -1,  2,  4,  6,  8,
};
97 
98 /* end of tables */
99 
100 typedef struct ADPCMDecodeContext {
102  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
103  int has_status; /**< Status flag. Reset to 0 after a flush. */
105 
106 static void adpcm_flush(AVCodecContext *avctx);
107 
/* Decoder init: validates channel count, bitstream parameters and picks the
 * output sample format per codec id.
 * NOTE(review): the function's signature line (upstream: static av_cold int
 * adpcm_decode_init(AVCodecContext *avctx)) and every AV_CODEC_ID_ADPCM_*
 * case label in the switches below were lost in extraction — each bare
 * statement run belongs to one or more case labels. Verify against the
 * repository before editing the logic. */
{
    ADPCMDecodeContext *c = avctx->priv_data;
    unsigned int min_channels = 1;
    unsigned int max_channels = 2;

    adpcm_flush(avctx);

    /* per-codec channel-count constraints (case labels missing) */
    switch(avctx->codec->id) {
        max_channels = 1;
        break;
        min_channels = 2;
        break;
        max_channels = 6;
        break;
        min_channels = 2;
        max_channels = 8;
        if (avctx->channels & 1) {
            avpriv_request_sample(avctx, "channel count %d", avctx->channels);
            return AVERROR_PATCHWELCOME;
        }
        break;
        max_channels = 8;
        if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
            return AVERROR_INVALIDDATA;
        break;
        max_channels = 14;
        break;
    }
    if (avctx->channels < min_channels || avctx->channels > max_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    /* per-codec bitstream parameter validation (case labels missing) */
    switch(avctx->codec->id) {
        if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
            return AVERROR_INVALIDDATA;
        break;
        if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
            return AVERROR_INVALIDDATA;
        break;
        if (avctx->bits_per_coded_sample != 8)
            return AVERROR_INVALIDDATA;
        break;
    default:
        break;
    }

    /* output sample format selection (case labels missing; the second
     * operand of each ternary below was also truncated in extraction) */
    switch (avctx->codec->id) {
        break;
        avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
        break;
        avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
        break;
    default:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    }
    return 0;
}
208 
209 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
210 {
211  int delta, pred, step, add;
212 
213  pred = c->predictor;
214  delta = nibble & 7;
215  step = c->step;
216  add = (delta * 2 + 1) * step;
217  if (add < 0)
218  add = add + 7;
219 
220  if ((nibble & 8) == 0)
221  pred = av_clip(pred + (add >> 3), -32767, 32767);
222  else
223  pred = av_clip(pred - (add >> 3), -32767, 32767);
224 
225  switch (delta) {
226  case 7:
227  step *= 0x99;
228  break;
229  case 6:
230  c->step = av_clip(c->step * 2, 127, 24576);
231  c->predictor = pred;
232  return pred;
233  case 5:
234  step *= 0x66;
235  break;
236  case 4:
237  step *= 0x4d;
238  break;
239  default:
240  step *= 0x39;
241  break;
242  }
243 
244  if (step < 0)
245  step += 0x3f;
246 
247  c->step = step >> 6;
248  c->step = av_clip(c->step, 127, 24576);
249  c->predictor = pred;
250  return pred;
251 }
252 
253 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
254 {
255  int step_index;
256  int predictor;
257  int sign, delta, diff, step;
258 
259  step = ff_adpcm_step_table[c->step_index];
260  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
261  step_index = av_clip(step_index, 0, 88);
262 
263  sign = nibble & 8;
264  delta = nibble & 7;
265  /* perform direct multiplication instead of series of jumps proposed by
266  * the reference ADPCM implementation since modern CPUs can do the mults
267  * quickly enough */
268  diff = ((2 * delta + 1) * step) >> shift;
269  predictor = c->predictor;
270  if (sign) predictor -= diff;
271  else predictor += diff;
272 
273  c->predictor = av_clip_int16(predictor);
274  c->step_index = step_index;
275 
276  return (int16_t)c->predictor;
277 }
278 
279 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
280 {
281  int step_index;
282  int predictor;
283  int sign, delta, diff, step;
284 
285  step = ff_adpcm_step_table[c->step_index];
286  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
287  step_index = av_clip(step_index, 0, 88);
288 
289  sign = nibble & 8;
290  delta = nibble & 7;
291  diff = (delta * step) >> shift;
292  predictor = c->predictor;
293  if (sign) predictor -= diff;
294  else predictor += diff;
295 
296  c->predictor = av_clip_int16(predictor);
297  c->step_index = step_index;
298 
299  return (int16_t)c->predictor;
300 }
301 
302 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
303 {
304  int step_index, step, delta, predictor;
305 
306  step = ff_adpcm_step_table[c->step_index];
307 
308  delta = step * (2 * nibble - 15);
309  predictor = c->predictor + delta;
310 
311  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
312  c->predictor = av_clip_int16(predictor >> 4);
313  c->step_index = av_clip(step_index, 0, 88);
314 
315  return (int16_t)c->predictor;
316 }
317 
318 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
319 {
320  int step_index;
321  int predictor;
322  int step;
323 
324  nibble = sign_extend(nibble & 0xF, 4);
325 
326  step = ff_adpcm_ima_cunning_step_table[c->step_index];
327  step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
328  step_index = av_clip(step_index, 0, 60);
329 
330  predictor = c->predictor + step * nibble;
331 
332  c->predictor = av_clip_int16(predictor);
333  c->step_index = step_index;
334 
335  return c->predictor;
336 }
337 
/* Decode one variable-width (bps-bit) IMA code read little-endian from gb.
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * adpcm_ima_expand_nibble_16(ADPCMChannelStatus *c, GetBitContext *gb,
 * int bps) — verify against the repository. */
{
    int nibble, step_index, predictor, sign, delta, diff, step, shift;

    shift = bps - 1;
    /* note: trailing comma operator here is present in the original source */
    nibble = get_bits_le(gb, bps),
    step = ff_adpcm_step_table[c->step_index];
    /* index table row selected by code width (bps in 2..5) */
    step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & (1 << shift);            /* top bit is the sign */
    delta = av_mod_uintp2(nibble, shift);    /* remaining bits: magnitude */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (int16_t)c->predictor;
}
360 
361 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
362 {
363  int step_index;
364  int predictor;
365  int diff, step;
366 
367  step = ff_adpcm_step_table[c->step_index];
368  step_index = c->step_index + ff_adpcm_index_table[nibble];
369  step_index = av_clip(step_index, 0, 88);
370 
371  diff = step >> 3;
372  if (nibble & 4) diff += step;
373  if (nibble & 2) diff += step >> 1;
374  if (nibble & 1) diff += step >> 2;
375 
376  if (nibble & 8)
377  predictor = c->predictor - diff;
378  else
379  predictor = c->predictor + diff;
380 
381  c->predictor = av_clip_int16(predictor);
382  c->step_index = step_index;
383 
384  return c->predictor;
385 }
386 
387 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
388 {
389  int predictor;
390 
391  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
392  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
393 
394  c->sample2 = c->sample1;
395  c->sample1 = av_clip_int16(predictor);
396  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
397  if (c->idelta < 16) c->idelta = 16;
398  if (c->idelta > INT_MAX/768) {
399  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
400  c->idelta = INT_MAX/768;
401  }
402 
403  return c->sample1;
404 }
405 
406 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
407 {
408  int step_index, predictor, sign, delta, diff, step;
409 
410  step = ff_adpcm_oki_step_table[c->step_index];
411  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
412  step_index = av_clip(step_index, 0, 48);
413 
414  sign = nibble & 8;
415  delta = nibble & 7;
416  diff = ((2 * delta + 1) * step) >> 3;
417  predictor = c->predictor;
418  if (sign) predictor -= diff;
419  else predictor += diff;
420 
421  c->predictor = av_clip_intp2(predictor, 11);
422  c->step_index = step_index;
423 
424  return c->predictor * 16;
425 }
426 
427 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
428 {
429  int sign, delta, diff;
430  int new_step;
431 
432  sign = nibble & 8;
433  delta = nibble & 7;
434  /* perform direct multiplication instead of series of jumps proposed by
435  * the reference ADPCM implementation since modern CPUs can do the mults
436  * quickly enough */
437  diff = ((2 * delta + 1) * c->step) >> 3;
438  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
439  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
440  c->predictor = av_clip_int16(c->predictor);
441  /* calculate new step and clamp it to range 511..32767 */
442  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
443  c->step = av_clip(new_step, 511, 32767);
444 
445  return (int16_t)c->predictor;
446 }
447 
448 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
449 {
450  int sign, delta, diff;
451 
452  sign = nibble & (1<<(size-1));
453  delta = nibble & ((1<<(size-1))-1);
454  diff = delta << (7 + c->step + shift);
455 
456  /* clamp result */
457  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
458 
459  /* calculate new step */
460  if (delta >= (2*size - 3) && c->step < 3)
461  c->step++;
462  else if (delta == 0 && c->step > 0)
463  c->step--;
464 
465  return (int16_t) c->predictor;
466 }
467 
468 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
469 {
470  if(!c->step) {
471  c->predictor = 0;
472  c->step = 127;
473  }
474 
475  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
476  c->predictor = av_clip_int16(c->predictor);
477  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
478  c->step = av_clip(c->step, 127, 24576);
479  return c->predictor;
480 }
481 
482 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
483 {
484  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
485  c->predictor = av_clip_int16(c->predictor);
486  c->step += ff_adpcm_index_table[nibble];
487  c->step = av_clip_uintp2(c->step, 5);
488  return c->predictor;
489 }
490 
/* Decode one 8-bit Zork ADPCM code: bit 7 is the sign, bits 6..0 each add a
 * successively halved fraction of the current step value, and bits 6..4 also
 * drive the index adaptation. */
static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
{
    int16_t index = c->step_index;
    uint32_t lookup_sample = ff_adpcm_step_table[index];
    int32_t sample = 0;

    /* bits 6..0: step, step/2, step/4, ... step/64 */
    if (nibble & 0x40)
        sample += lookup_sample;
    if (nibble & 0x20)
        sample += lookup_sample >> 1;
    if (nibble & 0x10)
        sample += lookup_sample >> 2;
    if (nibble & 0x08)
        sample += lookup_sample >> 3;
    if (nibble & 0x04)
        sample += lookup_sample >> 4;
    if (nibble & 0x02)
        sample += lookup_sample >> 5;
    if (nibble & 0x01)
        sample += lookup_sample >> 6;
    if (nibble & 0x80)
        sample = -sample;    /* bit 7: sign */

    sample += c->predictor;
    /* NOTE(review): one line (original line 515) was lost in extraction here
     * — presumably a clip of 'sample'; verify against upstream. */

    index += zork_index_table[(nibble >> 4) & 7];
    index = av_clip(index, 0, 88);

    c->predictor = sample;
    c->step_index = index;

    return sample;
}
525 
/**
 * Decode one CD-ROM XA ADPCM sound group: 4 interleaved sub-blocks, each
 * yielding 28 samples per output channel. Low nibbles of the data bytes feed
 * out0, high nibbles feed out1; for mono, out1 is simply the next 28 samples
 * after out0. Prediction is second-order using xa_adpcm_table coefficients.
 */
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
                     const uint8_t *in, ADPCMChannelStatus *left,
                     ADPCMChannelStatus *right, int channels, int sample_offset)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    out0 += sample_offset;
    if (channels == 1)
        out1 = out0 + 28;
    else
        out1 += sample_offset;

    for(i=0;i<4;i++) {
        /* per-sub-block header byte: high nibble = filter, low = shift */
        shift = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        /* NOTE(review): the line opening this filter-range check was lost in
         * extraction (original line 544; compare the equivalent check for the
         * high-nibble pass below). */
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter=0;
        }
        if (shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
            shift = 0;
        }
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = sign_extend(d, 4);   /* low nibble of each data byte */
            /* predicted = (s_1*f0 + s_2*f1 + 32) >> 6, in 1/64 units */
            s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out0[j] = s_1;
        }

        if (channels == 2) {
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
        }

        /* second header byte of the pair drives the high-nibble pass */
        shift = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter=0;
        }
        if (shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
            shift = 0;
        }

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = sign_extend(d >> 4, 4);   /* high nibble */
            s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out1[j] = s_1;
        }

        if (channels == 2) {
            right->sample1 = s_1;
            right->sample2 = s_2;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }

        /* mono advances both pointers by 56, stereo by 28 */
        out0 += 28 * (3 - channels);
        out1 += 28 * (3 - channels);
    }

    return 0;
}
613 
/* Decode a packet of SWF (Flash) ADPCM. The stream starts with a 2-bit field
 * giving the code width (2..5 bits); it is then organized in blocks of up to
 * 4096 samples per channel, each opened by a raw 16-bit seed sample and a
 * 6-bit step index per channel (22 header bits per channel). */
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    GetBitContext gb;
    const int8_t *table;
    int k0, signmask, nb_bits, count;
    int size = buf_size*8;
    int i;

    init_get_bits(&gb, buf, size);

    //read bits & initial values
    nb_bits = get_bits(&gb, 2)+2;
    table = swf_index_tables[nb_bits-2];
    k0 = 1 << (nb_bits-2);       /* highest magnitude bit of a code */
    signmask = 1 << (nb_bits-1); /* sign bit of a code */

    while (get_bits_count(&gb) <= size - 22*avctx->channels) {
        /* block header: raw seed sample + step index per channel */
        for (i = 0; i < avctx->channels; i++) {
            *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
            c->status[i].step_index = get_bits(&gb, 6);
        }

        for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
            int i;

            for (i = 0; i < avctx->channels; i++) {
                // similar to IMA adpcm
                int delta = get_bits(&gb, nb_bits);
                int step = ff_adpcm_step_table[c->status[i].step_index];
                int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                int k = k0;

                /* accumulate a halved step for each set magnitude bit */
                do {
                    if (delta & k)
                        vpdiff += step;
                    step >>= 1;
                    k >>= 1;
                } while(k);
                vpdiff += step;   /* rounding term */

                if (delta & signmask)
                    c->status[i].predictor -= vpdiff;
                else
                    c->status[i].predictor += vpdiff;

                /* index adaptation uses the code with the sign bit masked off */
                c->status[i].step_index += table[delta & (~signmask)];

                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                *samples++ = c->status[i].predictor;
            }
        }
    }
}
670 
671 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
672 {
673  int sample = sign_extend(nibble, 4) * (1 << shift);
674 
675  if (flag)
676  sample += (8 * cs->sample1) - (4 * cs->sample2);
677  else
678  sample += 4 * cs->sample1;
679 
680  sample = av_clip_int16(sample >> 2);
681 
682  cs->sample2 = cs->sample1;
683  cs->sample1 = sample;
684 
685  return sample;
686 }
687 
/**
 * Get the number of samples (per channel) that will be decoded from the packet.
 * In one case, this is actually the maximum number of samples possible to
 * decode with the given buf_size.
 *
 * @param[out] coded_samples set to the number of samples as coded in the
 *                           packet, or 0 if the codec does not encode the
 *                           number of samples in each frame.
 * @param[out] approx_nb_samples set to non-zero if the number of samples
 *                               returned is an approximation.
 */
/* NOTE(review): the function's signature line (upstream: static int
 * get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, ...)) and most
 * AV_CODEC_ID_ADPCM_* case labels in the switches below were lost in
 * extraction — each bare statement run belongs to one or more case labels.
 * Verify against the repository before editing the logic. */
 int buf_size, int *coded_samples, int *approx_nb_samples)
{
    ADPCMDecodeContext *s = avctx->priv_data;
    int nb_samples = 0;
    int ch = avctx->channels;
    int has_coded_samples = 0;
    int header_size;

    *coded_samples = 0;
    *approx_nb_samples = 0;

    if(ch <= 0)
        return 0;

    /* codecs with a fixed per-packet sample count (case labels missing) */
    switch (avctx->codec->id) {
    /* constant, only check buf_size */
        if (buf_size < 76 * ch)
            return 0;
        nb_samples = 128;
        break;
        if (buf_size < 34 * ch)
            return 0;
        nb_samples = 64;
        break;
    /* simple 4-bit adpcm */
        nb_samples = buf_size * 2 / ch;
        break;
    }
    if (nb_samples)
        return nb_samples;

    /* simple 4-bit adpcm, with header */
    header_size = 0;
    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_ISS:     header_size = 4 * ch;      break;
    case AV_CODEC_ID_ADPCM_IMA_SMJPEG:  header_size = 4 * ch;      break;
    }
    if (header_size > 0)
        return (buf_size - header_size) * 2 / ch;

    /* more complex formats (case labels missing throughout this switch) */
    switch (avctx->codec->id) {
        bytestream2_skip(gb, 4);
        has_coded_samples = 1;
        *coded_samples = bytestream2_get_le32u(gb);
        nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
        bytestream2_seek(gb, -8, SEEK_CUR);   /* rewind over the header peek */
        break;
        has_coded_samples = 1;
        *coded_samples  = bytestream2_get_le32(gb);
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - 12) / 30 * 28;
        break;
        has_coded_samples = 1;
        *coded_samples = bytestream2_get_le32(gb);
        nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
        break;
        nb_samples = (buf_size - ch) / ch * 2;
        break;
    /* maximum number of samples */
    /* has internal offsets and a per-frame switch to signal raw 16-bit */
        has_coded_samples = 1;
        switch (avctx->codec->id) {
            header_size    = 4 + 9 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_be32(gb);
            break;
        }
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - header_size) * 2 / ch;
        nb_samples     -= nb_samples % 28;
        *approx_nb_samples = 1;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 4 * ch) * 2 / ch;
        break;
    {
        /* block size/samples depend on the coded sample width (2..5 bits) */
        int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
        int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
        break;
    }
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 6 * ch) * 2 / ch;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
        break;
    {
        int samples_per_byte;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
        case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
        case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
        }
        if (!s->status[0].step_index) {
            /* first packet carries one raw seed byte per channel */
            if (buf_size < ch)
                return AVERROR_INVALIDDATA;
            nb_samples++;
            buf_size -= ch;
        }
        nb_samples += buf_size * samples_per_byte / ch;
        break;
    }
    {
        /* SWF layout: see adpcm_swf_decode() — 2-bit width field, then
         * blocks of 4096 samples with a 22-bit per-channel header */
        int buf_bits = buf_size * 8 - 2;
        int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
        int block_hdr_size = 22 * ch;
        int block_size = block_hdr_size + nbits * ch * 4095;
        int nblocks = buf_bits / block_size;
        int bits_left = buf_bits - nblocks * block_size;
        nb_samples = nblocks * 4096;
        if (bits_left >= block_hdr_size)
            nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
        break;
    }
        if (avctx->extradata) {
            nb_samples = buf_size * 14 / (8 * ch);
            break;
        }
        has_coded_samples = 1;
        bytestream2_skip(gb, 4); // channel size
        *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
                         bytestream2_get_le32(gb) :
                         bytestream2_get_be32(gb);
        buf_size -= 8 + 36 * ch;
        buf_size /= ch;
        nb_samples = buf_size / 8 * 14;
        if (buf_size % 8 > 1)
            nb_samples += (buf_size % 8 - 1) * 2;
        *approx_nb_samples = 1;
        break;
        nb_samples = buf_size / (9 * ch) * 16;
        break;
        nb_samples = (buf_size / 128) * 224 / ch;
        break;
        nb_samples = buf_size / (16 * ch) * 28;
        break;
        nb_samples = buf_size / avctx->block_align * 32;
        break;
        nb_samples = buf_size / ch;
        break;
    }

    /* validate coded sample count */
    if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
        return AVERROR_INVALIDDATA;

    return nb_samples;
}
919 
920 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
921  int *got_frame_ptr, AVPacket *avpkt)
922 {
923  AVFrame *frame = data;
924  const uint8_t *buf = avpkt->data;
925  int buf_size = avpkt->size;
926  ADPCMDecodeContext *c = avctx->priv_data;
927  ADPCMChannelStatus *cs;
928  int n, m, channel, i;
929  int16_t *samples;
930  int16_t **samples_p;
931  int st; /* stereo */
932  int count1, count2;
933  int nb_samples, coded_samples, approx_nb_samples, ret;
934  GetByteContext gb;
935 
936  bytestream2_init(&gb, buf, buf_size);
937  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
938  if (nb_samples <= 0) {
939  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
940  return AVERROR_INVALIDDATA;
941  }
942 
943  /* get output buffer */
944  frame->nb_samples = nb_samples;
945  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
946  return ret;
947  samples = (int16_t *)frame->data[0];
948  samples_p = (int16_t **)frame->extended_data;
949 
950  /* use coded_samples when applicable */
951  /* it is always <= nb_samples, so the output buffer will be large enough */
952  if (coded_samples) {
953  if (!approx_nb_samples && coded_samples != nb_samples)
954  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
955  frame->nb_samples = nb_samples = coded_samples;
956  }
957 
958  st = avctx->channels == 2 ? 1 : 0;
959 
960  switch(avctx->codec->id) {
962  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
963  Channel data is interleaved per-chunk. */
964  for (channel = 0; channel < avctx->channels; channel++) {
965  int predictor;
966  int step_index;
967  cs = &(c->status[channel]);
968  /* (pppppp) (piiiiiii) */
969 
970  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
971  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
972  step_index = predictor & 0x7F;
973  predictor &= ~0x7F;
974 
975  if (cs->step_index == step_index) {
976  int diff = predictor - cs->predictor;
977  if (diff < 0)
978  diff = - diff;
979  if (diff > 0x7f)
980  goto update;
981  } else {
982  update:
983  cs->step_index = step_index;
984  cs->predictor = predictor;
985  }
986 
987  if (cs->step_index > 88u){
988  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
989  channel, cs->step_index);
990  return AVERROR_INVALIDDATA;
991  }
992 
993  samples = samples_p[channel];
994 
995  for (m = 0; m < 64; m += 2) {
996  int byte = bytestream2_get_byteu(&gb);
997  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
998  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
999  }
1000  }
1001  break;
1003  for(i=0; i<avctx->channels; i++){
1004  cs = &(c->status[i]);
1005  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1006 
1007  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1008  if (cs->step_index > 88u){
1009  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1010  i, cs->step_index);
1011  return AVERROR_INVALIDDATA;
1012  }
1013  }
1014 
1015  if (avctx->bits_per_coded_sample != 4) {
1016  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1017  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1018  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1019  GetBitContext g;
1020 
1021  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1022  for (i = 0; i < avctx->channels; i++) {
1023  int j;
1024 
1025  cs = &c->status[i];
1026  samples = &samples_p[i][1 + n * samples_per_block];
1027  for (j = 0; j < block_size; j++) {
1028  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1029  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1030  }
1031  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1032  if (ret < 0)
1033  return ret;
1034  for (m = 0; m < samples_per_block; m++) {
1036  avctx->bits_per_coded_sample);
1037  }
1038  }
1039  }
1040  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1041  } else {
1042  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1043  for (i = 0; i < avctx->channels; i++) {
1044  cs = &c->status[i];
1045  samples = &samples_p[i][1 + n * 8];
1046  for (m = 0; m < 8; m += 2) {
1047  int v = bytestream2_get_byteu(&gb);
1048  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1049  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1050  }
1051  }
1052  }
1053  }
1054  break;
1055  case AV_CODEC_ID_ADPCM_4XM:
1056  for (i = 0; i < avctx->channels; i++)
1057  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1058 
1059  for (i = 0; i < avctx->channels; i++) {
1060  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1061  if (c->status[i].step_index > 88u) {
1062  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1063  i, c->status[i].step_index);
1064  return AVERROR_INVALIDDATA;
1065  }
1066  }
1067 
1068  for (i = 0; i < avctx->channels; i++) {
1069  samples = (int16_t *)frame->data[i];
1070  cs = &c->status[i];
1071  for (n = nb_samples >> 1; n > 0; n--) {
1072  int v = bytestream2_get_byteu(&gb);
1073  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1074  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1075  }
1076  }
1077  break;
1078  case AV_CODEC_ID_ADPCM_AGM:
1079  for (i = 0; i < avctx->channels; i++)
1080  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1081  for (i = 0; i < avctx->channels; i++)
1082  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1083 
1084  for (n = 0; n < nb_samples >> (1 - st); n++) {
1085  int v = bytestream2_get_byteu(&gb);
1086  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1087  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1088  }
1089  break;
1090  case AV_CODEC_ID_ADPCM_MS:
1091  {
      /* Microsoft ADPCM.  Each block starts, per channel, with a predictor
       * index (must be 0..6; it selects a coefficient pair from
       * ff_adpcm_AdaptCoeff1/2), an initial delta, and two seed samples.
       * The seeds are emitted verbatim, so (nb_samples - 2) samples per
       * channel remain to be decoded from the packed nibbles. */
1092  int block_predictor;
1093 
      /* More than 2 channels: planar output; each channel's block is
       * decoded completely before moving to the next channel. */
1094  if (avctx->channels > 2) {
1095  for (channel = 0; channel < avctx->channels; channel++) {
1096  samples = samples_p[channel];
1097  block_predictor = bytestream2_get_byteu(&gb);
1098  if (block_predictor > 6) {
1099  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1100  channel, block_predictor);
1101  return AVERROR_INVALIDDATA;
1102  }
1103  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1104  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1105  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1106  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1107  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
      /* sample2 is the older of the two seeds, so it is output first. */
1108  *samples++ = c->status[channel].sample2;
1109  *samples++ = c->status[channel].sample1;
1110  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1111  int byte = bytestream2_get_byteu(&gb);
1112  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1113  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1114  }
1115  }
1116  } else {
      /* Mono/stereo: header fields are interleaved L,R per field and the
       * output is interleaved; high nibble is channel 0, low nibble is
       * channel st (st==0 for mono reuses the same state). */
1117  block_predictor = bytestream2_get_byteu(&gb);
1118  if (block_predictor > 6) {
1119  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1120  block_predictor);
1121  return AVERROR_INVALIDDATA;
1122  }
1123  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1124  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1125  if (st) {
1126  block_predictor = bytestream2_get_byteu(&gb);
1127  if (block_predictor > 6) {
1128  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1129  block_predictor);
1130  return AVERROR_INVALIDDATA;
1131  }
1132  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1133  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1134  }
1135  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1136  if (st){
1137  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1138  }
1139 
1140  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1141  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1142  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1143  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1144 
1145  *samples++ = c->status[0].sample2;
1146  if (st) *samples++ = c->status[1].sample2;
1147  *samples++ = c->status[0].sample1;
1148  if (st) *samples++ = c->status[1].sample1;
1149  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1150  int byte = bytestream2_get_byteu(&gb);
1151  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1152  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1153  }
1154  }
1155  break;
1156  }
1158  for (channel = 0; channel < avctx->channels; channel+=2) {
1159  bytestream2_skipu(&gb, 4);
1160  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1161  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1162  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1163  bytestream2_skipu(&gb, 2);
1164  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1165  bytestream2_skipu(&gb, 2);
1166  for (n = 0; n < nb_samples; n+=2) {
1167  int v = bytestream2_get_byteu(&gb);
1168  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1169  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1170  }
1171  for (n = 0; n < nb_samples; n+=2) {
1172  int v = bytestream2_get_byteu(&gb);
1173  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1174  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1175  }
1176  }
1177  break;
      /* (Case label dropped by this listing — DK4-style IMA ADPCM:
       * the 16-bit predictor read per channel is also emitted as the
       * first output sample, hence (nb_samples - 1) samples remain.
       * TODO(review): confirm the codec id against the full source.) */
1179  for (channel = 0; channel < avctx->channels; channel++) {
1180  cs = &c->status[channel];
1181  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1182  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
      /* IMA step table has 89 entries; anything past 88 is corrupt input. */
1183  if (cs->step_index > 88u){
1184  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1185  channel, cs->step_index);
1186  return AVERROR_INVALIDDATA;
1187  }
1188  }
1189  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1190  int v = bytestream2_get_byteu(&gb);
1191  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1192  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1193  }
1194  break;
1196  {
1197  int last_byte = 0;
1198  int nibble;
1199  int decode_top_nibble_next = 0;
1200  int diff_channel;
1201  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1202 
1203  bytestream2_skipu(&gb, 10);
1204  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1205  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1206  c->status[0].step_index = bytestream2_get_byteu(&gb);
1207  c->status[1].step_index = bytestream2_get_byteu(&gb);
1208  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1209  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1210  c->status[0].step_index, c->status[1].step_index);
1211  return AVERROR_INVALIDDATA;
1212  }
1213  /* sign extend the predictors */
1214  diff_channel = c->status[1].predictor;
1215 
1216  /* DK3 ADPCM support macro */
1217 #define DK3_GET_NEXT_NIBBLE() \
1218  if (decode_top_nibble_next) { \
1219  nibble = last_byte >> 4; \
1220  decode_top_nibble_next = 0; \
1221  } else { \
1222  last_byte = bytestream2_get_byteu(&gb); \
1223  nibble = last_byte & 0x0F; \
1224  decode_top_nibble_next = 1; \
1225  }
1226 
1227  while (samples < samples_end) {
1228 
1229  /* for this algorithm, c->status[0] is the sum channel and
1230  * c->status[1] is the diff channel */
1231 
1232  /* process the first predictor of the sum channel */
1234  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1235 
1236  /* process the diff channel predictor */
1238  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1239 
1240  /* process the first pair of stereo PCM samples */
1241  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1242  *samples++ = c->status[0].predictor + c->status[1].predictor;
1243  *samples++ = c->status[0].predictor - c->status[1].predictor;
1244 
1245  /* process the second predictor of the sum channel */
1247  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1248 
1249  /* process the second pair of stereo PCM samples */
1250  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1251  *samples++ = c->status[0].predictor + c->status[1].predictor;
1252  *samples++ = c->status[0].predictor - c->status[1].predictor;
1253  }
1254 
1255  if ((bytestream2_tell(&gb) & 1))
1256  bytestream2_skip(&gb, 1);
1257  break;
1258  }
1260  for (channel = 0; channel < avctx->channels; channel++) {
1261  cs = &c->status[channel];
1262  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1263  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1264  if (cs->step_index > 88u){
1265  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1266  channel, cs->step_index);
1267  return AVERROR_INVALIDDATA;
1268  }
1269  }
1270 
1271  for (n = nb_samples >> (1 - st); n > 0; n--) {
1272  int v1, v2;
1273  int v = bytestream2_get_byteu(&gb);
1274  /* nibbles are swapped for mono */
1275  if (st) {
1276  v1 = v >> 4;
1277  v2 = v & 0x0F;
1278  } else {
1279  v2 = v >> 4;
1280  v1 = v & 0x0F;
1281  }
1282  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1283  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1284  }
1285  break;
1287  for (channel = 0; channel < avctx->channels; channel++) {
1288  cs = &c->status[channel];
1289  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1290  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1291  if (cs->step_index > 88u){
1292  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1293  channel, cs->step_index);
1294  return AVERROR_INVALIDDATA;
1295  }
1296  }
1297 
1298  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1299  for (channel = 0; channel < avctx->channels; channel++) {
1300  samples = samples_p[channel] + 256 * subframe;
1301  for (n = 0; n < 256; n += 2) {
1302  int v = bytestream2_get_byteu(&gb);
1303  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1304  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1305  }
1306  }
1307  }
1308  break;
1310  for (channel = 0; channel < avctx->channels; channel++) {
1311  cs = &c->status[channel];
1312  samples = samples_p[channel];
1313  bytestream2_skip(&gb, 4);
1314  for (n = 0; n < nb_samples; n += 2) {
1315  int v = bytestream2_get_byteu(&gb);
1316  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1317  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1318  }
1319  }
1320  break;
1322  for (n = nb_samples >> (1 - st); n > 0; n--) {
1323  int v = bytestream2_get_byteu(&gb);
1324  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1325  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1326  }
1327  break;
1329  for (n = nb_samples >> (1 - st); n > 0; n--) {
1330  int v = bytestream2_get_byteu(&gb);
1331  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1332  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1333  }
1334  break;
1336  for (n = nb_samples / 2; n > 0; n--) {
1337  for (channel = 0; channel < avctx->channels; channel++) {
1338  int v = bytestream2_get_byteu(&gb);
1339  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1340  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1341  }
1342  samples += avctx->channels;
1343  }
1344  break;
1346  for (n = nb_samples / 2; n > 0; n--) {
1347  for (channel = 0; channel < avctx->channels; channel++) {
1348  int v = bytestream2_get_byteu(&gb);
1349  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1350  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1351  }
1352  samples += avctx->channels;
1353  }
1354  break;
1356  for (channel = 0; channel < avctx->channels; channel++) {
1357  int16_t *smp = samples_p[channel];
1358  for (n = 0; n < nb_samples / 2; n++) {
1359  int v = bytestream2_get_byteu(&gb);
1360  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1361  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1362  }
1363  }
1364  break;
1366  for (n = nb_samples >> (1 - st); n > 0; n--) {
1367  int v = bytestream2_get_byteu(&gb);
1368  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1369  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1370  }
1371  break;
1373  for (channel = 0; channel < avctx->channels; channel++) {
1374  cs = &c->status[channel];
1375  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1376  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1377  if (cs->step_index > 88u){
1378  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1379  channel, cs->step_index);
1380  return AVERROR_INVALIDDATA;
1381  }
1382  }
1383  for (n = 0; n < nb_samples / 2; n++) {
1384  int byte[2];
1385 
1386  byte[0] = bytestream2_get_byteu(&gb);
1387  if (st)
1388  byte[1] = bytestream2_get_byteu(&gb);
1389  for(channel = 0; channel < avctx->channels; channel++) {
1390  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1391  }
1392  for(channel = 0; channel < avctx->channels; channel++) {
1393  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1394  }
1395  }
1396  break;
1398  if (c->vqa_version == 3) {
1399  for (channel = 0; channel < avctx->channels; channel++) {
1400  int16_t *smp = samples_p[channel];
1401 
1402  for (n = nb_samples / 2; n > 0; n--) {
1403  int v = bytestream2_get_byteu(&gb);
1404  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1405  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1406  }
1407  }
1408  } else {
1409  for (n = nb_samples / 2; n > 0; n--) {
1410  for (channel = 0; channel < avctx->channels; channel++) {
1411  int v = bytestream2_get_byteu(&gb);
1412  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1413  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1414  }
1415  samples += avctx->channels;
1416  }
1417  }
1418  bytestream2_seek(&gb, 0, SEEK_END);
1419  break;
1420  case AV_CODEC_ID_ADPCM_XA:
1421  {
      /* CD-ROM XA: decoded one 128-byte sound sector at a time by
       * xa_decode(), which reads straight from the packet buffer (buf)
       * at the reader's current offset. */
1422  int16_t *out0 = samples_p[0];
1423  int16_t *out1 = samples_p[1];
      /* 28 samples per sound group, 4 groups per sector; mono packs two
       * nibble streams per group, so (3 - channels) doubles the count. */
1424  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1425  int sample_offset = 0;
1426  int bytes_remaining;
1427  while (bytestream2_get_bytes_left(&gb) >= 128) {
1428  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1429  &c->status[0], &c->status[1],
1430  avctx->channels, sample_offset)) < 0)
1431  return ret;
1432  bytestream2_skipu(&gb, 128);
1433  sample_offset += samples_per_block;
1434  }
1435  /* Less than a full block of data left, e.g. when reading from
1436  * 2324 byte per sector XA; the remainder is padding */
1437  bytes_remaining = bytestream2_get_bytes_left(&gb);
1438  if (bytes_remaining > 0) {
1439  bytestream2_skip(&gb, bytes_remaining);
1440  }
1441  break;
1442  }
1444  for (i=0; i<=st; i++) {
1445  c->status[i].step_index = bytestream2_get_le32u(&gb);
1446  if (c->status[i].step_index > 88u) {
1447  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1448  i, c->status[i].step_index);
1449  return AVERROR_INVALIDDATA;
1450  }
1451  }
1452  for (i=0; i<=st; i++) {
1453  c->status[i].predictor = bytestream2_get_le32u(&gb);
1454  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1455  return AVERROR_INVALIDDATA;
1456  }
1457 
1458  for (n = nb_samples >> (1 - st); n > 0; n--) {
1459  int byte = bytestream2_get_byteu(&gb);
1460  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1461  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1462  }
1463  break;
1465  for (n = nb_samples >> (1 - st); n > 0; n--) {
1466  int byte = bytestream2_get_byteu(&gb);
1467  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1468  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1469  }
1470  break;
1471  case AV_CODEC_ID_ADPCM_EA:
1472  {
1473  int previous_left_sample, previous_right_sample;
1474  int current_left_sample, current_right_sample;
1475  int next_left_sample, next_right_sample;
1476  int coeff1l, coeff2l, coeff1r, coeff2r;
1477  int shift_left, shift_right;
1478 
1479  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1480  each coding 28 stereo samples. */
1481 
      /* This variant is strictly stereo. */
1482  if(avctx->channels != 2)
1483  return AVERROR_INVALIDDATA;
1484 
      /* Four 16-bit seeds: current/previous sample for left, then right. */
1485  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1486  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1487  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1488  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1489 
1490  for (count1 = 0; count1 < nb_samples / 28; count1++) {
      /* First control byte: high nibble selects the left channel's
       * coefficient pair in ea_adpcm_table, low nibble the right's. */
1491  int byte = bytestream2_get_byteu(&gb);
1492  coeff1l = ea_adpcm_table[ byte >> 4 ];
1493  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1494  coeff1r = ea_adpcm_table[ byte & 0x0F];
1495  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1496 
      /* Second control byte: per-channel shift exponents. */
1497  byte = bytestream2_get_byteu(&gb);
1498  shift_left = 20 - (byte >> 4);
1499  shift_right = 20 - (byte & 0x0F);
1500 
1501  for (count2 = 0; count2 < 28; count2++) {
      /* One byte per stereo pair: high nibble = left, low nibble = right.
       * Deltas are scaled up to ~20-bit range, the prediction is added,
       * then +0x80 >> 8 rounds back down to 16-bit range. */
1502  byte = bytestream2_get_byteu(&gb);
1503  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1504  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1505 
1506  next_left_sample = (next_left_sample +
1507  (current_left_sample * coeff1l) +
1508  (previous_left_sample * coeff2l) + 0x80) >> 8;
1509  next_right_sample = (next_right_sample +
1510  (current_right_sample * coeff1r) +
1511  (previous_right_sample * coeff2r) + 0x80) >> 8;
1512 
1513  previous_left_sample = current_left_sample;
1514  current_left_sample = av_clip_int16(next_left_sample);
1515  previous_right_sample = current_right_sample;
1516  current_right_sample = av_clip_int16(next_right_sample);
1517  *samples++ = current_left_sample;
1518  *samples++ = current_right_sample;
1519  }
1520  }
1521 
1522  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1523 
1524  break;
1525  }
1527  {
1528  int coeff[2][2], shift[2];
1529 
1530  for(channel = 0; channel < avctx->channels; channel++) {
1531  int byte = bytestream2_get_byteu(&gb);
1532  for (i=0; i<2; i++)
1533  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1534  shift[channel] = 20 - (byte & 0x0F);
1535  }
1536  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1537  int byte[2];
1538 
1539  byte[0] = bytestream2_get_byteu(&gb);
1540  if (st) byte[1] = bytestream2_get_byteu(&gb);
1541  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1542  for(channel = 0; channel < avctx->channels; channel++) {
1543  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1544  sample = (sample +
1545  c->status[channel].sample1 * coeff[channel][0] +
1546  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1547  c->status[channel].sample2 = c->status[channel].sample1;
1548  c->status[channel].sample1 = av_clip_int16(sample);
1549  *samples++ = c->status[channel].sample1;
1550  }
1551  }
1552  }
1553  bytestream2_seek(&gb, 0, SEEK_END);
1554  break;
1555  }
1558  case AV_CODEC_ID_ADPCM_EA_R3: {
1559  /* channel numbering
1560  2chan: 0=fl, 1=fr
1561  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1562  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1563  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1564  int previous_sample, current_sample, next_sample;
1565  int coeff1, coeff2;
1566  int shift;
1567  unsigned int channel;
1568  uint16_t *samplesC;
1569  int count = 0;
1570  int offsets[6];
1571 
1572  for (channel=0; channel<avctx->channels; channel++)
1573  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1574  bytestream2_get_le32(&gb)) +
1575  (avctx->channels + 1) * 4;
1576 
1577  for (channel=0; channel<avctx->channels; channel++) {
1578  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1579  samplesC = samples_p[channel];
1580 
1581  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1582  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1583  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1584  } else {
1585  current_sample = c->status[channel].predictor;
1586  previous_sample = c->status[channel].prev_sample;
1587  }
1588 
1589  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1590  int byte = bytestream2_get_byte(&gb);
1591  if (byte == 0xEE) { /* only seen in R2 and R3 */
1592  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1593  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1594 
1595  for (count2=0; count2<28; count2++)
1596  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1597  } else {
1598  coeff1 = ea_adpcm_table[ byte >> 4 ];
1599  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1600  shift = 20 - (byte & 0x0F);
1601 
1602  for (count2=0; count2<28; count2++) {
1603  if (count2 & 1)
1604  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1605  else {
1606  byte = bytestream2_get_byte(&gb);
1607  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1608  }
1609 
1610  next_sample += (current_sample * coeff1) +
1611  (previous_sample * coeff2);
1612  next_sample = av_clip_int16(next_sample >> 8);
1613 
1614  previous_sample = current_sample;
1615  current_sample = next_sample;
1616  *samplesC++ = current_sample;
1617  }
1618  }
1619  }
1620  if (!count) {
1621  count = count1;
1622  } else if (count != count1) {
1623  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1624  count = FFMAX(count, count1);
1625  }
1626 
1627  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1628  c->status[channel].predictor = current_sample;
1629  c->status[channel].prev_sample = previous_sample;
1630  }
1631  }
1632 
1633  frame->nb_samples = count * 28;
1634  bytestream2_seek(&gb, 0, SEEK_END);
1635  break;
1636  }
1638  for (channel=0; channel<avctx->channels; channel++) {
1639  int coeff[2][4], shift[4];
1640  int16_t *s = samples_p[channel];
1641  for (n = 0; n < 4; n++, s += 32) {
1642  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1643  for (i=0; i<2; i++)
1644  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1645  s[0] = val & ~0x0F;
1646 
1647  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1648  shift[n] = 20 - (val & 0x0F);
1649  s[1] = val & ~0x0F;
1650  }
1651 
1652  for (m=2; m<32; m+=2) {
1653  s = &samples_p[channel][m];
1654  for (n = 0; n < 4; n++, s += 32) {
1655  int level, pred;
1656  int byte = bytestream2_get_byteu(&gb);
1657 
1658  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1659  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1660  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1661 
1662  level = sign_extend(byte, 4) * (1 << shift[n]);
1663  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1664  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1665  }
1666  }
1667  }
1668  break;
1670  for (channel = 0; channel < avctx->channels; channel++) {
1671  cs = &c->status[channel];
1672  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1673  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1674  if (cs->step_index > 88u){
1675  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1676  channel, cs->step_index);
1677  return AVERROR_INVALIDDATA;
1678  }
1679  }
1680  for (n = nb_samples >> (1 - st); n > 0; n--) {
1681  int byte = bytestream2_get_byteu(&gb);
1682  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1683  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1684  }
1685  break;
1687  av_assert0(avctx->channels == 1);
1688 
1689  /*
1690  * Header format:
1691  * int16_t predictor;
1692  * uint8_t step_index;
1693  * uint8_t reserved;
1694  * uint32_t frame_size;
1695  *
1696  * Some implementations have step_index as 16-bits, but others
1697  * only use the lower 8 and store garbage in the upper 8.
1698  */
1699  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1700  c->status[0].step_index = bytestream2_get_byteu(&gb);
1701  bytestream2_skipu(&gb, 5);
1702  if (c->status[0].step_index > 88u) {
1703  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1704  c->status[0].step_index);
1705  return AVERROR_INVALIDDATA;
1706  }
1707 
1708  for (n = nb_samples >> 1; n > 0; n--) {
1709  int v = bytestream2_get_byteu(&gb);
1710 
1711  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1712  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1713  }
1714 
1715  if (nb_samples & 1) {
1716  int v = bytestream2_get_byteu(&gb);
1717  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1718 
1719  if (v & 0x0F) {
1720  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1721  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1722  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1723  }
1724  }
1725  break;
1727  for (i = 0; i < avctx->channels; i++) {
1728  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1729  c->status[i].step_index = bytestream2_get_byteu(&gb);
1730  bytestream2_skipu(&gb, 1);
1731  if (c->status[i].step_index > 88u) {
1732  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1733  c->status[i].step_index);
1734  return AVERROR_INVALIDDATA;
1735  }
1736  }
1737 
1738  for (n = nb_samples >> (1 - st); n > 0; n--) {
1739  int v = bytestream2_get_byteu(&gb);
1740 
1741  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1742  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1743  }
1744  break;
1745  case AV_CODEC_ID_ADPCM_CT:
      /* Creative Technology ADPCM: no per-block header, state carries over
       * from the previous packet.  High nibble first; the second nibble
       * goes to c->status[st] (interleaved stereo, mono reuses status[0]). */
1746  for (n = nb_samples >> (1 - st); n > 0; n--) {
1747  int v = bytestream2_get_byteu(&gb);
1748  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1749  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1750  }
1751  break;
1755  if (!c->status[0].step_index) {
1756  /* the first byte is a raw sample */
1757  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1758  if (st)
1759  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1760  c->status[0].step_index = 1;
1761  nb_samples--;
1762  }
1763  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1764  for (n = nb_samples >> (1 - st); n > 0; n--) {
1765  int byte = bytestream2_get_byteu(&gb);
1766  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1767  byte >> 4, 4, 0);
1768  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1769  byte & 0x0F, 4, 0);
1770  }
1771  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1772  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1773  int byte = bytestream2_get_byteu(&gb);
1774  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1775  byte >> 5 , 3, 0);
1776  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1777  (byte >> 2) & 0x07, 3, 0);
1778  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1779  byte & 0x03, 2, 0);
1780  }
1781  } else {
1782  for (n = nb_samples >> (2 - st); n > 0; n--) {
1783  int byte = bytestream2_get_byteu(&gb);
1784  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1785  byte >> 6 , 2, 2);
1786  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1787  (byte >> 4) & 0x03, 2, 2);
1788  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1789  (byte >> 2) & 0x03, 2, 2);
1790  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1791  byte & 0x03, 2, 2);
1792  }
1793  }
1794  break;
1795  case AV_CODEC_ID_ADPCM_SWF:
      /* SWF ADPCM is bit-packed rather than byte-aligned, so it is decoded
       * from the raw packet buffer in adpcm_swf_decode(); the bytestream
       * reader is then fast-forwarded to mark the whole packet consumed. */
1796  adpcm_swf_decode(avctx, buf, buf_size, samples);
1797  bytestream2_seek(&gb, 0, SEEK_END);
1798  break;
1800  for (n = nb_samples >> (1 - st); n > 0; n--) {
1801  int v = bytestream2_get_byteu(&gb);
1802  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1803  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1804  }
1805  break;
1807  for (channel = 0; channel < avctx->channels; channel++) {
1808  samples = samples_p[channel];
1809  for (n = nb_samples >> 1; n > 0; n--) {
1810  int v = bytestream2_get_byteu(&gb);
1811  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1812  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1813  }
1814  }
1815  break;
1816  case AV_CODEC_ID_ADPCM_AFC:
1817  {
      /* AFC (GameCube): data is organised in 9-byte chunks, each holding a
       * scale/coeff-index control byte plus 16 packed 4-bit deltas. */
1818  int samples_per_block;
1819  int blocks;
1820 
      /* extradata[0], when present, gives the samples per block; despite
       * its name, samples_per_block below counts 16-sample chunks. */
1821  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1822  samples_per_block = avctx->extradata[0] / 16;
1823  blocks = nb_samples / avctx->extradata[0];
1824  } else {
1825  samples_per_block = nb_samples / 16;
1826  blocks = 1;
1827  }
1828 
1829  for (m = 0; m < blocks; m++) {
1830  for (channel = 0; channel < avctx->channels; channel++) {
1831  int prev1 = c->status[channel].sample1;
1832  int prev2 = c->status[channel].sample2;
1833 
      /* NOTE(review): output advances by 16 per block regardless of
       * samples_per_block, which only lines up when extradata[0] == 16 —
       * verify against the upstream source/callers. */
1834  samples = samples_p[channel] + m * 16;
1835  /* Read in every sample for this channel. */
1836  for (i = 0; i < samples_per_block; i++) {
1837  int byte = bytestream2_get_byteu(&gb);
1838  int scale = 1 << (byte >> 4);
1839  int index = byte & 0xf;
1840  int factor1 = ff_adpcm_afc_coeffs[0][index];
1841  int factor2 = ff_adpcm_afc_coeffs[1][index];
1842 
1843  /* Decode 16 samples. */
1844  for (n = 0; n < 16; n++) {
1845  int32_t sampledat;
1846 
      /* Two samples per byte: high nibble on even n, low on odd n. */
1847  if (n & 1) {
1848  sampledat = sign_extend(byte, 4);
1849  } else {
1850  byte = bytestream2_get_byteu(&gb);
1851  sampledat = sign_extend(byte >> 4, 4);
1852  }
1853 
1854  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1855  sampledat * scale;
1856  *samples = av_clip_int16(sampledat);
1857  prev2 = prev1;
1858  prev1 = *samples++;
1859  }
1860  }
1861 
      /* Persist the prediction history across packets. */
1862  c->status[channel].sample1 = prev1;
1863  c->status[channel].sample2 = prev2;
1864  }
1865  }
1866  bytestream2_seek(&gb, 0, SEEK_END);
1867  break;
1868  }
1869  case AV_CODEC_ID_ADPCM_THP:
1871  {
1872  int table[14][16];
1873  int ch;
1874 
1875 #define THP_GET16(g) \
1876  sign_extend( \
1877  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1878  bytestream2_get_le16u(&(g)) : \
1879  bytestream2_get_be16u(&(g)), 16)
1880 
1881  if (avctx->extradata) {
1883  if (avctx->extradata_size < 32 * avctx->channels) {
1884  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1885  return AVERROR_INVALIDDATA;
1886  }
1887 
1888  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1889  for (i = 0; i < avctx->channels; i++)
1890  for (n = 0; n < 16; n++)
1891  table[i][n] = THP_GET16(tb);
1892  } else {
1893  for (i = 0; i < avctx->channels; i++)
1894  for (n = 0; n < 16; n++)
1895  table[i][n] = THP_GET16(gb);
1896 
1897  if (!c->has_status) {
1898  /* Initialize the previous sample. */
1899  for (i = 0; i < avctx->channels; i++) {
1900  c->status[i].sample1 = THP_GET16(gb);
1901  c->status[i].sample2 = THP_GET16(gb);
1902  }
1903  c->has_status = 1;
1904  } else {
1905  bytestream2_skip(&gb, avctx->channels * 4);
1906  }
1907  }
1908 
1909  for (ch = 0; ch < avctx->channels; ch++) {
1910  samples = samples_p[ch];
1911 
1912  /* Read in every sample for this channel. */
1913  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1914  int byte = bytestream2_get_byteu(&gb);
1915  int index = (byte >> 4) & 7;
1916  unsigned int exp = byte & 0x0F;
1917  int64_t factor1 = table[ch][index * 2];
1918  int64_t factor2 = table[ch][index * 2 + 1];
1919 
1920  /* Decode 14 samples. */
1921  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1922  int32_t sampledat;
1923 
1924  if (n & 1) {
1925  sampledat = sign_extend(byte, 4);
1926  } else {
1927  byte = bytestream2_get_byteu(&gb);
1928  sampledat = sign_extend(byte >> 4, 4);
1929  }
1930 
1931  sampledat = ((c->status[ch].sample1 * factor1
1932  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1933  *samples = av_clip_int16(sampledat);
1934  c->status[ch].sample2 = c->status[ch].sample1;
1935  c->status[ch].sample1 = *samples++;
1936  }
1937  }
1938  }
1939  break;
1940  }
1941  case AV_CODEC_ID_ADPCM_DTK:
      /* DTK (GameCube disc streaming): the packet interleaves both
       * channels' nibbles.  It is decoded in two passes: after channel 0
       * finishes, the reader is rewound (SEEK_SET below) and channel 1
       * re-reads the same bytes, taking the other nibble and the other
       * header byte of each 28-sample unit. */
1942  for (channel = 0; channel < avctx->channels; channel++) {
1943  samples = samples_p[channel];
1944 
1945  /* Read in every sample for this channel. */
1946  for (i = 0; i < nb_samples / 28; i++) {
1947  int byte, header;
1948  if (channel)
1949  bytestream2_skipu(&gb, 1);
1950  header = bytestream2_get_byteu(&gb);
1951  bytestream2_skipu(&gb, 3 - channel);
1952 
1953  /* Decode 28 samples. */
1954  for (n = 0; n < 28; n++) {
1955  int32_t sampledat, prev;
1956 
      /* header high nibble picks one of three fixed prediction filters
       * (0 = no prediction); header low nibble is the shift amount. */
1957  switch (header >> 4) {
1958  case 1:
1959  prev = (c->status[channel].sample1 * 0x3c);
1960  break;
1961  case 2:
1962  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1963  break;
1964  case 3:
1965  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1966  break;
1967  default:
1968  prev = 0;
1969  }
1970 
1971  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1972 
      /* Channel 0 takes the low nibble, channel 1 the high nibble. */
1973  byte = bytestream2_get_byteu(&gb);
1974  if (!channel)
1975  sampledat = sign_extend(byte, 4);
1976  else
1977  sampledat = sign_extend(byte >> 4, 4);
1978 
      /* State keeps 6 extra fractional bits; only the emitted sample is
       * shifted down and clipped to 16 bits. */
1979  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1980  *samples++ = av_clip_int16(sampledat >> 6);
1981  c->status[channel].sample2 = c->status[channel].sample1;
1982  c->status[channel].sample1 = sampledat;
1983  }
1984  }
1985  if (!channel)
1986  bytestream2_seek(&gb, 0, SEEK_SET);
1987  }
1988  break;
1989  case AV_CODEC_ID_ADPCM_PSX:
1990  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
1991  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
1992  for (channel = 0; channel < avctx->channels; channel++) {
1993  samples = samples_p[channel] + block * nb_samples_per_block;
1994  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
1995 
1996  /* Read in every sample for this channel. */
1997  for (i = 0; i < nb_samples_per_block / 28; i++) {
1998  int filter, shift, flag, byte;
1999 
2000  filter = bytestream2_get_byteu(&gb);
2001  shift = filter & 0xf;
2002  filter = filter >> 4;
2004  return AVERROR_INVALIDDATA;
2005  flag = bytestream2_get_byteu(&gb) & 0x7;
2006 
2007  /* Decode 28 samples. */
2008  for (n = 0; n < 28; n++) {
2009  int sample = 0, scale;
2010 
2011  if (n & 1) {
2012  scale = sign_extend(byte >> 4, 4);
2013  } else {
2014  byte = bytestream2_get_byteu(&gb);
2015  scale = sign_extend(byte, 4);
2016  }
2017 
2018  if (flag < 0x07) {
2019  scale = scale * (1 << 12);
2020  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2021  }
2023  c->status[channel].sample2 = c->status[channel].sample1;
2024  c->status[channel].sample1 = sample;
2025  }
2026  }
2027  }
2028  }
2029  break;
2031  /*
2032  * The format of each block:
2033  * uint8_t left_control;
2034  * uint4_t left_samples[nb_samples];
2035  * ---- and if stereo ----
2036  * uint8_t right_control;
2037  * uint4_t right_samples[nb_samples];
2038  *
2039  * Format of the control byte:
2040  * MSB [SSSSRDRR] LSB
2041  * S = (Shift Amount - 2)
2042  * D = Decoder flag.
2043  * R = Reserved
2044  *
2045  * Each block relies on the previous two samples of each channel.
2046  * They should be 0 initially.
2047  */
2048  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2049  for (channel = 0; channel < avctx->channels; channel++) {
2050  int control, shift;
2051 
2052  samples = samples_p[channel] + block * 32;
2053  cs = c->status + channel;
2054 
2055  /* Get the control byte and decode the samples, 2 at a time. */
2056  control = bytestream2_get_byteu(&gb);
2057  shift = (control >> 4) + 2;
2058 
2059  for (n = 0; n < 16; n++) {
2060  int sample = bytestream2_get_byteu(&gb);
2061  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2062  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2063  }
2064  }
2065  }
2066  break;
2068  for (n = 0; n < nb_samples * avctx->channels; n++) {
2069  int v = bytestream2_get_byteu(&gb);
2070  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2071  }
2072  break;
2074  for (n = nb_samples / 2; n > 0; n--) {
2075  for (channel = 0; channel < avctx->channels; channel++) {
2076  int v = bytestream2_get_byteu(&gb);
2077  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2078  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2079  }
2080  samples += avctx->channels;
2081  }
2082  break;
2083  default:
2084  av_assert0(0); // unsupported codec_id should not happen
2085  }
2086 
2087  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2088  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2089  return AVERROR_INVALIDDATA;
2090  }
2091 
2092  *got_frame_ptr = 1;
2093 
2094  if (avpkt->size < bytestream2_tell(&gb)) {
2095  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2096  return avpkt->size;
2097  }
2098 
2099  return bytestream2_tell(&gb);
2100 }
2101 
2102 static void adpcm_flush(AVCodecContext *avctx)
2103 {
2104  ADPCMDecodeContext *c = avctx->priv_data;
2105 
2106  /* Just nuke the entire state and re-init. */
2107  memset(c, 0, sizeof(ADPCMDecodeContext));
2108 
2109  switch(avctx->codec_id) {
2110  case AV_CODEC_ID_ADPCM_CT:
2111  c->status[0].step = c->status[1].step = 511;
2112  break;
2113 
2115  if (avctx->extradata && avctx->extradata_size >= 8) {
2116  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2117  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2118  }
2119  break;
2120 
2122  if (avctx->extradata && avctx->extradata_size >= 28) {
2123  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2124  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2125  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2126  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2127  }
2128  break;
2129 
2131  if (avctx->extradata && avctx->extradata_size >= 2)
2132  c->vqa_version = AV_RL16(avctx->extradata);
2133  break;
2134  default:
2135  /* Other codecs may want to handle this during decoding. */
2136  c->has_status = 0;
2137  return;
2138  }
2139 
2140  c->has_status = 1;
2141 }
2142 
2143 
/*
 * Declare one AVCodec for an ADPCM variant.  Every decoder in this file
 * shares the same init/decode/flush entry points and private context and
 * differs only in codec id, the supported sample-format list and naming.
 */
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
const AVCodec ff_ ## name_ ## _decoder = { \
    .name           = #name_, \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
    .type           = AVMEDIA_TYPE_AUDIO, \
    .id             = id_, \
    .priv_data_size = sizeof(ADPCMDecodeContext), \
    .init           = adpcm_decode_init, \
    .decode         = adpcm_decode_frame, \
    .flush          = adpcm_flush, \
    .capabilities   = AV_CODEC_CAP_DR1, \
    .sample_fmts    = sample_fmts_, \
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE, \
}
2166 
2167 /* Note: Do not forget to add new entries to the Makefile as well. */
2168 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
2169 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
2170 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
2171 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
2172 ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games");
2173 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
2174 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
2175 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
2176 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
2177 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
2178 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
2179 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
2180 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
2181 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay");
2182 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
2183 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
2184 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM");
2185 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
2186 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
2187 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
2188 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
2189 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
2190 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
2191 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
2192 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX");
2193 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework");
2194 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
2195 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
2196 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
2197 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive");
2198 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
2199 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP");
2200 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
2201 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
2202 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
2203 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
2204 ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
2205 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
2206 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
2207 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
2208 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
2209 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
2210 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
2211 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
2212 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
2213 ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork");
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:187
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:358
DK3_GET_NEXT_NIBBLE
#define DK3_GET_NEXT_NIBBLE()
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:352
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:122
ff_adpcm_oki_step_table
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_ADPCM_DTK
@ AV_CODEC_ID_ADPCM_DTK
Definition: codec_id.h:385
ADPCMChannelStatus::step_index
int16_t step_index
Definition: adpcm.h:33
GetByteContext
Definition: bytestream.h:33
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
zork_index_table
static const int8_t zork_index_table[8]
Definition: adpcm.c:89
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:146
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:149
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
index
fg index
Definition: ffmpeg_filter.c:168
internal.h
AV_CODEC_ID_ADPCM_IMA_CUNNING
@ AV_CODEC_ID_ADPCM_IMA_CUNNING
Definition: codec_id.h:400
AVPacket::data
uint8_t * data
Definition: packet.h:365
table
static const uint16_t table[]
Definition: prosumer.c:206
AV_CODEC_ID_ADPCM_EA_R3
@ AV_CODEC_ID_ADPCM_EA_R3
Definition: codec_id.h:373
data
const char data[16]
Definition: mxf.c:142
AV_CODEC_ID_ADPCM_AICA
@ AV_CODEC_ID_ADPCM_AICA
Definition: codec_id.h:390
AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_CODEC_ID_ADPCM_IMA_OKI
Definition: codec_id.h:384
adpcm_ima_qt_expand_nibble
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:361
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
AV_CODEC_ID_ADPCM_THP_LE
@ AV_CODEC_ID_ADPCM_THP_LE
Definition: codec_id.h:388
adpcm_sbpro_expand_nibble
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:448
AV_CODEC_ID_ADPCM_CT
@ AV_CODEC_ID_ADPCM_CT
Definition: codec_id.h:364
ff_adpcm_ima_cunning_index_table
const int8_t ff_adpcm_ima_cunning_index_table[9]
Definition: adpcm_data.c:187
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
THP_GET16
#define THP_GET16(g)
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:393
GetBitContext
Definition: get_bits.h:62
adpcm_ima_mtf_expand_nibble
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:302
adpcm_ima_expand_nibble
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:253
val
static double val(void *priv, double ch)
Definition: aeval.c:76
adpcm_flush
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2102
update
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
Definition: af_silencedetect.c:78
ff_adpcm_ima_block_sizes
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_CODEC_ID_ADPCM_SBPRO_2
Definition: codec_id.h:369
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
sample_fmts_s16p
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2146
adpcm_ima_alp_expand_nibble
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:279
adpcm_yamaha_expand_nibble
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:468
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:486
AV_CODEC_ID_ADPCM_IMA_ACORN
@ AV_CODEC_ID_ADPCM_IMA_ACORN
Definition: codec_id.h:402
adpcm_zork_expand_nibble
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:491
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
offsets
static const int offsets[]
Definition: hevc_pel.c:34
AV_CODEC_ID_ADPCM_AFC
@ AV_CODEC_ID_ADPCM_AFC
Definition: codec_id.h:383
AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
Definition: codec_id.h:375
g
const char * g
Definition: vf_curves.c:117
AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_CODEC_ID_ADPCM_IMA_DK3
Definition: codec_id.h:354
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_CODEC_ID_ADPCM_IMA_APC
@ AV_CODEC_ID_ADPCM_IMA_APC
Definition: codec_id.h:381
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:421
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:360
AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_ADPCM_IMA_ISS
Definition: codec_id.h:379
channels
channels
Definition: aptx.h:33
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
Definition: codec_id.h:357
adpcm_ms_expand_nibble
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:387
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:394
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
ff_adpcm_ima_block_samples
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
sample_fmts_s16
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2144
AV_CODEC_ID_ADPCM_EA_XAS
@ AV_CODEC_ID_ADPCM_EA_XAS
Definition: codec_id.h:377
av_clip_int16
#define av_clip_int16
Definition: common.h:137
NULL
#define NULL
Definition: coverity.c:32
av_clip_intp2
#define av_clip_intp2
Definition: common.h:143
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:366
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:356
AV_CODEC_ID_ADPCM_IMA_EA_EACS
@ AV_CODEC_ID_ADPCM_IMA_EA_EACS
Definition: codec_id.h:376
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:394
AV_CODEC_ID_ADPCM_IMA_DK4
@ AV_CODEC_ID_ADPCM_IMA_DK4
Definition: codec_id.h:355
ff_adpcm_mtaf_stepsize
const int16_t ff_adpcm_mtaf_stepsize[32][16]
Definition: adpcm_data.c:114
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:371
abs
#define abs(x)
Definition: cuda_runtime.h:35
ea_adpcm_table
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:73
exp
int8_t exp
Definition: eval.c:72
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_ADPCM_XA
@ AV_CODEC_ID_ADPCM_XA
Definition: codec_id.h:360
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
adpcm_ct_expand_nibble
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:427
adpcm.h
adpcm_ima_oki_expand_nibble
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:406
AV_CODEC_ID_ADPCM_ZORK
@ AV_CODEC_ID_ADPCM_ZORK
Definition: codec_id.h:396
ADPCMDecodeContext
Definition: adpcm.c:100
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1645
AVPacket::size
int size
Definition: packet.h:366
byte
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:99
AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_CODEC_ID_ADPCM_IMA_RAD
Definition: codec_id.h:386
adpcm_ima_cunning_expand_nibble
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:318
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:398
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
bps
unsigned bps
Definition: movenc.c:1595
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:989
get_nb_samples
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:699
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
sample
#define sample
Definition: flacdsp_template.c:44
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:365
size
int size
Definition: twinvq_data.h:10344
header
static const uint8_t header[24]
Definition: sdr2.c:67
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:164
xa_decode
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:526
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
ADPCM_DECODER
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:2152
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:982
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
flag
#define flag(name)
Definition: cbs_av1.c:553
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1407
sample_fmts_both
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2148
AV_CODEC_ID_ADPCM_MTAF
@ AV_CODEC_ID_ADPCM_MTAF
Definition: codec_id.h:392
AV_CODEC_ID_ADPCM_EA_MAXIS_XA
@ AV_CODEC_ID_ADPCM_EA_MAXIS_XA
Definition: codec_id.h:378
i
int i
Definition: input.c:407
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:485
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
delta
float delta
Definition: vorbis_enc_data.h:430
xf
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:664
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:397
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
tb
#define tb
Definition: regdef.h:68
ADPCMDecodeContext::vqa_version
int vqa_version
VQA version.
Definition: adpcm.c:102
AV_CODEC_ID_ADPCM_IMA_DAT4
@ AV_CODEC_ID_ADPCM_IMA_DAT4
Definition: codec_id.h:391
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:671
xa_adpcm_table
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:65
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
avcodec.h
AV_CODEC_ID_ADPCM_EA
@ AV_CODEC_ID_ADPCM_EA
Definition: codec_id.h:362
AV_CODEC_ID_ADPCM_IMA_MTF
@ AV_CODEC_ID_ADPCM_IMA_MTF
Definition: codec_id.h:399
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1018
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
adpcm_ima_wav_expand_nibble
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:338
AVCodecContext
main external API structure.
Definition: avcodec.h:384
AV_CODEC_ID_ADPCM_AGM
@ AV_CODEC_ID_ADPCM_AGM
Definition: codec_id.h:393
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
AV_CODEC_ID_ADPCM_EA_R1
@ AV_CODEC_ID_ADPCM_EA_R1
Definition: codec_id.h:372
AV_CODEC_ID_ADPCM_EA_R2
@ AV_CODEC_ID_ADPCM_EA_R2
Definition: codec_id.h:374
temp
else temp
Definition: vf_mcdeint.c:259
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
shift
static int shift(int a, int b)
Definition: sonic.c:83
AV_CODEC_ID_ADPCM_THP
@ AV_CODEC_ID_ADPCM_THP
Definition: codec_id.h:370
add
static float add(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:36
AV_CODEC_ID_ADPCM_SBPRO_4
@ AV_CODEC_ID_ADPCM_SBPRO_4
Definition: codec_id.h:367
adpcm_swf_decode
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:614
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:395
adpcm_decode_init
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:108
ADPCMDecodeContext::has_status
int has_status
Status flag.
Definition: adpcm.c:103
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
AV_CODEC_ID_ADPCM_IMA_MOFLEX
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
Definition: codec_id.h:401
AVPacket
This structure stores compressed data.
Definition: packet.h:342
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:411
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:353
d
d
Definition: ffmpeg_filter.c:156
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
ADPCMChannelStatus::predictor
int predictor
Definition: adpcm.h:32
ff_adpcm_ima_cunning_step_table
const int16_t ff_adpcm_ima_cunning_step_table[61]
Definition: adpcm_data.c:197
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:73
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_adpcm_afc_coeffs
const uint16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
adpcm_decode_frame
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:920
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
AV_CODEC_ID_ADPCM_4XM
@ AV_CODEC_ID_ADPCM_4XM
Definition: codec_id.h:359
adpcm_agm_expand_nibble
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:209
AV_CODEC_ID_ADPCM_PSX
@ AV_CODEC_ID_ADPCM_PSX
Definition: codec_id.h:389
adpcm_mtaf_expand_nibble
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:482
ff_adpcm_index_tables
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
int
int
Definition: ffmpeg_filter.c:156
ADPCMChannelStatus
Definition: adpcm.h:31
mtf_index_table
static const int8_t mtf_index_table[16]
Definition: adpcm.c:93
channel
channel
Definition: ebur128.h:39
AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_CODEC_ID_ADPCM_SBPRO_3
Definition: codec_id.h:368
ADPCMDecodeContext::status
ADPCMChannelStatus status[14]
Definition: adpcm.c:101
swf_index_tables
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:82