FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
64 /* These are for CD-ROM XA ADPCM */
/* These are for CD-ROM XA ADPCM */
/* Prediction coefficient pairs {f0, f1} in 6-bit fixed point, indexed by
 * the filter number taken from the sound-unit header (see xa_decode(),
 * which right-shifts the weighted sum by 6). */
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};
72 
/* Coefficient/shift table for the EA ADPCM decoder family.
 * NOTE(review): the consumers of this table are in the frame decoder
 * further down the file (outside this excerpt); the exact row layout
 * (coefficients vs. shift deltas) should be confirmed there. */
static const int16_t ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
    7,    8,   10,   11,
    0,   -1,   -3,   -4
};
80 
// Step-index adjustment tables for SWF ADPCM at 2..5 bits per sample;
// rows are padded with zeros where the table size is less than 16.
static const int8_t swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};
88 
/* Step-index adjustment for Zork ADPCM, indexed by the top three
 * magnitude bits of the code byte (see adpcm_zork_expand_nibble(),
 * which indexes with (nibble >> 4) & 7). */
static const int8_t zork_index_table[8] = {
    -1, -1, -1, 1, 4, 7, 10, 12,
};
92 
/* Step-index adjustment for IMA MTF ADPCM, indexed by the full 4-bit
 * code; symmetric around the midpoint of the 16 values (see
 * adpcm_ima_mtf_expand_nibble()). */
static const int8_t mtf_index_table[16] = {
     8,  6,  4,  2, -1, -1, -1, -1,
    -1, -1, -1, -1,  2,  4,  6,  8,
};
97 
98 /* end of tables */
99 
/* Per-stream decoder state.
 * NOTE(review): the remaining struct members (presumably the per-channel
 * ADPCMChannelStatus array referenced as c->status[] throughout this
 * file) and the closing brace were lost when this listing was extracted
 * -- restore from the original file. */
typedef struct ADPCMDecodeContext {
    int vqa_version;                /**< VQA version. Used for ADPCM_IMA_WS */

/* NOTE(review): this listing was extracted from a rendered page; the
 * function's signature line (presumably
 *   static av_cold int adpcm_decode_init(AVCodecContext *avctx))
 * and the `case` labels of the switch statements below were lost in
 * extraction.  The executable statements are kept verbatim; restore the
 * labels from the original file before compiling. */
{
    ADPCMDecodeContext *c = avctx->priv_data;
    unsigned int min_channels = 1;
    unsigned int max_channels = 2;

    /* per-codec channel-count constraints (case labels missing) */
    switch(avctx->codec->id) {
        max_channels = 1;
        break;
        min_channels = 2;
        break;
        max_channels = 6;
        break;
        min_channels = 2;
        max_channels = 8;
        if (avctx->channels & 1) {
            avpriv_request_sample(avctx, "channel count %d", avctx->channels);
            return AVERROR_PATCHWELCOME;
        }
        break;
        max_channels = 8;
        if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
            return AVERROR_INVALIDDATA;
        break;
        max_channels = 14;
        break;
    }
    if (avctx->channels < min_channels || avctx->channels > max_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    /* per-codec decoder-state setup from extradata (case labels missing) */
    switch(avctx->codec->id) {
        c->status[0].step = c->status[1].step = 511;
        break;
        if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
            return AVERROR_INVALIDDATA;
        break;
        if (avctx->extradata && avctx->extradata_size >= 8) {
            c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata    ), 18);
            c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
        }
        break;
        if (avctx->extradata) {
            if (avctx->extradata_size >= 28) {
                c->status[0].predictor  = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
                c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
                c->status[1].predictor  = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
                c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
            } else if (avctx->extradata_size >= 16) {
                c->status[0].predictor  = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
                c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
                c->status[1].predictor  = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
                c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
            }
        }
        break;
        if (avctx->extradata && avctx->extradata_size >= 2)
            c->vqa_version = AV_RL16(avctx->extradata);
        break;
        if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
            return AVERROR_INVALIDDATA;
        break;
        if (avctx->bits_per_coded_sample != 8)
            return AVERROR_INVALIDDATA;
        break;
    default:
        break;
    }

    /* output sample-format selection.
     * NOTE(review): the case labels and the continuation lines of the two
     * ternary expressions below (the AV_SAMPLE_FMT_S16 alternatives) were
     * also lost in extraction. */
    switch (avctx->codec->id) {
        break;
        avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
        break;
        avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
        break;
    default:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    }

    return 0;
}
232 
233 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
234 {
235  int delta, pred, step, add;
236 
237  pred = c->predictor;
238  delta = nibble & 7;
239  step = c->step;
240  add = (delta * 2 + 1) * step;
241  if (add < 0)
242  add = add + 7;
243 
244  if ((nibble & 8) == 0)
245  pred = av_clip(pred + (add >> 3), -32767, 32767);
246  else
247  pred = av_clip(pred - (add >> 3), -32767, 32767);
248 
249  switch (delta) {
250  case 7:
251  step *= 0x99;
252  break;
253  case 6:
254  c->step = av_clip(c->step * 2, 127, 24576);
255  c->predictor = pred;
256  return pred;
257  case 5:
258  step *= 0x66;
259  break;
260  case 4:
261  step *= 0x4d;
262  break;
263  default:
264  step *= 0x39;
265  break;
266  }
267 
268  if (step < 0)
269  step += 0x3f;
270 
271  c->step = step >> 6;
272  c->step = av_clip(c->step, 127, 24576);
273  c->predictor = pred;
274  return pred;
275 }
276 
277 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
278 {
279  int step_index;
280  int predictor;
281  int sign, delta, diff, step;
282 
283  step = ff_adpcm_step_table[c->step_index];
284  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
285  step_index = av_clip(step_index, 0, 88);
286 
287  sign = nibble & 8;
288  delta = nibble & 7;
289  /* perform direct multiplication instead of series of jumps proposed by
290  * the reference ADPCM implementation since modern CPUs can do the mults
291  * quickly enough */
292  diff = ((2 * delta + 1) * step) >> shift;
293  predictor = c->predictor;
294  if (sign) predictor -= diff;
295  else predictor += diff;
296 
297  c->predictor = av_clip_int16(predictor);
298  c->step_index = step_index;
299 
300  return (int16_t)c->predictor;
301 }
302 
303 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
304 {
305  int step_index;
306  int predictor;
307  int sign, delta, diff, step;
308 
309  step = ff_adpcm_step_table[c->step_index];
310  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
311  step_index = av_clip(step_index, 0, 88);
312 
313  sign = nibble & 8;
314  delta = nibble & 7;
315  diff = (delta * step) >> shift;
316  predictor = c->predictor;
317  if (sign) predictor -= diff;
318  else predictor += diff;
319 
320  c->predictor = av_clip_int16(predictor);
321  c->step_index = step_index;
322 
323  return (int16_t)c->predictor;
324 }
325 
326 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
327 {
328  int step_index, step, delta, predictor;
329 
330  step = ff_adpcm_step_table[c->step_index];
331 
332  delta = step * (2 * nibble - 15);
333  predictor = c->predictor + delta;
334 
335  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
336  c->predictor = av_clip_int16(predictor >> 4);
337  c->step_index = av_clip(step_index, 0, 88);
338 
339  return (int16_t)c->predictor;
340 }
341 
/* Decode one nibble of the Cunning Developments variant of IMA ADPCM.
 * The nibble is treated as a signed 4-bit value; the index adjustment is
 * looked up by its absolute value and clamped to 0..60. */
static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
{
    int step_index;
    int predictor;
    int step;

    nibble = sign_extend(nibble & 0xF, 4);

    /* NOTE(review): the statement initializing `step` (presumably from
     * ff_adpcm_ima_cunning_step_table[c->step_index]) was lost when this
     * listing was extracted -- restore it from the original file. */
    step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
    step_index = av_clip(step_index, 0, 60);

    predictor = c->predictor + step * nibble;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return c->predictor;
}
361 
/* NOTE(review): the signature line was lost in extraction; from the call
 * sites in adpcm_decode_frame() this is presumably
 *   static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c,
 *                                                     GetBitContext *gb, int bps)
 * -- confirm against the original file.
 * Decodes one variable-width (bps-bit) IMA WAV code read LSB-first from
 * the bit reader; the top bit is the sign, the rest the magnitude. */
{
    int nibble, step_index, predictor, sign, delta, diff, step, shift;

    shift = bps - 1;
    /* comma operator below is as in the original source */
    nibble = get_bits_le(gb, bps),
    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & (1 << shift);
    delta = av_mod_uintp2(nibble, shift);
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (int16_t)c->predictor;
}
384 
385 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
386 {
387  int step_index;
388  int predictor;
389  int diff, step;
390 
391  step = ff_adpcm_step_table[c->step_index];
392  step_index = c->step_index + ff_adpcm_index_table[nibble];
393  step_index = av_clip(step_index, 0, 88);
394 
395  diff = step >> 3;
396  if (nibble & 4) diff += step;
397  if (nibble & 2) diff += step >> 1;
398  if (nibble & 1) diff += step >> 2;
399 
400  if (nibble & 8)
401  predictor = c->predictor - diff;
402  else
403  predictor = c->predictor + diff;
404 
405  c->predictor = av_clip_int16(predictor);
406  c->step_index = step_index;
407 
408  return c->predictor;
409 }
410 
411 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
412 {
413  int predictor;
414 
415  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
416  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
417 
418  c->sample2 = c->sample1;
419  c->sample1 = av_clip_int16(predictor);
420  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
421  if (c->idelta < 16) c->idelta = 16;
422  if (c->idelta > INT_MAX/768) {
423  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
424  c->idelta = INT_MAX/768;
425  }
426 
427  return c->sample1;
428 }
429 
/* Decode one nibble of the OKI/Dialogic variant of IMA ADPCM.
 * The predictor is 12-bit (clipped with av_clip_intp2(.., 11)) and scaled
 * by 16 on output; the step index is clamped to 0..48. */
static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int step_index, predictor, sign, delta, diff, step;

    /* NOTE(review): the statement initializing `step` (presumably from an
     * OKI-specific step table indexed by c->step_index) was lost when this
     * listing was extracted -- restore it from the original file. */
    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
    step_index = av_clip(step_index, 0, 48);

    sign = nibble & 8;
    delta = nibble & 7;
    diff = ((2 * delta + 1) * step) >> 3;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    /* 12-bit predictor, widened to 16 bits on return */
    c->predictor = av_clip_intp2(predictor, 11);
    c->step_index = step_index;

    return c->predictor * 16;
}
450 
451 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
452 {
453  int sign, delta, diff;
454  int new_step;
455 
456  sign = nibble & 8;
457  delta = nibble & 7;
458  /* perform direct multiplication instead of series of jumps proposed by
459  * the reference ADPCM implementation since modern CPUs can do the mults
460  * quickly enough */
461  diff = ((2 * delta + 1) * c->step) >> 3;
462  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
463  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
464  c->predictor = av_clip_int16(c->predictor);
465  /* calculate new step and clamp it to range 511..32767 */
466  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
467  c->step = av_clip(new_step, 511, 32767);
468 
469  return (int16_t)c->predictor;
470 }
471 
472 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
473 {
474  int sign, delta, diff;
475 
476  sign = nibble & (1<<(size-1));
477  delta = nibble & ((1<<(size-1))-1);
478  diff = delta << (7 + c->step + shift);
479 
480  /* clamp result */
481  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
482 
483  /* calculate new step */
484  if (delta >= (2*size - 3) && c->step < 3)
485  c->step++;
486  else if (delta == 0 && c->step > 0)
487  c->step--;
488 
489  return (int16_t) c->predictor;
490 }
491 
/* NOTE(review): the signature line was lost in extraction; presumably
 *   static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c,
 *                                                    uint8_t nibble)
 * -- confirm against the original file.
 * Decodes one nibble using the Yamaha difference/scale lookup tables;
 * a zero step marks uninitialized state and is reset to the defaults. */
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    /* signed difference scaled by the current step */
    c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    /* rescale the step and keep it within 127..24576 */
    c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24576);
    return c->predictor;
}
505 
506 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
507 {
508  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
509  c->predictor = av_clip_int16(c->predictor);
510  c->step += ff_adpcm_index_table[nibble];
511  c->step = av_clip_uintp2(c->step, 5);
512  return c->predictor;
513 }
514 
515 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
516 {
517  int16_t index = c->step_index;
518  uint32_t lookup_sample = ff_adpcm_step_table[index];
519  int32_t sample = 0;
520 
521  if (nibble & 0x40)
522  sample += lookup_sample;
523  if (nibble & 0x20)
524  sample += lookup_sample >> 1;
525  if (nibble & 0x10)
526  sample += lookup_sample >> 2;
527  if (nibble & 0x08)
528  sample += lookup_sample >> 3;
529  if (nibble & 0x04)
530  sample += lookup_sample >> 4;
531  if (nibble & 0x02)
532  sample += lookup_sample >> 5;
533  if (nibble & 0x01)
534  sample += lookup_sample >> 6;
535  if (nibble & 0x80)
536  sample = -sample;
537 
538  sample += c->predictor;
539  sample = av_clip_int16(sample);
540 
541  index += zork_index_table[(nibble >> 4) & 7];
542  index = av_clip(index, 0, 88);
543 
544  c->predictor = sample;
545  c->step_index = index;
546 
547  return sample;
548 }
549 
/* Decode one 128-byte CD-ROM XA ADPCM sector group: four interleaved
 * sound units, low nibbles to out0 and high nibbles to out1.
 * NOTE(review): a parameter line was lost in extraction -- the body
 * references `in` (the input bytes) and `left` (the first channel's
 * ADPCMChannelStatus), so the full signature presumably includes
 * `const uint8_t *in, ADPCMChannelStatus *left,` between the lines
 * shown; confirm against the original file. */
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
                     ADPCMChannelStatus *right, int channels, int sample_offset)
{
    int i, j;
    int shift, filter, f0, f1;
    int s_1, s_2;
    int d, s, t;

    out0 += sample_offset;
    if (channels == 1)
        out1 = out0 + 28;
    else
        out1 += sample_offset;

    for (i = 0; i < 4; i++) {
        /* per-unit shift and filter from the sound-unit header byte */
        shift  = 12 - (in[4 + i * 2] & 15);
        filter = in[4 + i * 2] >> 4;
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter = 0;
        }
        if (shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
            shift = 0;
        }
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for (j = 0; j < 28; j++) {
            d = in[16 + i + j * 4];

            /* low nibble, combined with the second-order prediction */
            t = sign_extend(d, 4);
            s = t * (1 << shift) + ((s_1 * f0 + s_2 * f1 + 32) >> 6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out0[j] = s_1;
        }

        if (channels == 2) {
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
        }

        shift  = 12 - (in[5 + i * 2] & 15);
        filter = in[5 + i * 2] >> 4;
        /* NOTE(review): unlike the first-channel check above, this one
         * also tests `shift < 0` but then only resets `filter` and logs
         * the filter number; the shift case is re-checked just below.
         * The extra `|| shift < 0` looks like a transcription artifact
         * -- verify against the original file. */
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter = 0;
        }
        if (shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
            shift = 0;
        }

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for (j = 0; j < 28; j++) {
            d = in[16 + i + j * 4];

            /* high nibble of the same bytes */
            t = sign_extend(d >> 4, 4);
            s = t * (1 << shift) + ((s_1 * f0 + s_2 * f1 + 32) >> 6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out1[j] = s_1;
        }

        if (channels == 2) {
            right->sample1 = s_1;
            right->sample2 = s_2;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }

        out0 += 28 * (3 - channels);
        out1 += 28 * (3 - channels);
    }

    return 0;
}
637 
/* Decode an SWF ADPCM packet: a 2-bit code-size selector followed by
 * blocks of per-channel 16-bit seed sample + 6-bit step index and up to
 * 4096 variable-width codes per channel. */
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    GetBitContext gb;
    const int8_t *table;
    int k0, signmask, nb_bits, count;
    int size = buf_size * 8;
    int i;

    init_get_bits(&gb, buf, size);

    //read bits & initial values
    nb_bits = get_bits(&gb, 2) + 2;
    table = swf_index_tables[nb_bits - 2];
    k0 = 1 << (nb_bits - 2);
    signmask = 1 << (nb_bits - 1);

    while (get_bits_count(&gb) <= size - 22 * avctx->channels) {
        /* per-block header: seed sample and step index for each channel */
        for (i = 0; i < avctx->channels; i++) {
            *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
            c->status[i].step_index = get_bits(&gb, 6);
        }

        for (count = 0; get_bits_count(&gb) <= size - nb_bits * avctx->channels && count < 4095; count++) {
            int i;

            for (i = 0; i < avctx->channels; i++) {
                // similar to IMA adpcm
                int delta = get_bits(&gb, nb_bits);
                /* NOTE(review): the declaration/initialization of `step`
                 * (presumably int step = ff_adpcm_step_table[
                 * c->status[i].step_index];) was lost when this listing
                 * was extracted -- restore from the original file. */
                int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                int k = k0;

                do {
                    if (delta & k)
                        vpdiff += step;
                    step >>= 1;
                    k >>= 1;
                } while(k);
                vpdiff += step;

                if (delta & signmask)
                    c->status[i].predictor -= vpdiff;
                else
                    c->status[i].predictor += vpdiff;

                c->status[i].step_index += table[delta & (~signmask)];

                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                *samples++ = c->status[i].predictor;
            }
        }
    }
}
694 
695 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
696 {
697  int sample = sign_extend(nibble, 4) * (1 << shift);
698 
699  if (flag)
700  sample += (8 * cs->sample1) - (4 * cs->sample2);
701  else
702  sample += 4 * cs->sample1;
703 
704  sample = av_clip_int16(sample >> 2);
705 
706  cs->sample2 = cs->sample1;
707  cs->sample1 = sample;
708 
709  return sample;
710 }
711 
/**
 * Get the number of samples (per channel) that will be decoded from the packet.
 * In one case, this is actually the maximum number of samples possible to
 * decode with the given buf_size.
 *
 * @param[out] coded_samples set to the number of samples as coded in the
 *                           packet, or 0 if the codec does not encode the
 *                           number of samples in each frame.
 * @param[out] approx_nb_samples set to non-zero if the number of samples
 *                               returned is an approximation.
 */
/* NOTE(review): the first line of the signature (presumably
 *   static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,)
 * and most `case` labels of the switches below were lost when this
 * listing was extracted.  Executable statements are kept verbatim;
 * restore the labels from the original file before compiling. */
                          int buf_size, int *coded_samples, int *approx_nb_samples)
{
    ADPCMDecodeContext *s = avctx->priv_data;
    int nb_samples = 0;
    int ch = avctx->channels;
    int has_coded_samples = 0;
    int header_size;

    *coded_samples = 0;
    *approx_nb_samples = 0;

    /* guards the divisions by ch below */
    if(ch <= 0)
        return 0;

    switch (avctx->codec->id) {
    /* constant, only check buf_size */
        if (buf_size < 76 * ch)
            return 0;
        nb_samples = 128;
        break;
        if (buf_size < 34 * ch)
            return 0;
        nb_samples = 64;
        break;
    /* simple 4-bit adpcm */
        nb_samples = buf_size * 2 / ch;
        break;
    }
    if (nb_samples)
        return nb_samples;

    /* simple 4-bit adpcm, with header */
    header_size = 0;
    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_ISS:     header_size = 4 * ch;      break;
    case AV_CODEC_ID_ADPCM_IMA_AMV:     header_size = 8;           break;
    case AV_CODEC_ID_ADPCM_IMA_SMJPEG:  header_size = 4 * ch;      break;
    }
    if (header_size > 0)
        return (buf_size - header_size) * 2 / ch;

    /* more complex formats */
    switch (avctx->codec->id) {
        has_coded_samples = 1;
        *coded_samples  = bytestream2_get_le32(gb);
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - 12) / 30 * 28;
        break;
        has_coded_samples = 1;
        *coded_samples = bytestream2_get_le32(gb);
        nb_samples     = (buf_size - (4 + 8 * ch)) * 2 / ch;
        break;
        nb_samples = (buf_size - ch) / ch * 2;
        break;
    /* maximum number of samples */
    /* has internal offsets and a per-frame switch to signal raw 16-bit */
        has_coded_samples = 1;
        switch (avctx->codec->id) {
            header_size    = 4 + 9 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_be32(gb);
            break;
        }
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - header_size) * 2 / ch;
        nb_samples     -= nb_samples % 28;
        *approx_nb_samples = 1;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 4 * ch) * 2 / ch;
        break;
    {
        int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
        int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
        break;
    }
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 6 * ch) * 2 / ch;
        break;
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
        break;
    {
        int samples_per_byte;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
        case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
        case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
        }
        if (!s->status[0].step_index) {
            if (buf_size < ch)
                return AVERROR_INVALIDDATA;
            nb_samples++;
            buf_size -= ch;
        }
        nb_samples += buf_size * samples_per_byte / ch;
        break;
    }
    {
        int buf_bits       = (avctx->block_align ? avctx->block_align : buf_size) * 8 - 2;
        int nbits          = (bytestream2_get_byte(gb) >> 6) + 2;
        int block_hdr_size = 22 * ch;
        int block_size     = block_hdr_size + nbits * ch * 4095;
        int nblocks        = buf_bits / block_size;
        int bits_left      = buf_bits - nblocks * block_size;
        nb_samples = nblocks * 4096;
        if (bits_left >= block_hdr_size)
            nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);

        if (avctx->block_align)
            nb_samples *= buf_size / avctx->block_align;
        break;
    }
        if (avctx->extradata) {
            nb_samples = buf_size * 14 / (8 * ch);
            break;
        }
        has_coded_samples = 1;
        bytestream2_skip(gb, 4); // channel size
        *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
                         bytestream2_get_le32(gb) :
                         bytestream2_get_be32(gb);
        buf_size -= 8 + 36 * ch;
        buf_size /= ch;
        nb_samples = buf_size / 8 * 14;
        if (buf_size % 8 > 1)
            nb_samples += (buf_size % 8 - 1) * 2;
        *approx_nb_samples = 1;
        break;
        nb_samples = buf_size / (9 * ch) * 16;
        break;
        nb_samples = (buf_size / 128) * 224 / ch;
        break;
        nb_samples = buf_size / (16 * ch) * 28;
        break;
        nb_samples = buf_size / avctx->block_align * 32;
        break;
        nb_samples = buf_size / ch;
        break;
    }

    /* validate coded sample count */
    if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
        return AVERROR_INVALIDDATA;

    return nb_samples;
}
939 
940 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
941  int *got_frame_ptr, AVPacket *avpkt)
942 {
943  AVFrame *frame = data;
944  const uint8_t *buf = avpkt->data;
945  int buf_size = avpkt->size;
946  ADPCMDecodeContext *c = avctx->priv_data;
947  ADPCMChannelStatus *cs;
948  int n, m, channel, i;
949  int16_t *samples;
950  int16_t **samples_p;
951  int st; /* stereo */
952  int count1, count2;
953  int nb_samples, coded_samples, approx_nb_samples, ret;
954  GetByteContext gb;
955 
956  bytestream2_init(&gb, buf, buf_size);
957  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
958  if (nb_samples <= 0) {
959  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
960  return AVERROR_INVALIDDATA;
961  }
962 
963  /* get output buffer */
964  frame->nb_samples = nb_samples;
965  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
966  return ret;
967  samples = (int16_t *)frame->data[0];
968  samples_p = (int16_t **)frame->extended_data;
969 
970  /* use coded_samples when applicable */
971  /* it is always <= nb_samples, so the output buffer will be large enough */
972  if (coded_samples) {
973  if (!approx_nb_samples && coded_samples != nb_samples)
974  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
975  frame->nb_samples = nb_samples = coded_samples;
976  }
977 
978  st = avctx->channels == 2 ? 1 : 0;
979 
980  switch(avctx->codec->id) {
982  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
983  Channel data is interleaved per-chunk. */
984  for (channel = 0; channel < avctx->channels; channel++) {
985  int predictor;
986  int step_index;
987  cs = &(c->status[channel]);
988  /* (pppppp) (piiiiiii) */
989 
990  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
991  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
992  step_index = predictor & 0x7F;
993  predictor &= ~0x7F;
994 
995  if (cs->step_index == step_index) {
996  int diff = predictor - cs->predictor;
997  if (diff < 0)
998  diff = - diff;
999  if (diff > 0x7f)
1000  goto update;
1001  } else {
1002  update:
1003  cs->step_index = step_index;
1004  cs->predictor = predictor;
1005  }
1006 
1007  if (cs->step_index > 88u){
1008  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1009  channel, cs->step_index);
1010  return AVERROR_INVALIDDATA;
1011  }
1012 
1013  samples = samples_p[channel];
1014 
1015  for (m = 0; m < 64; m += 2) {
1016  int byte = bytestream2_get_byteu(&gb);
1017  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1018  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1019  }
1020  }
1021  break;
1023  for(i=0; i<avctx->channels; i++){
1024  cs = &(c->status[i]);
1025  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1026 
1027  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1028  if (cs->step_index > 88u){
1029  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1030  i, cs->step_index);
1031  return AVERROR_INVALIDDATA;
1032  }
1033  }
1034 
1035  if (avctx->bits_per_coded_sample != 4) {
1036  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1037  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1039  GetBitContext g;
1040 
1041  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1042  for (i = 0; i < avctx->channels; i++) {
1043  int j;
1044 
1045  cs = &c->status[i];
1046  samples = &samples_p[i][1 + n * samples_per_block];
1047  for (j = 0; j < block_size; j++) {
1048  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1049  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1050  }
1051  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1052  if (ret < 0)
1053  return ret;
1054  for (m = 0; m < samples_per_block; m++) {
1055  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1056  avctx->bits_per_coded_sample);
1057  }
1058  }
1059  }
1060  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1061  } else {
1062  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1063  for (i = 0; i < avctx->channels; i++) {
1064  cs = &c->status[i];
1065  samples = &samples_p[i][1 + n * 8];
1066  for (m = 0; m < 8; m += 2) {
1067  int v = bytestream2_get_byteu(&gb);
1068  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1069  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1070  }
1071  }
1072  }
1073  }
1074  break;
1075  case AV_CODEC_ID_ADPCM_4XM:
1076  for (i = 0; i < avctx->channels; i++)
1077  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1078 
1079  for (i = 0; i < avctx->channels; i++) {
1080  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1081  if (c->status[i].step_index > 88u) {
1082  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1083  i, c->status[i].step_index);
1084  return AVERROR_INVALIDDATA;
1085  }
1086  }
1087 
1088  for (i = 0; i < avctx->channels; i++) {
1089  samples = (int16_t *)frame->data[i];
1090  cs = &c->status[i];
1091  for (n = nb_samples >> 1; n > 0; n--) {
1092  int v = bytestream2_get_byteu(&gb);
1093  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1094  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1095  }
1096  }
1097  break;
1098  case AV_CODEC_ID_ADPCM_AGM:
1099  for (i = 0; i < avctx->channels; i++)
1100  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1101  for (i = 0; i < avctx->channels; i++)
1102  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1103 
1104  for (n = 0; n < nb_samples >> (1 - st); n++) {
1105  int v = bytestream2_get_byteu(&gb);
1106  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1107  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1108  }
1109  break;
1110  case AV_CODEC_ID_ADPCM_MS:
1111  {
1112  int block_predictor;
1113 
1114  if (avctx->channels > 2) {
1115  for (channel = 0; channel < avctx->channels; channel++) {
1116  samples = samples_p[channel];
1117  block_predictor = bytestream2_get_byteu(&gb);
1118  if (block_predictor > 6) {
1119  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1120  channel, block_predictor);
1121  return AVERROR_INVALIDDATA;
1122  }
1123  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1124  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1125  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1126  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1127  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1128  *samples++ = c->status[channel].sample2;
1129  *samples++ = c->status[channel].sample1;
1130  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1131  int byte = bytestream2_get_byteu(&gb);
1132  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1133  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1134  }
1135  }
1136  } else {
1137  block_predictor = bytestream2_get_byteu(&gb);
1138  if (block_predictor > 6) {
1139  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1140  block_predictor);
1141  return AVERROR_INVALIDDATA;
1142  }
1143  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1144  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1145  if (st) {
1146  block_predictor = bytestream2_get_byteu(&gb);
1147  if (block_predictor > 6) {
1148  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1149  block_predictor);
1150  return AVERROR_INVALIDDATA;
1151  }
1152  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1153  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1154  }
1155  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1156  if (st){
1157  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1158  }
1159 
1160  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1161  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1162  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1163  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1164 
1165  *samples++ = c->status[0].sample2;
1166  if (st) *samples++ = c->status[1].sample2;
1167  *samples++ = c->status[0].sample1;
1168  if (st) *samples++ = c->status[1].sample1;
1169  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1170  int byte = bytestream2_get_byteu(&gb);
1171  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1172  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1173  }
1174  }
1175  break;
1176  }
1178  for (channel = 0; channel < avctx->channels; channel+=2) {
1179  bytestream2_skipu(&gb, 4);
1180  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1181  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1182  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1183  bytestream2_skipu(&gb, 2);
1184  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1185  bytestream2_skipu(&gb, 2);
1186  for (n = 0; n < nb_samples; n+=2) {
1187  int v = bytestream2_get_byteu(&gb);
1188  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1189  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1190  }
1191  for (n = 0; n < nb_samples; n+=2) {
1192  int v = bytestream2_get_byteu(&gb);
1193  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1194  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1195  }
1196  }
1197  break;
1199  for (channel = 0; channel < avctx->channels; channel++) {
1200  cs = &c->status[channel];
1201  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1202  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1203  if (cs->step_index > 88u){
1204  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1205  channel, cs->step_index);
1206  return AVERROR_INVALIDDATA;
1207  }
1208  }
1209  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1210  int v = bytestream2_get_byteu(&gb);
1211  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1212  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1213  }
1214  break;
1216  {
1217  int last_byte = 0;
1218  int nibble;
1219  int decode_top_nibble_next = 0;
1220  int diff_channel;
1221  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1222 
1223  bytestream2_skipu(&gb, 10);
1224  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1225  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1226  c->status[0].step_index = bytestream2_get_byteu(&gb);
1227  c->status[1].step_index = bytestream2_get_byteu(&gb);
1228  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1229  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1230  c->status[0].step_index, c->status[1].step_index);
1231  return AVERROR_INVALIDDATA;
1232  }
1233  /* sign extend the predictors */
1234  diff_channel = c->status[1].predictor;
1235 
1236  /* DK3 ADPCM support macro */
1237 #define DK3_GET_NEXT_NIBBLE() \
1238  if (decode_top_nibble_next) { \
1239  nibble = last_byte >> 4; \
1240  decode_top_nibble_next = 0; \
1241  } else { \
1242  last_byte = bytestream2_get_byteu(&gb); \
1243  nibble = last_byte & 0x0F; \
1244  decode_top_nibble_next = 1; \
1245  }
1246 
1247  while (samples < samples_end) {
1248 
1249  /* for this algorithm, c->status[0] is the sum channel and
1250  * c->status[1] is the diff channel */
1251 
1252  /* process the first predictor of the sum channel */
1254  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1255 
1256  /* process the diff channel predictor */
1258  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1259 
1260  /* process the first pair of stereo PCM samples */
1261  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1262  *samples++ = c->status[0].predictor + c->status[1].predictor;
1263  *samples++ = c->status[0].predictor - c->status[1].predictor;
1264 
1265  /* process the second predictor of the sum channel */
1267  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1268 
1269  /* process the second pair of stereo PCM samples */
1270  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1271  *samples++ = c->status[0].predictor + c->status[1].predictor;
1272  *samples++ = c->status[0].predictor - c->status[1].predictor;
1273  }
1274 
1275  if ((bytestream2_tell(&gb) & 1))
1276  bytestream2_skip(&gb, 1);
1277  break;
1278  }
1280  for (channel = 0; channel < avctx->channels; channel++) {
1281  cs = &c->status[channel];
1282  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1283  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1284  if (cs->step_index > 88u){
1285  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1286  channel, cs->step_index);
1287  return AVERROR_INVALIDDATA;
1288  }
1289  }
1290 
1291  for (n = nb_samples >> (1 - st); n > 0; n--) {
1292  int v1, v2;
1293  int v = bytestream2_get_byteu(&gb);
1294  /* nibbles are swapped for mono */
1295  if (st) {
1296  v1 = v >> 4;
1297  v2 = v & 0x0F;
1298  } else {
1299  v2 = v >> 4;
1300  v1 = v & 0x0F;
1301  }
1302  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1303  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1304  }
1305  break;
1307  for (channel = 0; channel < avctx->channels; channel++) {
1308  cs = &c->status[channel];
1309  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1310  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1311  if (cs->step_index > 88u){
1312  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1313  channel, cs->step_index);
1314  return AVERROR_INVALIDDATA;
1315  }
1316  }
1317 
1318  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1319  for (channel = 0; channel < avctx->channels; channel++) {
1320  samples = samples_p[channel] + 256 * subframe;
1321  for (n = 0; n < 256; n += 2) {
1322  int v = bytestream2_get_byteu(&gb);
1323  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1324  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1325  }
1326  }
1327  }
1328  break;
1330  for (channel = 0; channel < avctx->channels; channel++) {
1331  cs = &c->status[channel];
1332  samples = samples_p[channel];
1333  bytestream2_skip(&gb, 4);
1334  for (n = 0; n < nb_samples; n += 2) {
1335  int v = bytestream2_get_byteu(&gb);
1336  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1337  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1338  }
1339  }
1340  break;
1342  for (n = nb_samples >> (1 - st); n > 0; n--) {
1343  int v = bytestream2_get_byteu(&gb);
1344  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1345  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1346  }
1347  break;
1349  for (n = nb_samples >> (1 - st); n > 0; n--) {
1350  int v = bytestream2_get_byteu(&gb);
1351  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1352  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1353  }
1354  break;
1356  for (n = nb_samples / 2; n > 0; n--) {
1357  for (channel = 0; channel < avctx->channels; channel++) {
1358  int v = bytestream2_get_byteu(&gb);
1359  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1360  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1361  }
1362  samples += avctx->channels;
1363  }
1364  break;
1366  for (n = nb_samples / 2; n > 0; n--) {
1367  for (channel = 0; channel < avctx->channels; channel++) {
1368  int v = bytestream2_get_byteu(&gb);
1369  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1370  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1371  }
1372  samples += avctx->channels;
1373  }
1374  break;
1376  for (n = 0; n < nb_samples / 2; n++) {
1377  int v = bytestream2_get_byteu(&gb);
1378  *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v & 0x0F);
1379  *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v >> 4);
1380  }
1381  break;
1383  for (n = nb_samples >> (1 - st); n > 0; n--) {
1384  int v = bytestream2_get_byteu(&gb);
1385  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1386  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1387  }
1388  break;
1390  for (channel = 0; channel < avctx->channels; channel++) {
1391  cs = &c->status[channel];
1392  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1393  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1394  if (cs->step_index > 88u){
1395  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1396  channel, cs->step_index);
1397  return AVERROR_INVALIDDATA;
1398  }
1399  }
1400  for (n = 0; n < nb_samples / 2; n++) {
1401  int byte[2];
1402 
1403  byte[0] = bytestream2_get_byteu(&gb);
1404  if (st)
1405  byte[1] = bytestream2_get_byteu(&gb);
1406  for(channel = 0; channel < avctx->channels; channel++) {
1407  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1408  }
1409  for(channel = 0; channel < avctx->channels; channel++) {
1410  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1411  }
1412  }
1413  break;
1415  if (c->vqa_version == 3) {
1416  for (channel = 0; channel < avctx->channels; channel++) {
1417  int16_t *smp = samples_p[channel];
1418 
1419  for (n = nb_samples / 2; n > 0; n--) {
1420  int v = bytestream2_get_byteu(&gb);
1421  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1422  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1423  }
1424  }
1425  } else {
1426  for (n = nb_samples / 2; n > 0; n--) {
1427  for (channel = 0; channel < avctx->channels; channel++) {
1428  int v = bytestream2_get_byteu(&gb);
1429  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1430  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1431  }
1432  samples += avctx->channels;
1433  }
1434  }
1435  bytestream2_seek(&gb, 0, SEEK_END);
1436  break;
1437  case AV_CODEC_ID_ADPCM_XA:
1438  {
1439  int16_t *out0 = samples_p[0];
1440  int16_t *out1 = samples_p[1];
1441  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1442  int sample_offset = 0;
1443  int bytes_remaining;
1444  while (bytestream2_get_bytes_left(&gb) >= 128) {
1445  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1446  &c->status[0], &c->status[1],
1447  avctx->channels, sample_offset)) < 0)
1448  return ret;
1449  bytestream2_skipu(&gb, 128);
1450  sample_offset += samples_per_block;
1451  }
1452  /* Less than a full block of data left, e.g. when reading from
1453  * 2324 byte per sector XA; the remainder is padding */
1454  bytes_remaining = bytestream2_get_bytes_left(&gb);
1455  if (bytes_remaining > 0) {
1456  bytestream2_skip(&gb, bytes_remaining);
1457  }
1458  break;
1459  }
1461  for (i=0; i<=st; i++) {
1462  c->status[i].step_index = bytestream2_get_le32u(&gb);
1463  if (c->status[i].step_index > 88u) {
1464  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1465  i, c->status[i].step_index);
1466  return AVERROR_INVALIDDATA;
1467  }
1468  }
1469  for (i=0; i<=st; i++) {
1470  c->status[i].predictor = bytestream2_get_le32u(&gb);
1471  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1472  return AVERROR_INVALIDDATA;
1473  }
1474 
1475  for (n = nb_samples >> (1 - st); n > 0; n--) {
1476  int byte = bytestream2_get_byteu(&gb);
1477  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1478  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1479  }
1480  break;
1482  for (n = nb_samples >> (1 - st); n > 0; n--) {
1483  int byte = bytestream2_get_byteu(&gb);
1484  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1485  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1486  }
1487  break;
1488  case AV_CODEC_ID_ADPCM_EA:
1489  {
1490  int previous_left_sample, previous_right_sample;
1491  int current_left_sample, current_right_sample;
1492  int next_left_sample, next_right_sample;
1493  int coeff1l, coeff2l, coeff1r, coeff2r;
1494  int shift_left, shift_right;
1495 
1496  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1497  each coding 28 stereo samples. */
1498 
1499  if(avctx->channels != 2)
1500  return AVERROR_INVALIDDATA;
1501 
1502  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1503  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1504  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1505  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1506 
1507  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1508  int byte = bytestream2_get_byteu(&gb);
1509  coeff1l = ea_adpcm_table[ byte >> 4 ];
1510  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1511  coeff1r = ea_adpcm_table[ byte & 0x0F];
1512  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1513 
1514  byte = bytestream2_get_byteu(&gb);
1515  shift_left = 20 - (byte >> 4);
1516  shift_right = 20 - (byte & 0x0F);
1517 
1518  for (count2 = 0; count2 < 28; count2++) {
1519  byte = bytestream2_get_byteu(&gb);
1520  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1521  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1522 
1523  next_left_sample = (next_left_sample +
1524  (current_left_sample * coeff1l) +
1525  (previous_left_sample * coeff2l) + 0x80) >> 8;
1526  next_right_sample = (next_right_sample +
1527  (current_right_sample * coeff1r) +
1528  (previous_right_sample * coeff2r) + 0x80) >> 8;
1529 
1530  previous_left_sample = current_left_sample;
1531  current_left_sample = av_clip_int16(next_left_sample);
1532  previous_right_sample = current_right_sample;
1533  current_right_sample = av_clip_int16(next_right_sample);
1534  *samples++ = current_left_sample;
1535  *samples++ = current_right_sample;
1536  }
1537  }
1538 
1539  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1540 
1541  break;
1542  }
1544  {
1545  int coeff[2][2], shift[2];
1546 
1547  for(channel = 0; channel < avctx->channels; channel++) {
1548  int byte = bytestream2_get_byteu(&gb);
1549  for (i=0; i<2; i++)
1550  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1551  shift[channel] = 20 - (byte & 0x0F);
1552  }
1553  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1554  int byte[2];
1555 
1556  byte[0] = bytestream2_get_byteu(&gb);
1557  if (st) byte[1] = bytestream2_get_byteu(&gb);
1558  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1559  for(channel = 0; channel < avctx->channels; channel++) {
1560  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1561  sample = (sample +
1562  c->status[channel].sample1 * coeff[channel][0] +
1563  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1565  c->status[channel].sample1 = av_clip_int16(sample);
1566  *samples++ = c->status[channel].sample1;
1567  }
1568  }
1569  }
1570  bytestream2_seek(&gb, 0, SEEK_END);
1571  break;
1572  }
1575  case AV_CODEC_ID_ADPCM_EA_R3: {
1576  /* channel numbering
1577  2chan: 0=fl, 1=fr
1578  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1579  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1580  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1581  int previous_sample, current_sample, next_sample;
1582  int coeff1, coeff2;
1583  int shift;
1584  unsigned int channel;
1585  uint16_t *samplesC;
1586  int count = 0;
1587  int offsets[6];
1588 
1589  for (channel=0; channel<avctx->channels; channel++)
1590  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1591  bytestream2_get_le32(&gb)) +
1592  (avctx->channels + 1) * 4;
1593 
1594  for (channel=0; channel<avctx->channels; channel++) {
1595  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1596  samplesC = samples_p[channel];
1597 
1598  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1599  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1600  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1601  } else {
1602  current_sample = c->status[channel].predictor;
1603  previous_sample = c->status[channel].prev_sample;
1604  }
1605 
1606  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1607  int byte = bytestream2_get_byte(&gb);
1608  if (byte == 0xEE) { /* only seen in R2 and R3 */
1609  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1610  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1611 
1612  for (count2=0; count2<28; count2++)
1613  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1614  } else {
1615  coeff1 = ea_adpcm_table[ byte >> 4 ];
1616  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1617  shift = 20 - (byte & 0x0F);
1618 
1619  for (count2=0; count2<28; count2++) {
1620  if (count2 & 1)
1621  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1622  else {
1623  byte = bytestream2_get_byte(&gb);
1624  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1625  }
1626 
1627  next_sample += (current_sample * coeff1) +
1628  (previous_sample * coeff2);
1629  next_sample = av_clip_int16(next_sample >> 8);
1630 
1631  previous_sample = current_sample;
1632  current_sample = next_sample;
1633  *samplesC++ = current_sample;
1634  }
1635  }
1636  }
1637  if (!count) {
1638  count = count1;
1639  } else if (count != count1) {
1640  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1641  count = FFMAX(count, count1);
1642  }
1643 
1644  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1645  c->status[channel].predictor = current_sample;
1646  c->status[channel].prev_sample = previous_sample;
1647  }
1648  }
1649 
1650  frame->nb_samples = count * 28;
1651  bytestream2_seek(&gb, 0, SEEK_END);
1652  break;
1653  }
1655  for (channel=0; channel<avctx->channels; channel++) {
1656  int coeff[2][4], shift[4];
1657  int16_t *s = samples_p[channel];
1658  for (n = 0; n < 4; n++, s += 32) {
1659  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1660  for (i=0; i<2; i++)
1661  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1662  s[0] = val & ~0x0F;
1663 
1664  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1665  shift[n] = 20 - (val & 0x0F);
1666  s[1] = val & ~0x0F;
1667  }
1668 
1669  for (m=2; m<32; m+=2) {
1670  s = &samples_p[channel][m];
1671  for (n = 0; n < 4; n++, s += 32) {
1672  int level, pred;
1673  int byte = bytestream2_get_byteu(&gb);
1674 
1675  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1676  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1677  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1678 
1679  level = sign_extend(byte, 4) * (1 << shift[n]);
1680  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1681  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1682  }
1683  }
1684  }
1685  break;
1687  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1688  c->status[0].step_index = bytestream2_get_byteu(&gb);
1689  bytestream2_skipu(&gb, 5);
1690  if (c->status[0].step_index > 88u) {
1691  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1692  c->status[0].step_index);
1693  return AVERROR_INVALIDDATA;
1694  }
1695 
1696  for (n = nb_samples >> (1 - st); n > 0; n--) {
1697  int v = bytestream2_get_byteu(&gb);
1698 
1699  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1700  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1701  }
1702  break;
1704  for (i = 0; i < avctx->channels; i++) {
1705  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1706  c->status[i].step_index = bytestream2_get_byteu(&gb);
1707  bytestream2_skipu(&gb, 1);
1708  if (c->status[i].step_index > 88u) {
1709  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1710  c->status[i].step_index);
1711  return AVERROR_INVALIDDATA;
1712  }
1713  }
1714 
1715  for (n = nb_samples >> (1 - st); n > 0; n--) {
1716  int v = bytestream2_get_byteu(&gb);
1717 
1718  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1719  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1720  }
1721  break;
1722  case AV_CODEC_ID_ADPCM_CT:
1723  for (n = nb_samples >> (1 - st); n > 0; n--) {
1724  int v = bytestream2_get_byteu(&gb);
1725  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1726  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1727  }
1728  break;
1732  if (!c->status[0].step_index) {
1733  /* the first byte is a raw sample */
1734  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1735  if (st)
1736  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1737  c->status[0].step_index = 1;
1738  nb_samples--;
1739  }
1740  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1741  for (n = nb_samples >> (1 - st); n > 0; n--) {
1742  int byte = bytestream2_get_byteu(&gb);
1743  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1744  byte >> 4, 4, 0);
1745  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1746  byte & 0x0F, 4, 0);
1747  }
1748  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1749  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1750  int byte = bytestream2_get_byteu(&gb);
1751  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1752  byte >> 5 , 3, 0);
1753  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1754  (byte >> 2) & 0x07, 3, 0);
1755  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1756  byte & 0x03, 2, 0);
1757  }
1758  } else {
1759  for (n = nb_samples >> (2 - st); n > 0; n--) {
1760  int byte = bytestream2_get_byteu(&gb);
1761  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1762  byte >> 6 , 2, 2);
1763  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1764  (byte >> 4) & 0x03, 2, 2);
1765  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1766  (byte >> 2) & 0x03, 2, 2);
1767  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1768  byte & 0x03, 2, 2);
1769  }
1770  }
1771  break;
1772  case AV_CODEC_ID_ADPCM_SWF:
1773  {
1774  const int nb_blocks = avctx->block_align ? avpkt->size / avctx->block_align : 1;
1775  const int block_size = avctx->block_align ? avctx->block_align : avpkt->size;
1776 
1777  for (int block = 0; block < nb_blocks; block++) {
1778  adpcm_swf_decode(avctx, buf + block * block_size, block_size, samples);
1779  samples += nb_samples / nb_blocks;
1780  }
1781  bytestream2_seek(&gb, 0, SEEK_END);
1782  break;
1783  }
1785  for (n = nb_samples >> (1 - st); n > 0; n--) {
1786  int v = bytestream2_get_byteu(&gb);
1787  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1788  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1789  }
1790  break;
1792  if (!c->has_status) {
1793  for (channel = 0; channel < avctx->channels; channel++)
1794  c->status[channel].step = 0;
1795  c->has_status = 1;
1796  }
1797  for (channel = 0; channel < avctx->channels; channel++) {
1798  samples = samples_p[channel];
1799  for (n = nb_samples >> 1; n > 0; n--) {
1800  int v = bytestream2_get_byteu(&gb);
1801  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1802  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1803  }
1804  }
1805  break;
1806  case AV_CODEC_ID_ADPCM_AFC:
1807  {
1808  int samples_per_block;
1809  int blocks;
1810 
1811  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1812  samples_per_block = avctx->extradata[0] / 16;
1813  blocks = nb_samples / avctx->extradata[0];
1814  } else {
1815  samples_per_block = nb_samples / 16;
1816  blocks = 1;
1817  }
1818 
1819  for (m = 0; m < blocks; m++) {
1820  for (channel = 0; channel < avctx->channels; channel++) {
1821  int prev1 = c->status[channel].sample1;
1822  int prev2 = c->status[channel].sample2;
1823 
1824  samples = samples_p[channel] + m * 16;
1825  /* Read in every sample for this channel. */
1826  for (i = 0; i < samples_per_block; i++) {
1827  int byte = bytestream2_get_byteu(&gb);
1828  int scale = 1 << (byte >> 4);
1829  int index = byte & 0xf;
1830  int factor1 = ff_adpcm_afc_coeffs[0][index];
1831  int factor2 = ff_adpcm_afc_coeffs[1][index];
1832 
1833  /* Decode 16 samples. */
1834  for (n = 0; n < 16; n++) {
1835  int32_t sampledat;
1836 
1837  if (n & 1) {
1838  sampledat = sign_extend(byte, 4);
1839  } else {
1840  byte = bytestream2_get_byteu(&gb);
1841  sampledat = sign_extend(byte >> 4, 4);
1842  }
1843 
1844  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1845  sampledat * scale;
1846  *samples = av_clip_int16(sampledat);
1847  prev2 = prev1;
1848  prev1 = *samples++;
1849  }
1850  }
1851 
1852  c->status[channel].sample1 = prev1;
1853  c->status[channel].sample2 = prev2;
1854  }
1855  }
1856  bytestream2_seek(&gb, 0, SEEK_END);
1857  break;
1858  }
1859  case AV_CODEC_ID_ADPCM_THP:
1861  {
1862  int table[14][16];
1863  int ch;
1864 
1865 #define THP_GET16(g) \
1866  sign_extend( \
1867  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1868  bytestream2_get_le16u(&(g)) : \
1869  bytestream2_get_be16u(&(g)), 16)
1870 
1871  if (avctx->extradata) {
1873  if (avctx->extradata_size < 32 * avctx->channels) {
1874  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1875  return AVERROR_INVALIDDATA;
1876  }
1877 
1878  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1879  for (i = 0; i < avctx->channels; i++)
1880  for (n = 0; n < 16; n++)
1881  table[i][n] = THP_GET16(tb);
1882  } else {
1883  for (i = 0; i < avctx->channels; i++)
1884  for (n = 0; n < 16; n++)
1885  table[i][n] = THP_GET16(gb);
1886 
1887  if (!c->has_status) {
1888  /* Initialize the previous sample. */
1889  for (i = 0; i < avctx->channels; i++) {
1890  c->status[i].sample1 = THP_GET16(gb);
1891  c->status[i].sample2 = THP_GET16(gb);
1892  }
1893  c->has_status = 1;
1894  } else {
1895  bytestream2_skip(&gb, avctx->channels * 4);
1896  }
1897  }
1898 
1899  for (ch = 0; ch < avctx->channels; ch++) {
1900  samples = samples_p[ch];
1901 
1902  /* Read in every sample for this channel. */
1903  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1904  int byte = bytestream2_get_byteu(&gb);
1905  int index = (byte >> 4) & 7;
1906  unsigned int exp = byte & 0x0F;
1907  int64_t factor1 = table[ch][index * 2];
1908  int64_t factor2 = table[ch][index * 2 + 1];
1909 
1910  /* Decode 14 samples. */
1911  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1912  int32_t sampledat;
1913 
1914  if (n & 1) {
1915  sampledat = sign_extend(byte, 4);
1916  } else {
1917  byte = bytestream2_get_byteu(&gb);
1918  sampledat = sign_extend(byte >> 4, 4);
1919  }
1920 
1921  sampledat = ((c->status[ch].sample1 * factor1
1922  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1923  *samples = av_clip_int16(sampledat);
1924  c->status[ch].sample2 = c->status[ch].sample1;
1925  c->status[ch].sample1 = *samples++;
1926  }
1927  }
1928  }
1929  break;
1930  }
1931  case AV_CODEC_ID_ADPCM_DTK:
1932  for (channel = 0; channel < avctx->channels; channel++) {
1933  samples = samples_p[channel];
1934 
1935  /* Read in every sample for this channel. */
1936  for (i = 0; i < nb_samples / 28; i++) {
1937  int byte, header;
1938  if (channel)
1939  bytestream2_skipu(&gb, 1);
1940  header = bytestream2_get_byteu(&gb);
1941  bytestream2_skipu(&gb, 3 - channel);
1942 
1943  /* Decode 28 samples. */
1944  for (n = 0; n < 28; n++) {
1945  int32_t sampledat, prev;
1946 
1947  switch (header >> 4) {
1948  case 1:
1949  prev = (c->status[channel].sample1 * 0x3c);
1950  break;
1951  case 2:
1952  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1953  break;
1954  case 3:
1955  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1956  break;
1957  default:
1958  prev = 0;
1959  }
1960 
1961  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1962 
1963  byte = bytestream2_get_byteu(&gb);
1964  if (!channel)
1965  sampledat = sign_extend(byte, 4);
1966  else
1967  sampledat = sign_extend(byte >> 4, 4);
1968 
1969  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1970  *samples++ = av_clip_int16(sampledat >> 6);
1972  c->status[channel].sample1 = sampledat;
1973  }
1974  }
1975  if (!channel)
1976  bytestream2_seek(&gb, 0, SEEK_SET);
1977  }
1978  break;
1979  case AV_CODEC_ID_ADPCM_PSX:
1980  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
1981  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
1982  for (channel = 0; channel < avctx->channels; channel++) {
1983  samples = samples_p[channel] + block * nb_samples_per_block;
1984  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
1985 
1986  /* Read in every sample for this channel. */
1987  for (i = 0; i < nb_samples_per_block / 28; i++) {
1988  int filter, shift, flag, byte;
1989 
1990  filter = bytestream2_get_byteu(&gb);
1991  shift = filter & 0xf;
1992  filter = filter >> 4;
1993  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1994  return AVERROR_INVALIDDATA;
1995  flag = bytestream2_get_byteu(&gb);
1996 
1997  /* Decode 28 samples. */
1998  for (n = 0; n < 28; n++) {
1999  int sample = 0, scale;
2000 
2001  if (flag < 0x07) {
2002  if (n & 1) {
2003  scale = sign_extend(byte >> 4, 4);
2004  } else {
2005  byte = bytestream2_get_byteu(&gb);
2006  scale = sign_extend(byte, 4);
2007  }
2008 
2009  scale = scale * (1 << 12);
2010  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2011  }
2012  *samples++ = av_clip_int16(sample);
2014  c->status[channel].sample1 = sample;
2015  }
2016  }
2017  }
2018  }
2019  break;
2021  /*
2022  * The format of each block:
2023  * uint8_t left_control;
2024  * uint4_t left_samples[nb_samples];
2025  * ---- and if stereo ----
2026  * uint8_t right_control;
2027  * uint4_t right_samples[nb_samples];
2028  *
2029  * Format of the control byte:
2030  * MSB [SSSSRDRR] LSB
2031  * S = (Shift Amount - 2)
2032  * D = Decoder flag.
2033  * R = Reserved
2034  *
2035  * Each block relies on the previous two samples of each channel.
2036  * They should be 0 initially.
2037  */
2038  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2039  for (channel = 0; channel < avctx->channels; channel++) {
2040  int control, shift;
2041 
2042  samples = samples_p[channel] + block * 32;
2043  cs = c->status + channel;
2044 
2045  /* Get the control byte and decode the samples, 2 at a time. */
2046  control = bytestream2_get_byteu(&gb);
2047  shift = (control >> 4) + 2;
2048 
2049  for (n = 0; n < 16; n++) {
2050  int sample = bytestream2_get_byteu(&gb);
2051  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2052  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2053  }
2054  }
2055  }
2056  break;
2058  if (!c->has_status) {
2059  for (channel = 0; channel < avctx->channels; channel++) {
2060  c->status[channel].predictor = 0;
2061  c->status[channel].step_index = 0;
2062  }
2063  c->has_status = 1;
2064  }
2065  for (n = 0; n < nb_samples * avctx->channels; n++) {
2066  int v = bytestream2_get_byteu(&gb);
2067  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2068  }
2069  break;
2071  for (n = nb_samples / 2; n > 0; n--) {
2072  for (channel = 0; channel < avctx->channels; channel++) {
2073  int v = bytestream2_get_byteu(&gb);
2074  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2075  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2076  }
2077  samples += avctx->channels;
2078  }
2079  break;
2080  default:
2081  av_assert0(0); // unsupported codec_id should not happen
2082  }
2083 
2084  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2085  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2086  return AVERROR_INVALIDDATA;
2087  }
2088 
2089  *got_frame_ptr = 1;
2090 
2091  if (avpkt->size < bytestream2_tell(&gb)) {
2092  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2093  return avpkt->size;
2094  }
2095 
2096  return bytestream2_tell(&gb);
2097 }
2098 
2099 static void adpcm_flush(AVCodecContext *avctx)
2100 {
2101  ADPCMDecodeContext *c = avctx->priv_data;
2102  c->has_status = 0;
2103 }
2104 
2105 
2113 
/**
 * Declare an AVCodec decoder entry (ff_<name_>_decoder) for one ADPCM
 * variant. All variants share the same init/decode/flush callbacks and
 * private context; only the codec id, human-readable name and the list
 * of supported sample formats differ per variant.
 */
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = {                        \
    .name           = #name_,                               \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
    .type           = AVMEDIA_TYPE_AUDIO,                   \
    .id             = id_,                                  \
    .priv_data_size = sizeof(ADPCMDecodeContext),           \
    .init           = adpcm_decode_init,                    \
    .decode         = adpcm_decode_frame,                   \
    .flush          = adpcm_flush,                          \
    .capabilities   = AV_CODEC_CAP_DR1,                     \
    .sample_fmts    = sample_fmts_,                         \
}
2127 
/* Note: Do not forget to add new entries to the Makefile as well. */
/* One AVCodec definition per supported ADPCM flavour; kept roughly in
 * alphabetical order of codec id. The second argument selects which
 * sample formats (interleaved s16, planar s16p, or both) the variant
 * can output. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM,         sample_fmts_s16p, adpcm_4xm,         "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC,         sample_fmts_s16p, adpcm_afc,         "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM,         sample_fmts_s16,  adpcm_agm,         "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA,        sample_fmts_s16p, adpcm_aica,        "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO,        sample_fmts_s16p, adpcm_argo,        "ADPCM Argonaut Games");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT,          sample_fmts_s16,  adpcm_ct,          "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK,         sample_fmts_s16p, adpcm_dtk,         "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA,          sample_fmts_s16,  adpcm_ea,          "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16,  adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1,       sample_fmts_s16p, adpcm_ea_r1,       "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2,       sample_fmts_s16p, adpcm_ea_r2,       "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3,       sample_fmts_s16p, adpcm_ea_r3,       "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS,      sample_fmts_s16p, adpcm_ea_xas,      "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV,     sample_fmts_s16,  adpcm_ima_amv,     "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC,     sample_fmts_s16,  adpcm_ima_apc,     "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM,     sample_fmts_s16,  adpcm_ima_apm,     "ADPCM IMA Ubisoft APM");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16,  adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4,    sample_fmts_s16,  adpcm_ima_dat4,    "ADPCM IMA Eurocom DAT4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3,     sample_fmts_s16,  adpcm_ima_dk3,     "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4,     sample_fmts_s16,  adpcm_ima_dk4,     "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16,  adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16,  adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS,     sample_fmts_s16,  adpcm_ima_iss,     "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX,  sample_fmts_s16p, adpcm_ima_moflex,  "ADPCM IMA MobiClip MOFLEX");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF,     sample_fmts_s16,  adpcm_ima_mtf,     "ADPCM IMA Capcom's MT Framework");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI,     sample_fmts_s16,  adpcm_ima_oki,     "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT,      sample_fmts_s16p, adpcm_ima_qt,      "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD,     sample_fmts_s16,  adpcm_ima_rad,     "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI,     sample_fmts_s16,  adpcm_ima_ssi,     "ADPCM IMA Simon & Schuster Interactive");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG,  sample_fmts_s16,  adpcm_ima_smjpeg,  "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP,     sample_fmts_s16,  adpcm_ima_alp,     "ADPCM IMA High Voltage Software ALP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV,     sample_fmts_s16p, adpcm_ima_wav,     "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS,      sample_fmts_both, adpcm_ima_ws,      "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS,          sample_fmts_both, adpcm_ms,          "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF,        sample_fmts_s16p, adpcm_mtaf,        "ADPCM MTAF");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX,         sample_fmts_s16p, adpcm_psx,         "ADPCM Playstation");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2,     sample_fmts_s16,  adpcm_sbpro_2,     "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3,     sample_fmts_s16,  adpcm_sbpro_3,     "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4,     sample_fmts_s16,  adpcm_sbpro_4,     "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF,         sample_fmts_s16,  adpcm_swf,         "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE,      sample_fmts_s16p, adpcm_thp_le,      "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP,         sample_fmts_s16p, adpcm_thp,         "ADPCM Nintendo THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA,          sample_fmts_s16p, adpcm_xa,          "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA,      sample_fmts_s16,  adpcm_yamaha,      "ADPCM Yamaha");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK,        sample_fmts_s16,  adpcm_zork,        "ADPCM Zork");
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:535
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:73
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:506
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define THP_GET16(g)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
else temp
Definition: vf_mcdeint.c:256
const char * g
Definition: vf_curves.c:115
#define avpriv_request_sample(...)
int size
Definition: packet.h:364
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2106
#define sample
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:91
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:1223
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:411
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:106
float delta
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2099
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:638
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
ADPCM tables.
uint8_t * data
Definition: packet.h:363
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static const int8_t mtf_index_table[16]
Definition: adpcm.c:93
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:723
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:515
bitstream reader API header.
ptrdiff_t size
Definition: opengl_enc.c:100
static const uint8_t header[24]
Definition: sdr2.c:67
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1750
channels
Definition: aptx.h:33
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
enum AVCodecID id
Definition: codec.h:204
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:342
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:65
const uint16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
ADPCM encoder/decoder common header.
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:695
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
GLsizei count
Definition: opengl_enc.c:108
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:430
#define FFMAX(a, b)
Definition: common.h:94
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:362
int8_t exp
Definition: eval.c:72
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:303
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:82
const int16_t ff_adpcm_mtaf_stepsize[32][16]
Definition: adpcm_data.c:114
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:550
#define FFMIN(a, b)
Definition: common.h:96
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
int vqa_version
VQA version.
Definition: adpcm.c:102
int32_t
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2108
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
#define FF_ARRAY_ELEMS(a)
static const float pred[4]
Definition: siprdata.h:259
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:91
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define abs(x)
Definition: cuda_runtime.h:35
main external API structure.
Definition: avcodec.h:526
const int16_t ff_adpcm_ima_cunning_step_table[61]
Definition: adpcm_data.c:197
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:492
#define DK3_GET_NEXT_NIBBLE()
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1879
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:277
int extradata_size
Definition: avcodec.h:628
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:326
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:233
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:385
ADPCMChannelStatus status[14]
Definition: adpcm.c:101
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:472
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
const int8_t ff_adpcm_ima_cunning_index_table[9]
Definition: adpcm_data.c:187
uint8_t level
Definition: svq3.c:205
int
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
common internal api header.
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:940
signed 16 bits
Definition: samplefmt.h:61
#define flag(name)
Definition: cbs_av1.c:552
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
unsigned bps
Definition: movenc.c:1598
static const int8_t zork_index_table[8]
Definition: adpcm.c:89
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
void * priv_data
Definition: avcodec.h:553
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:663
int channels
number of audio channels
Definition: avcodec.h:1187
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:451
static float add(float src0, float src1)
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2110
Filter the word “frame” indicates either a video frame or a group of audio samples
int16_t step_index
Definition: adpcm.h:33
signed 16 bits, planar
Definition: samplefmt.h:67
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define tb
Definition: regdef.h:68
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:2114