adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg Project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  *
16  * This file is part of FFmpeg.
17  *
18  * FFmpeg is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU Lesser General Public
20  * License as published by the Free Software Foundation; either
21  * version 2.1 of the License, or (at your option) any later version.
22  *
23  * FFmpeg is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26  * Lesser General Public License for more details.
27  *
28  * You should have received a copy of the GNU Lesser General Public
29  * License along with FFmpeg; if not, write to the Free Software
30  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31  */
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "bytestream.h"
35 #include "adpcm.h"
36 #include "adpcm_data.h"
37 #include "internal.h"
38 
39 /**
40  * @file
41  * ADPCM decoders
42  * Features and limitations:
43  *
44  * Reference documents:
45  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
46  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
47  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
48  * http://openquicktime.sourceforge.net/
49  * XAnim sources (xa_codec.c) http://xanim.polter.net/
50  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
51  * SoX source code http://sox.sourceforge.net/
52  *
53  * CD-ROM XA:
54  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
55  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
56  * readstr http://www.geocities.co.jp/Playtown/2004/
57  */
58 
59 /* These are for CD-ROM XA ADPCM */
60 static const int xa_adpcm_table[5][2] = {
61  { 0, 0 },
62  { 60, 0 },
63  { 115, -52 },
64  { 98, -55 },
65  { 122, -60 }
66 };
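/* Each pair above is a (K0, K1) prediction filter in Q6 fixed point (units of
 * 1/64), applied in xa_decode() below as (s_1*f0 + s_2*f1 + 32) >> 6. Row 0
 * ({0, 0}, i.e. no prediction) also serves as the fallback when a sector
 * announces an out-of-range filter index. */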
67 
68 static const int ea_adpcm_table[] = {
69  0, 240, 460, 392,
70  0, 0, -208, -220,
71  0, 1, 3, 4,
72  7, 8, 10, 11,
73  0, -1, -3, -4
74 };
75 
 76 // padded to zero where table size is less than 16
77 static const int swf_index_tables[4][16] = {
78  /*2*/ { -1, 2 },
79  /*3*/ { -1, -1, 2, 4 },
80  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
81  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
82 };
83 
84 /* end of tables */
85 
86 typedef struct ADPCMDecodeContext {
 87     ADPCMChannelStatus status[10];
 88     int vqa_version;                /**< VQA version. Used for ADPCM_IMA_WS */
 89     int has_status;
 90 } ADPCMDecodeContext;
 91 
 92 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
 93 {
94  ADPCMDecodeContext *c = avctx->priv_data;
95  unsigned int min_channels = 1;
96  unsigned int max_channels = 2;
97 
98  switch(avctx->codec->id) {
 99     case AV_CODEC_ID_ADPCM_DTK:
 100     case AV_CODEC_ID_ADPCM_EA:
 101         min_channels = 2;
 102         break;
 103     case AV_CODEC_ID_ADPCM_AFC:
 104     case AV_CODEC_ID_ADPCM_EA_R1:
 105     case AV_CODEC_ID_ADPCM_EA_R2:
 106     case AV_CODEC_ID_ADPCM_EA_R3:
 107     case AV_CODEC_ID_ADPCM_EA_XAS:
 108         max_channels = 6;
 109         break;
 110     case AV_CODEC_ID_ADPCM_THP:
 111     case AV_CODEC_ID_ADPCM_THP_LE:
 112         max_channels = 10;
 113         break;
114  }
115  if (avctx->channels < min_channels || avctx->channels > max_channels) {
116  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
117  return AVERROR(EINVAL);
118  }
119 
120  switch(avctx->codec->id) {
 121     case AV_CODEC_ID_ADPCM_CT:
 122         c->status[0].step = c->status[1].step = 511;
 123         break;
 124     case AV_CODEC_ID_ADPCM_IMA_WAV:
 125         if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
 126             return AVERROR_INVALIDDATA;
 127         break;
 128     case AV_CODEC_ID_ADPCM_IMA_APC:
 129         if (avctx->extradata && avctx->extradata_size >= 8) {
 130             c->status[0].predictor = AV_RL32(avctx->extradata);
 131             c->status[1].predictor = AV_RL32(avctx->extradata + 4);
 132         }
 133         break;
 134     case AV_CODEC_ID_ADPCM_IMA_WS:
 135         if (avctx->extradata && avctx->extradata_size >= 2)
 136             c->vqa_version = AV_RL16(avctx->extradata);
 137         break;
138  default:
139  break;
140  }
141 
142  switch(avctx->codec->id) {
 143     case AV_CODEC_ID_ADPCM_AFC:
 144     case AV_CODEC_ID_ADPCM_DTK:
 145     case AV_CODEC_ID_ADPCM_EA_R1:
 146     case AV_CODEC_ID_ADPCM_EA_R2:
 147     case AV_CODEC_ID_ADPCM_EA_R3:
 148     case AV_CODEC_ID_ADPCM_EA_XAS:
 149     case AV_CODEC_ID_ADPCM_THP:
 150     case AV_CODEC_ID_ADPCM_THP_LE:
 151     case AV_CODEC_ID_ADPCM_4XM:
 152     case AV_CODEC_ID_ADPCM_XA:
 153     case AV_CODEC_ID_ADPCM_IMA_QT:
 154     case AV_CODEC_ID_ADPCM_IMA_WAV:
 155         avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
 156         break;
 157     case AV_CODEC_ID_ADPCM_IMA_WS:
 158         avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
 159                                                   AV_SAMPLE_FMT_S16;
 160         break;
161  default:
162  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
163  }
164 
165  return 0;
166 }
167 
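/* Standard IMA expansion: bit 3 of the nibble is the sign, bits 0-2 the
 * magnitude. With shift == 3, ((2*delta + 1) * step) >> 3 is the usual
 * (delta + 0.5) * step / 4 update, computed with a single multiply. */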
168 static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
169 {
170  int step_index;
171  int predictor;
172  int sign, delta, diff, step;
173 
174  step = ff_adpcm_step_table[c->step_index];
175  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
176  step_index = av_clip(step_index, 0, 88);
177 
178  sign = nibble & 8;
179  delta = nibble & 7;
180  /* perform direct multiplication instead of series of jumps proposed by
181  * the reference ADPCM implementation since modern CPUs can do the mults
182  * quickly enough */
183  diff = ((2 * delta + 1) * step) >> shift;
184  predictor = c->predictor;
185  if (sign) predictor -= diff;
186  else predictor += diff;
187 
188  c->predictor = av_clip_int16(predictor);
189  c->step_index = step_index;
190 
191  return (short)c->predictor;
192 }
193 
 194 static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
 195 {
196  int nibble, step_index, predictor, sign, delta, diff, step, shift;
197 
198  shift = bps - 1;
199  nibble = get_bits_le(gb, bps),
200  step = ff_adpcm_step_table[c->step_index];
201  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
202  step_index = av_clip(step_index, 0, 88);
203 
204  sign = nibble & (1 << shift);
205  delta = av_mod_uintp2(nibble, shift);
206  diff = ((2 * delta + 1) * step) >> shift;
207  predictor = c->predictor;
208  if (sign) predictor -= diff;
209  else predictor += diff;
210 
211  c->predictor = av_clip_int16(predictor);
212  c->step_index = step_index;
213 
214  return (int16_t)c->predictor;
215 }
216 
217 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
218 {
219  int step_index;
220  int predictor;
221  int diff, step;
222 
223  step = ff_adpcm_step_table[c->step_index];
224  step_index = c->step_index + ff_adpcm_index_table[nibble];
225  step_index = av_clip(step_index, 0, 88);
226 
227  diff = step >> 3;
228  if (nibble & 4) diff += step;
229  if (nibble & 2) diff += step >> 1;
230  if (nibble & 1) diff += step >> 2;
231 
232  if (nibble & 8)
233  predictor = c->predictor - diff;
234  else
235  predictor = c->predictor + diff;
236 
237  c->predictor = av_clip_int16(predictor);
238  c->step_index = step_index;
239 
240  return c->predictor;
241 }
242 
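/* MS ADPCM: the prediction is sample1*coeff1 + sample2*coeff2 in 1/256 units;
 * the division by 64 below works because ff_adpcm_AdaptCoeff1/2 are stored
 * pre-divided by 4 (see adpcm_data.c). The signed 4-bit nibble scales idelta,
 * and idelta itself adapts via ff_adpcm_AdaptationTable with a floor of 16. */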
243 static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
244 {
245  int predictor;
246 
247  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
248  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
249 
250  c->sample2 = c->sample1;
251  c->sample1 = av_clip_int16(predictor);
252  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
253  if (c->idelta < 16) c->idelta = 16;
254  if (c->idelta > INT_MAX/768) {
255  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
256  c->idelta = INT_MAX/768;
257  }
258 
259  return c->sample1;
260 }
261 
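/* OKI/Dialogic (VOX) variant of IMA: the step index is clamped to 0..48
 * (the 49-entry ff_adpcm_oki_step_table) and the predictor is kept to 12 bits,
 * so the return value is scaled by 16 to cover the 16-bit output range. */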
262 static inline short adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
263 {
264  int step_index, predictor, sign, delta, diff, step;
265 
 266     step = ff_adpcm_oki_step_table[c->step_index];
 267     step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
268  step_index = av_clip(step_index, 0, 48);
269 
270  sign = nibble & 8;
271  delta = nibble & 7;
272  diff = ((2 * delta + 1) * step) >> 3;
273  predictor = c->predictor;
274  if (sign) predictor -= diff;
275  else predictor += diff;
276 
277  c->predictor = av_clip_intp2(predictor, 11);
278  c->step_index = step_index;
279 
280  return c->predictor << 4;
281 }
282 
283 static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
284 {
285  int sign, delta, diff;
286  int new_step;
287 
288  sign = nibble & 8;
289  delta = nibble & 7;
290  /* perform direct multiplication instead of series of jumps proposed by
291  * the reference ADPCM implementation since modern CPUs can do the mults
292  * quickly enough */
293  diff = ((2 * delta + 1) * c->step) >> 3;
294  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
295  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
296  c->predictor = av_clip_int16(c->predictor);
297  /* calculate new step and clamp it to range 511..32767 */
298  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
299  c->step = av_clip(new_step, 511, 32767);
300 
301  return (short)c->predictor;
302 }
303 
304 static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
305 {
306  int sign, delta, diff;
307 
308  sign = nibble & (1<<(size-1));
309  delta = nibble & ((1<<(size-1))-1);
310  diff = delta << (7 + c->step + shift);
311 
312  /* clamp result */
313  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
314 
315  /* calculate new step */
316  if (delta >= (2*size - 3) && c->step < 3)
317  c->step++;
318  else if (delta == 0 && c->step > 0)
319  c->step--;
320 
321  return (short) c->predictor;
322 }
323 
324 static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
325 {
326  if(!c->step) {
327  c->predictor = 0;
328  c->step = 127;
329  }
330 
331  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
332  c->predictor = av_clip_int16(c->predictor);
333  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
334  c->step = av_clip(c->step, 127, 24567);
335  return c->predictor;
336 }
337 
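/* Decode one CD-ROM XA sound group: 128 bytes consisting of a 16-byte
 * parameter block (only bytes 4-11, the shift/filter bytes, are read here)
 * followed by 112 data bytes interleaving 8 sound units of 28 4-bit samples
 * each. Every pass of the outer loop below consumes one low-nibble unit and
 * one high-nibble unit. */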
338 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
339  const uint8_t *in, ADPCMChannelStatus *left,
340  ADPCMChannelStatus *right, int channels, int sample_offset)
341 {
342  int i, j;
343  int shift,filter,f0,f1;
344  int s_1,s_2;
345  int d,s,t;
346 
347  out0 += sample_offset;
348  if (channels == 1)
349  out1 = out0 + 28;
350  else
351  out1 += sample_offset;
352 
353  for(i=0;i<4;i++) {
354  shift = 12 - (in[4+i*2] & 15);
355  filter = in[4+i*2] >> 4;
356  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
357  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
358  filter=0;
359  }
360  f0 = xa_adpcm_table[filter][0];
361  f1 = xa_adpcm_table[filter][1];
362 
363  s_1 = left->sample1;
364  s_2 = left->sample2;
365 
366  for(j=0;j<28;j++) {
367  d = in[16+i+j*4];
368 
369  t = sign_extend(d, 4);
370  s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
371  s_2 = s_1;
372  s_1 = av_clip_int16(s);
373  out0[j] = s_1;
374  }
375 
376  if (channels == 2) {
377  left->sample1 = s_1;
378  left->sample2 = s_2;
379  s_1 = right->sample1;
380  s_2 = right->sample2;
381  }
382 
383  shift = 12 - (in[5+i*2] & 15);
384  filter = in[5+i*2] >> 4;
385  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
386  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
387  filter=0;
388  }
389 
390  f0 = xa_adpcm_table[filter][0];
391  f1 = xa_adpcm_table[filter][1];
392 
393  for(j=0;j<28;j++) {
394  d = in[16+i+j*4];
395 
396  t = sign_extend(d >> 4, 4);
397  s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
398  s_2 = s_1;
399  s_1 = av_clip_int16(s);
400  out1[j] = s_1;
401  }
402 
403  if (channels == 2) {
404  right->sample1 = s_1;
405  right->sample2 = s_2;
406  } else {
407  left->sample1 = s_1;
408  left->sample2 = s_2;
409  }
410 
411  out0 += 28 * (3 - channels);
412  out1 += 28 * (3 - channels);
413  }
414 
415  return 0;
416 }
417 
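/* SWF ADPCM bitstream: 2 bits select the code size (2-5 bits), then data is
 * grouped in blocks of up to 4096 samples per channel. Each block starts with
 * a 16-bit raw sample and a 6-bit step index per channel (22 header bits per
 * channel), followed by one code per channel per sample, expanded IMA-style
 * with swf_index_tables[] driving the step-index update. */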
418 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
419 {
420  ADPCMDecodeContext *c = avctx->priv_data;
421  GetBitContext gb;
422  const int *table;
423  int k0, signmask, nb_bits, count;
424  int size = buf_size*8;
425  int i;
426 
427  init_get_bits(&gb, buf, size);
428 
429  //read bits & initial values
430  nb_bits = get_bits(&gb, 2)+2;
431  table = swf_index_tables[nb_bits-2];
432  k0 = 1 << (nb_bits-2);
433  signmask = 1 << (nb_bits-1);
434 
435  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
436  for (i = 0; i < avctx->channels; i++) {
437  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
438  c->status[i].step_index = get_bits(&gb, 6);
439  }
440 
441  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
442  int i;
443 
444  for (i = 0; i < avctx->channels; i++) {
445  // similar to IMA adpcm
446  int delta = get_bits(&gb, nb_bits);
447  int step = ff_adpcm_step_table[c->status[i].step_index];
448  long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
449  int k = k0;
450 
451  do {
452  if (delta & k)
453  vpdiff += step;
454  step >>= 1;
455  k >>= 1;
456  } while(k);
457  vpdiff += step;
458 
459  if (delta & signmask)
460  c->status[i].predictor -= vpdiff;
461  else
462  c->status[i].predictor += vpdiff;
463 
464  c->status[i].step_index += table[delta & (~signmask)];
465 
466  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
467  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
468 
469  *samples++ = c->status[i].predictor;
470  }
471  }
472  }
473 }
474 
475 /**
476  * Get the number of samples that will be decoded from the packet.
477  * In one case, this is actually the maximum number of samples possible to
478  * decode with the given buf_size.
479  *
480  * @param[out] coded_samples set to the number of samples as coded in the
481  * packet, or 0 if the codec does not encode the
482  * number of samples in each frame.
483  * @param[out] approx_nb_samples set to non-zero if the number of samples
484  * returned is an approximation.
485  */
 486 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
 487                           int buf_size, int *coded_samples, int *approx_nb_samples)
488 {
489  ADPCMDecodeContext *s = avctx->priv_data;
490  int nb_samples = 0;
491  int ch = avctx->channels;
492  int has_coded_samples = 0;
493  int header_size;
494 
495  *coded_samples = 0;
496  *approx_nb_samples = 0;
497 
498  if(ch <= 0)
499  return 0;
500 
501  switch (avctx->codec->id) {
502  /* constant, only check buf_size */
 503     case AV_CODEC_ID_ADPCM_EA_XAS:
 504         if (buf_size < 76 * ch)
505  return 0;
506  nb_samples = 128;
507  break;
 508     case AV_CODEC_ID_ADPCM_IMA_QT:
 509         if (buf_size < 34 * ch)
510  return 0;
511  nb_samples = 64;
512  break;
513  /* simple 4-bit adpcm */
 514     case AV_CODEC_ID_ADPCM_CT:
 515     case AV_CODEC_ID_ADPCM_IMA_APC:
 516     case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
 517     case AV_CODEC_ID_ADPCM_IMA_OKI:
 518     case AV_CODEC_ID_ADPCM_IMA_WS:
 519     case AV_CODEC_ID_ADPCM_YAMAHA:
 520         nb_samples = buf_size * 2 / ch;
521  break;
522  }
523  if (nb_samples)
524  return nb_samples;
525 
526  /* simple 4-bit adpcm, with header */
527  header_size = 0;
528  switch (avctx->codec->id) {
 529     case AV_CODEC_ID_ADPCM_4XM:
 530     case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
531  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
532  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
533  }
534  if (header_size > 0)
535  return (buf_size - header_size) * 2 / ch;
536 
537  /* more complex formats */
538  switch (avctx->codec->id) {
 539     case AV_CODEC_ID_ADPCM_EA:
 540         has_coded_samples = 1;
541  *coded_samples = bytestream2_get_le32(gb);
542  *coded_samples -= *coded_samples % 28;
543  nb_samples = (buf_size - 12) / 30 * 28;
544  break;
 545     case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
 546         has_coded_samples = 1;
547  *coded_samples = bytestream2_get_le32(gb);
548  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
549  break;
 550     case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
 551         nb_samples = (buf_size - ch) / ch * 2;
552  break;
 553     case AV_CODEC_ID_ADPCM_EA_R1:
 554     case AV_CODEC_ID_ADPCM_EA_R2:
 555     case AV_CODEC_ID_ADPCM_EA_R3:
 556         /* maximum number of samples */
557  /* has internal offsets and a per-frame switch to signal raw 16-bit */
558  has_coded_samples = 1;
559  switch (avctx->codec->id) {
 560         case AV_CODEC_ID_ADPCM_EA_R1:
 561             header_size    = 4 + 9 * ch;
 562             *coded_samples = bytestream2_get_le32(gb);
 563             break;
 564         case AV_CODEC_ID_ADPCM_EA_R2:
 565             header_size    = 4 + 5 * ch;
 566             *coded_samples = bytestream2_get_le32(gb);
 567             break;
 568         case AV_CODEC_ID_ADPCM_EA_R3:
 569             header_size    = 4 + 5 * ch;
 570             *coded_samples = bytestream2_get_be32(gb);
 571             break;
572  }
573  *coded_samples -= *coded_samples % 28;
574  nb_samples = (buf_size - header_size) * 2 / ch;
575  nb_samples -= nb_samples % 28;
576  *approx_nb_samples = 1;
577  break;
 578     case AV_CODEC_ID_ADPCM_IMA_DK3:
 579         if (avctx->block_align > 0)
580  buf_size = FFMIN(buf_size, avctx->block_align);
581  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
582  break;
 583     case AV_CODEC_ID_ADPCM_IMA_DK4:
 584         if (avctx->block_align > 0)
585  buf_size = FFMIN(buf_size, avctx->block_align);
586  if (buf_size < 4 * ch)
587  return AVERROR_INVALIDDATA;
588  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
589  break;
 590     case AV_CODEC_ID_ADPCM_IMA_RAD:
 591         if (avctx->block_align > 0)
592  buf_size = FFMIN(buf_size, avctx->block_align);
593  nb_samples = (buf_size - 4 * ch) * 2 / ch;
594  break;
 595     case AV_CODEC_ID_ADPCM_IMA_WAV:
 596     {
597  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
598  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
599  if (avctx->block_align > 0)
600  buf_size = FFMIN(buf_size, avctx->block_align);
601  if (buf_size < 4 * ch)
602  return AVERROR_INVALIDDATA;
603  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
604  break;
605  }
 606     case AV_CODEC_ID_ADPCM_MS:
 607         if (avctx->block_align > 0)
608  buf_size = FFMIN(buf_size, avctx->block_align);
609  nb_samples = (buf_size - 6 * ch) * 2 / ch;
610  break;
 611     case AV_CODEC_ID_ADPCM_SBPRO_2:
 612     case AV_CODEC_ID_ADPCM_SBPRO_3:
 613     case AV_CODEC_ID_ADPCM_SBPRO_4:
 614     {
615  int samples_per_byte;
616  switch (avctx->codec->id) {
617  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
618  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
619  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
620  }
621  if (!s->status[0].step_index) {
622  if (buf_size < ch)
623  return AVERROR_INVALIDDATA;
624  nb_samples++;
625  buf_size -= ch;
626  }
627  nb_samples += buf_size * samples_per_byte / ch;
628  break;
629  }
 630     case AV_CODEC_ID_ADPCM_SWF:
 631     {
632  int buf_bits = buf_size * 8 - 2;
633  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
634  int block_hdr_size = 22 * ch;
635  int block_size = block_hdr_size + nbits * ch * 4095;
636  int nblocks = buf_bits / block_size;
637  int bits_left = buf_bits - nblocks * block_size;
638  nb_samples = nblocks * 4096;
639  if (bits_left >= block_hdr_size)
640  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
641  break;
642  }
 643     case AV_CODEC_ID_ADPCM_THP:
 644     case AV_CODEC_ID_ADPCM_THP_LE:
 645         if (avctx->extradata) {
646  nb_samples = buf_size * 14 / (8 * ch);
647  break;
648  }
649  has_coded_samples = 1;
650  bytestream2_skip(gb, 4); // channel size
651  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
652  bytestream2_get_le32(gb) :
653  bytestream2_get_be32(gb);
654  buf_size -= 8 + 36 * ch;
655  buf_size /= ch;
656  nb_samples = buf_size / 8 * 14;
657  if (buf_size % 8 > 1)
658  nb_samples += (buf_size % 8 - 1) * 2;
659  *approx_nb_samples = 1;
660  break;
 661     case AV_CODEC_ID_ADPCM_AFC:
 662         nb_samples = buf_size / (9 * ch) * 16;
663  break;
 664     case AV_CODEC_ID_ADPCM_XA:
 665         nb_samples = (buf_size / 128) * 224 / ch;
666  break;
 667     case AV_CODEC_ID_ADPCM_DTK:
 668         nb_samples = buf_size / (16 * ch) * 28;
669  break;
670  }
671 
672  /* validate coded sample count */
673  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
674  return AVERROR_INVALIDDATA;
675 
676  return nb_samples;
677 }
678 
679 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
680  int *got_frame_ptr, AVPacket *avpkt)
681 {
682  AVFrame *frame = data;
683  const uint8_t *buf = avpkt->data;
684  int buf_size = avpkt->size;
685  ADPCMDecodeContext *c = avctx->priv_data;
686  ADPCMChannelStatus *cs;
687  int n, m, channel, i;
688  short *samples;
689  int16_t **samples_p;
690  int st; /* stereo */
691  int count1, count2;
692  int nb_samples, coded_samples, approx_nb_samples, ret;
693  GetByteContext gb;
694 
695  bytestream2_init(&gb, buf, buf_size);
696  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
697  if (nb_samples <= 0) {
698  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
699  return AVERROR_INVALIDDATA;
700  }
701 
702  /* get output buffer */
703  frame->nb_samples = nb_samples;
704  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
705  return ret;
706  samples = (short *)frame->data[0];
707  samples_p = (int16_t **)frame->extended_data;
708 
709  /* use coded_samples when applicable */
710  /* it is always <= nb_samples, so the output buffer will be large enough */
711  if (coded_samples) {
712  if (!approx_nb_samples && coded_samples != nb_samples)
713  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
714  frame->nb_samples = nb_samples = coded_samples;
715  }
716 
717  st = avctx->channels == 2 ? 1 : 0;
718 
719  switch(avctx->codec->id) {
 720     case AV_CODEC_ID_ADPCM_IMA_QT:
 721         /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
722  Channel data is interleaved per-chunk. */
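         /* Each 34-byte chunk holds one channel: a 2-byte preamble with the
          * initial predictor and step index (unpacked below), then 32 data
          * bytes = 64 nibbles = 64 samples. */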
723  for (channel = 0; channel < avctx->channels; channel++) {
724  int predictor;
725  int step_index;
726  cs = &(c->status[channel]);
727  /* (pppppp) (piiiiiii) */
728 
729  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
730  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
731  step_index = predictor & 0x7F;
732  predictor &= ~0x7F;
733 
734  if (cs->step_index == step_index) {
735  int diff = predictor - cs->predictor;
736  if (diff < 0)
737  diff = - diff;
738  if (diff > 0x7f)
739  goto update;
740  } else {
741  update:
742  cs->step_index = step_index;
743  cs->predictor = predictor;
744  }
745 
746  if (cs->step_index > 88u){
747  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
748  channel, cs->step_index);
749  return AVERROR_INVALIDDATA;
750  }
751 
752  samples = samples_p[channel];
753 
754  for (m = 0; m < 64; m += 2) {
755  int byte = bytestream2_get_byteu(&gb);
756  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
757  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
758  }
759  }
760  break;
 761     case AV_CODEC_ID_ADPCM_IMA_WAV:
 762         for(i=0; i<avctx->channels; i++){
763  cs = &(c->status[i]);
764  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
765 
766  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
767  if (cs->step_index > 88u){
768  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
769  i, cs->step_index);
770  return AVERROR_INVALIDDATA;
771  }
772  }
773 
774  if (avctx->bits_per_coded_sample != 4) {
775  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
 776             GetBitContext g;
 777 
 778             ret = init_get_bits8(&g, gb.buffer, bytestream2_get_bytes_left(&gb));
 779             if (ret < 0)
780  return ret;
781  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
782  for (i = 0; i < avctx->channels; i++) {
783  cs = &c->status[i];
784  samples = &samples_p[i][1 + n * samples_per_block];
785  for (m = 0; m < samples_per_block; m++) {
786  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
787  avctx->bits_per_coded_sample);
788  }
789  }
790  }
791  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
792  } else {
793  for (n = 0; n < (nb_samples - 1) / 8; n++) {
794  for (i = 0; i < avctx->channels; i++) {
795  cs = &c->status[i];
796  samples = &samples_p[i][1 + n * 8];
797  for (m = 0; m < 8; m += 2) {
798  int v = bytestream2_get_byteu(&gb);
799  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
800  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
801  }
802  }
803  }
804  }
805  break;
 806     case AV_CODEC_ID_ADPCM_4XM:
 807         for (i = 0; i < avctx->channels; i++)
808  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
809 
810  for (i = 0; i < avctx->channels; i++) {
811  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
812  if (c->status[i].step_index > 88u) {
813  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
814  i, c->status[i].step_index);
815  return AVERROR_INVALIDDATA;
816  }
817  }
818 
819  for (i = 0; i < avctx->channels; i++) {
820  samples = (int16_t *)frame->data[i];
821  cs = &c->status[i];
822  for (n = nb_samples >> 1; n > 0; n--) {
823  int v = bytestream2_get_byteu(&gb);
824  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
825  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
826  }
827  }
828  break;
 829     case AV_CODEC_ID_ADPCM_MS:
 830     {
831  int block_predictor;
832 
833  block_predictor = bytestream2_get_byteu(&gb);
834  if (block_predictor > 6) {
835  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
836  block_predictor);
837  return AVERROR_INVALIDDATA;
838  }
839  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
840  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
841  if (st) {
842  block_predictor = bytestream2_get_byteu(&gb);
843  if (block_predictor > 6) {
844  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
845  block_predictor);
846  return AVERROR_INVALIDDATA;
847  }
848  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
849  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
850  }
851  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
852  if (st){
853  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
854  }
855 
856  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
857  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
858  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
859  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
860 
861  *samples++ = c->status[0].sample2;
862  if (st) *samples++ = c->status[1].sample2;
863  *samples++ = c->status[0].sample1;
864  if (st) *samples++ = c->status[1].sample1;
865  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
866  int byte = bytestream2_get_byteu(&gb);
867  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
868  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
869  }
870  break;
871  }
 872     case AV_CODEC_ID_ADPCM_IMA_DK4:
 873         for (channel = 0; channel < avctx->channels; channel++) {
874  cs = &c->status[channel];
875  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
876  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
877  if (cs->step_index > 88u){
878  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
879  channel, cs->step_index);
880  return AVERROR_INVALIDDATA;
881  }
882  }
883  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
884  int v = bytestream2_get_byteu(&gb);
885  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
886  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
887  }
888  break;
 889     case AV_CODEC_ID_ADPCM_IMA_DK3:
 890     {
891  int last_byte = 0;
892  int nibble;
893  int decode_top_nibble_next = 0;
894  int diff_channel;
895  const int16_t *samples_end = samples + avctx->channels * nb_samples;
896 
897  bytestream2_skipu(&gb, 10);
898  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
899  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
900  c->status[0].step_index = bytestream2_get_byteu(&gb);
901  c->status[1].step_index = bytestream2_get_byteu(&gb);
902  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
903  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
904  c->status[0].step_index, c->status[1].step_index);
905  return AVERROR_INVALIDDATA;
906  }
907  /* sign extend the predictors */
908  diff_channel = c->status[1].predictor;
909 
910  /* DK3 ADPCM support macro */
911 #define DK3_GET_NEXT_NIBBLE() \
912  if (decode_top_nibble_next) { \
913  nibble = last_byte >> 4; \
914  decode_top_nibble_next = 0; \
915  } else { \
916  last_byte = bytestream2_get_byteu(&gb); \
917  nibble = last_byte & 0x0F; \
918  decode_top_nibble_next = 1; \
919  }
920 
921  while (samples < samples_end) {
922 
923  /* for this algorithm, c->status[0] is the sum channel and
924  * c->status[1] is the diff channel */
925 
926  /* process the first predictor of the sum channel */
928  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
929 
930  /* process the diff channel predictor */
932  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
933 
934  /* process the first pair of stereo PCM samples */
935  diff_channel = (diff_channel + c->status[1].predictor) / 2;
936  *samples++ = c->status[0].predictor + c->status[1].predictor;
937  *samples++ = c->status[0].predictor - c->status[1].predictor;
938 
939  /* process the second predictor of the sum channel */
941  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
942 
943  /* process the second pair of stereo PCM samples */
944  diff_channel = (diff_channel + c->status[1].predictor) / 2;
945  *samples++ = c->status[0].predictor + c->status[1].predictor;
946  *samples++ = c->status[0].predictor - c->status[1].predictor;
947  }
948 
949  if ((bytestream2_tell(&gb) & 1))
950  bytestream2_skip(&gb, 1);
951  break;
952  }
 953     case AV_CODEC_ID_ADPCM_IMA_ISS:
 954         for (channel = 0; channel < avctx->channels; channel++) {
955  cs = &c->status[channel];
956  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
957  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
958  if (cs->step_index > 88u){
959  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
960  channel, cs->step_index);
961  return AVERROR_INVALIDDATA;
962  }
963  }
964 
965  for (n = nb_samples >> (1 - st); n > 0; n--) {
966  int v1, v2;
967  int v = bytestream2_get_byteu(&gb);
968  /* nibbles are swapped for mono */
969  if (st) {
970  v1 = v >> 4;
971  v2 = v & 0x0F;
972  } else {
973  v2 = v >> 4;
974  v1 = v & 0x0F;
975  }
976  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
977  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
978  }
979  break;
 980     case AV_CODEC_ID_ADPCM_IMA_APC:
 981         while (bytestream2_get_bytes_left(&gb) > 0) {
982  int v = bytestream2_get_byteu(&gb);
983  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
984  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
985  }
986  break;
 987     case AV_CODEC_ID_ADPCM_IMA_OKI:
 988         while (bytestream2_get_bytes_left(&gb) > 0) {
989  int v = bytestream2_get_byteu(&gb);
990  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
991  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
992  }
993  break;
 994     case AV_CODEC_ID_ADPCM_IMA_RAD:
 995         for (channel = 0; channel < avctx->channels; channel++) {
996  cs = &c->status[channel];
997  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
998  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
999  if (cs->step_index > 88u){
1000  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1001  channel, cs->step_index);
1002  return AVERROR_INVALIDDATA;
1003  }
1004  }
1005  for (n = 0; n < nb_samples / 2; n++) {
1006  int byte[2];
1007 
1008  byte[0] = bytestream2_get_byteu(&gb);
1009  if (st)
1010  byte[1] = bytestream2_get_byteu(&gb);
1011  for(channel = 0; channel < avctx->channels; channel++) {
1012  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1013  }
1014  for(channel = 0; channel < avctx->channels; channel++) {
1015  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1016  }
1017  }
1018  break;
 1019     case AV_CODEC_ID_ADPCM_IMA_WS:
 1020         if (c->vqa_version == 3) {
1021  for (channel = 0; channel < avctx->channels; channel++) {
1022  int16_t *smp = samples_p[channel];
1023 
1024  for (n = nb_samples / 2; n > 0; n--) {
1025  int v = bytestream2_get_byteu(&gb);
1026  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1027  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1028  }
1029  }
1030  } else {
1031  for (n = nb_samples / 2; n > 0; n--) {
1032  for (channel = 0; channel < avctx->channels; channel++) {
1033  int v = bytestream2_get_byteu(&gb);
1034  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1035  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1036  }
1037  samples += avctx->channels;
1038  }
1039  }
1040  bytestream2_seek(&gb, 0, SEEK_END);
1041  break;
1042  case AV_CODEC_ID_ADPCM_XA:
1043  {
1044  int16_t *out0 = samples_p[0];
1045  int16_t *out1 = samples_p[1];
1046  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1047  int sample_offset = 0;
1048  while (bytestream2_get_bytes_left(&gb) >= 128) {
1049  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1050  &c->status[0], &c->status[1],
1051  avctx->channels, sample_offset)) < 0)
1052  return ret;
1053  bytestream2_skipu(&gb, 128);
1054  sample_offset += samples_per_block;
1055  }
1056  break;
1057  }
 1058     case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
 1059         for (i=0; i<=st; i++) {
1060  c->status[i].step_index = bytestream2_get_le32u(&gb);
1061  if (c->status[i].step_index > 88u) {
1062  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1063  i, c->status[i].step_index);
1064  return AVERROR_INVALIDDATA;
1065  }
1066  }
1067  for (i=0; i<=st; i++)
1068  c->status[i].predictor = bytestream2_get_le32u(&gb);
1069 
1070  for (n = nb_samples >> (1 - st); n > 0; n--) {
1071  int byte = bytestream2_get_byteu(&gb);
1072  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1073  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1074  }
1075  break;
 1076     case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
 1077         for (n = nb_samples >> (1 - st); n > 0; n--) {
1078  int byte = bytestream2_get_byteu(&gb);
1079  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1080  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1081  }
1082  break;
1083  case AV_CODEC_ID_ADPCM_EA:
1084  {
1085  int previous_left_sample, previous_right_sample;
1086  int current_left_sample, current_right_sample;
1087  int next_left_sample, next_right_sample;
1088  int coeff1l, coeff2l, coeff1r, coeff2r;
1089  int shift_left, shift_right;
1090 
1091  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1092  each coding 28 stereo samples. */
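         /* Each 30-byte piece: one byte selecting the left/right predictor
          * coefficients from ea_adpcm_table[], one byte holding the left and
          * right shift amounts, then 28 data bytes, one stereo sample pair
          * per byte (high nibble = left, low nibble = right). */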
1093 
1094  if(avctx->channels != 2)
1095  return AVERROR_INVALIDDATA;
1096 
1097  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1098  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1099  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1100  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1101 
1102  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1103  int byte = bytestream2_get_byteu(&gb);
1104  coeff1l = ea_adpcm_table[ byte >> 4 ];
1105  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1106  coeff1r = ea_adpcm_table[ byte & 0x0F];
1107  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1108 
1109  byte = bytestream2_get_byteu(&gb);
1110  shift_left = 20 - (byte >> 4);
1111  shift_right = 20 - (byte & 0x0F);
1112 
1113  for (count2 = 0; count2 < 28; count2++) {
1114  byte = bytestream2_get_byteu(&gb);
1115  next_left_sample = sign_extend(byte >> 4, 4) << shift_left;
1116  next_right_sample = sign_extend(byte, 4) << shift_right;
1117 
1118  next_left_sample = (next_left_sample +
1119  (current_left_sample * coeff1l) +
1120  (previous_left_sample * coeff2l) + 0x80) >> 8;
1121  next_right_sample = (next_right_sample +
1122  (current_right_sample * coeff1r) +
1123  (previous_right_sample * coeff2r) + 0x80) >> 8;
1124 
1125  previous_left_sample = current_left_sample;
1126  current_left_sample = av_clip_int16(next_left_sample);
1127  previous_right_sample = current_right_sample;
1128  current_right_sample = av_clip_int16(next_right_sample);
1129  *samples++ = current_left_sample;
1130  *samples++ = current_right_sample;
1131  }
1132  }
1133 
1134  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1135 
1136  break;
1137  }
 1138     case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
 1139     {
1140  int coeff[2][2], shift[2];
1141 
1142  for(channel = 0; channel < avctx->channels; channel++) {
1143  int byte = bytestream2_get_byteu(&gb);
1144  for (i=0; i<2; i++)
1145  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1146  shift[channel] = 20 - (byte & 0x0F);
1147  }
1148  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1149  int byte[2];
1150 
1151  byte[0] = bytestream2_get_byteu(&gb);
1152  if (st) byte[1] = bytestream2_get_byteu(&gb);
1153  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1154  for(channel = 0; channel < avctx->channels; channel++) {
1155  int sample = sign_extend(byte[channel] >> i, 4) << shift[channel];
1156  sample = (sample +
1157  c->status[channel].sample1 * coeff[channel][0] +
1158  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1159  c->status[channel].sample2 = c->status[channel].sample1;
1160  c->status[channel].sample1 = av_clip_int16(sample);
1161  *samples++ = c->status[channel].sample1;
1162  }
1163  }
1164  }
1165  bytestream2_seek(&gb, 0, SEEK_END);
1166  break;
1167  }
 1168     case AV_CODEC_ID_ADPCM_EA_R1:
 1169     case AV_CODEC_ID_ADPCM_EA_R2:
 1170     case AV_CODEC_ID_ADPCM_EA_R3: {
1171  /* channel numbering
1172  2chan: 0=fl, 1=fr
1173  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1174  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1175  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1176  int previous_sample, current_sample, next_sample;
1177  int coeff1, coeff2;
1178  int shift;
1179  unsigned int channel;
 1180         int16_t *samplesC;
1181  int count = 0;
1182  int offsets[6];
1183 
1184  for (channel=0; channel<avctx->channels; channel++)
1185  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1186  bytestream2_get_le32(&gb)) +
1187  (avctx->channels + 1) * 4;
1188 
1189  for (channel=0; channel<avctx->channels; channel++) {
1190  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1191  samplesC = samples_p[channel];
1192 
1193  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1194  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1195  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1196  } else {
1197  current_sample = c->status[channel].predictor;
1198  previous_sample = c->status[channel].prev_sample;
1199  }
1200 
1201  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1202  int byte = bytestream2_get_byte(&gb);
1203  if (byte == 0xEE) { /* only seen in R2 and R3 */
1204  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1205  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1206 
1207  for (count2=0; count2<28; count2++)
1208  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1209  } else {
1210  coeff1 = ea_adpcm_table[ byte >> 4 ];
1211  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1212  shift = 20 - (byte & 0x0F);
1213 
1214  for (count2=0; count2<28; count2++) {
1215  if (count2 & 1)
1216  next_sample = sign_extend(byte, 4) << shift;
1217  else {
1218  byte = bytestream2_get_byte(&gb);
1219  next_sample = sign_extend(byte >> 4, 4) << shift;
1220  }
1221 
1222  next_sample += (current_sample * coeff1) +
1223  (previous_sample * coeff2);
1224  next_sample = av_clip_int16(next_sample >> 8);
1225 
1226  previous_sample = current_sample;
1227  current_sample = next_sample;
1228  *samplesC++ = current_sample;
1229  }
1230  }
1231  }
1232  if (!count) {
1233  count = count1;
1234  } else if (count != count1) {
1235  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1236  count = FFMAX(count, count1);
1237  }
1238 
1239  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1240  c->status[channel].predictor = current_sample;
1241  c->status[channel].prev_sample = previous_sample;
1242  }
1243  }
1244 
1245  frame->nb_samples = count * 28;
1246  bytestream2_seek(&gb, 0, SEEK_END);
1247  break;
1248  }
 1249     case AV_CODEC_ID_ADPCM_EA_XAS:
 1250         for (channel=0; channel<avctx->channels; channel++) {
1251  int coeff[2][4], shift[4];
1252  int16_t *s = samples_p[channel];
1253  for (n = 0; n < 4; n++, s += 32) {
1254  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1255  for (i=0; i<2; i++)
1256  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1257  s[0] = val & ~0x0F;
1258 
1259  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1260  shift[n] = 20 - (val & 0x0F);
1261  s[1] = val & ~0x0F;
1262  }
1263 
1264  for (m=2; m<32; m+=2) {
1265  s = &samples_p[channel][m];
1266  for (n = 0; n < 4; n++, s += 32) {
1267  int level, pred;
1268  int byte = bytestream2_get_byteu(&gb);
1269 
1270  level = sign_extend(byte >> 4, 4) << shift[n];
1271  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1272  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1273 
1274  level = sign_extend(byte, 4) << shift[n];
1275  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1276  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1277  }
1278  }
1279  }
1280  break;
 1281     case AV_CODEC_ID_ADPCM_IMA_AMV:
 1282         c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1283  c->status[0].step_index = bytestream2_get_le16u(&gb);
1284  bytestream2_skipu(&gb, 4);
1285  if (c->status[0].step_index > 88u) {
1286  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1287  c->status[0].step_index);
1288  return AVERROR_INVALIDDATA;
1289  }
1290 
1291  for (n = nb_samples >> (1 - st); n > 0; n--) {
1292  int v = bytestream2_get_byteu(&gb);
1293 
1294  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1295  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1296  }
1297  break;
 1298     case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
 1299         for (i = 0; i < avctx->channels; i++) {
1300  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1301  c->status[i].step_index = bytestream2_get_byteu(&gb);
1302  bytestream2_skipu(&gb, 1);
1303  if (c->status[i].step_index > 88u) {
1304  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1305  c->status[i].step_index);
1306  return AVERROR_INVALIDDATA;
1307  }
1308  }
1309 
1310  for (n = nb_samples >> (1 - st); n > 0; n--) {
1311  int v = bytestream2_get_byteu(&gb);
1312 
1313  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
1314  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
1315  }
1316  break;
1317  case AV_CODEC_ID_ADPCM_CT:
1318  for (n = nb_samples >> (1 - st); n > 0; n--) {
1319  int v = bytestream2_get_byteu(&gb);
1320  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1321  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1322  }
1323  break;
 1324     case AV_CODEC_ID_ADPCM_SBPRO_4:
 1325     case AV_CODEC_ID_ADPCM_SBPRO_3:
 1326     case AV_CODEC_ID_ADPCM_SBPRO_2:
 1327         if (!c->status[0].step_index) {
1328  /* the first byte is a raw sample */
1329  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1330  if (st)
1331  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1332  c->status[0].step_index = 1;
1333  nb_samples--;
1334  }
1335  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1336  for (n = nb_samples >> (1 - st); n > 0; n--) {
1337  int byte = bytestream2_get_byteu(&gb);
1338  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1339  byte >> 4, 4, 0);
1340  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1341  byte & 0x0F, 4, 0);
1342  }
1343  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1344  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1345  int byte = bytestream2_get_byteu(&gb);
1346  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1347  byte >> 5 , 3, 0);
1348  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1349  (byte >> 2) & 0x07, 3, 0);
1350  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1351  byte & 0x03, 2, 0);
1352  }
1353  } else {
1354  for (n = nb_samples >> (2 - st); n > 0; n--) {
1355  int byte = bytestream2_get_byteu(&gb);
1356  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1357  byte >> 6 , 2, 2);
1358  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1359  (byte >> 4) & 0x03, 2, 2);
1360  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1361  (byte >> 2) & 0x03, 2, 2);
1362  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1363  byte & 0x03, 2, 2);
1364  }
1365  }
1366  break;
1367  case AV_CODEC_ID_ADPCM_SWF:
1368  adpcm_swf_decode(avctx, buf, buf_size, samples);
1369  bytestream2_seek(&gb, 0, SEEK_END);
1370  break;
 1371     case AV_CODEC_ID_ADPCM_YAMAHA:
 1372         for (n = nb_samples >> (1 - st); n > 0; n--) {
1373  int v = bytestream2_get_byteu(&gb);
1374  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1375  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1376  }
1377  break;
1378  case AV_CODEC_ID_ADPCM_AFC:
1379  {
1380  int samples_per_block;
1381  int blocks;
1382 
1383  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1384  samples_per_block = avctx->extradata[0] / 16;
1385  blocks = nb_samples / avctx->extradata[0];
1386  } else {
1387  samples_per_block = nb_samples / 16;
1388  blocks = 1;
1389  }
1390 
1391  for (m = 0; m < blocks; m++) {
1392  for (channel = 0; channel < avctx->channels; channel++) {
1393  int prev1 = c->status[channel].sample1;
1394  int prev2 = c->status[channel].sample2;
1395 
1396  samples = samples_p[channel] + m * 16;
1397  /* Read in every sample for this channel. */
1398  for (i = 0; i < samples_per_block; i++) {
1399  int byte = bytestream2_get_byteu(&gb);
1400  int scale = 1 << (byte >> 4);
1401  int index = byte & 0xf;
1402  int factor1 = ff_adpcm_afc_coeffs[0][index];
1403  int factor2 = ff_adpcm_afc_coeffs[1][index];
1404 
1405  /* Decode 16 samples. */
1406  for (n = 0; n < 16; n++) {
1407  int32_t sampledat;
1408 
1409  if (n & 1) {
1410  sampledat = sign_extend(byte, 4);
1411  } else {
1412  byte = bytestream2_get_byteu(&gb);
1413  sampledat = sign_extend(byte >> 4, 4);
1414  }
1415 
1416  sampledat = ((prev1 * factor1 + prev2 * factor2) +
1417  ((sampledat * scale) << 11)) >> 11;
1418  *samples = av_clip_int16(sampledat);
1419  prev2 = prev1;
1420  prev1 = *samples++;
1421  }
1422  }
1423 
1424  c->status[channel].sample1 = prev1;
1425  c->status[channel].sample2 = prev2;
1426  }
1427  }
1428  bytestream2_seek(&gb, 0, SEEK_END);
1429  break;
1430  }
1431  case AV_CODEC_ID_ADPCM_THP:
 1432     case AV_CODEC_ID_ADPCM_THP_LE:
 1433     {
1434  int table[10][16];
1435  int ch;
1436 
1437 #define THP_GET16(g) \
1438  sign_extend( \
1439  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1440  bytestream2_get_le16u(&(g)) : \
1441  bytestream2_get_be16u(&(g)), 16)
1442 
1443  if (avctx->extradata) {
 1444             GetByteContext tb;
 1445             if (avctx->extradata_size < 32 * avctx->channels) {
1446  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1447  return AVERROR_INVALIDDATA;
1448  }
1449 
1450  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1451  for (i = 0; i < avctx->channels; i++)
1452  for (n = 0; n < 16; n++)
1453  table[i][n] = THP_GET16(tb);
1454  } else {
1455  for (i = 0; i < avctx->channels; i++)
1456  for (n = 0; n < 16; n++)
1457  table[i][n] = THP_GET16(gb);
1458 
1459  if (!c->has_status) {
1460  /* Initialize the previous sample. */
1461  for (i = 0; i < avctx->channels; i++) {
1462  c->status[i].sample1 = THP_GET16(gb);
1463  c->status[i].sample2 = THP_GET16(gb);
1464  }
1465  c->has_status = 1;
1466  } else {
1467  bytestream2_skip(&gb, avctx->channels * 4);
1468  }
1469  }
1470 
1471  for (ch = 0; ch < avctx->channels; ch++) {
1472  samples = samples_p[ch];
1473 
1474  /* Read in every sample for this channel. */
1475  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1476  int byte = bytestream2_get_byteu(&gb);
1477  int index = (byte >> 4) & 7;
1478  unsigned int exp = byte & 0x0F;
1479  int factor1 = table[ch][index * 2];
1480  int factor2 = table[ch][index * 2 + 1];
1481 
1482  /* Decode 14 samples. */
1483  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1484  int32_t sampledat;
1485 
1486  if (n & 1) {
1487  sampledat = sign_extend(byte, 4);
1488  } else {
1489  byte = bytestream2_get_byteu(&gb);
1490  sampledat = sign_extend(byte >> 4, 4);
1491  }
1492 
1493  sampledat = ((c->status[ch].sample1 * factor1
1494  + c->status[ch].sample2 * factor2) >> 11) + (sampledat << exp);
1495  *samples = av_clip_int16(sampledat);
1496  c->status[ch].sample2 = c->status[ch].sample1;
1497  c->status[ch].sample1 = *samples++;
1498  }
1499  }
1500  }
1501  break;
1502  }
1503  case AV_CODEC_ID_ADPCM_DTK:
1504  for (channel = 0; channel < avctx->channels; channel++) {
1505  samples = samples_p[channel];
1506 
1507  /* Read in every sample for this channel. */
1508  for (i = 0; i < nb_samples / 28; i++) {
1509  int byte, header;
1510  if (channel)
1511  bytestream2_skipu(&gb, 1);
1512  header = bytestream2_get_byteu(&gb);
1513  bytestream2_skipu(&gb, 3 - channel);
1514 
1515  /* Decode 28 samples. */
1516  for (n = 0; n < 28; n++) {
1517  int32_t sampledat, prev;
1518 
1519  switch (header >> 4) {
1520  case 1:
1521  prev = (c->status[channel].sample1 * 0x3c);
1522  break;
1523  case 2:
1524  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1525  break;
1526  case 3:
1527  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1528  break;
1529  default:
1530  prev = 0;
1531  }
1532 
1533  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1534 
1535  byte = bytestream2_get_byteu(&gb);
1536  if (!channel)
1537  sampledat = sign_extend(byte, 4);
1538  else
1539  sampledat = sign_extend(byte >> 4, 4);
1540 
1541  sampledat = (((sampledat << 12) >> (header & 0xf)) << 6) + prev;
1542  *samples++ = av_clip_int16(sampledat >> 6);
1543  c->status[channel].sample2 = c->status[channel].sample1;
1544  c->status[channel].sample1 = sampledat;
1545  }
1546  }
1547  if (!channel)
1548  bytestream2_seek(&gb, 0, SEEK_SET);
1549  }
1550  break;
1551 
1552  default:
1553  return -1;
1554  }
1555 
1556  if (avpkt->size && bytestream2_tell(&gb) == 0) {
1557  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
1558  return AVERROR_INVALIDDATA;
1559  }
1560 
1561  *got_frame_ptr = 1;
1562 
1563  if (avpkt->size < bytestream2_tell(&gb)) {
1564  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
1565  return avpkt->size;
1566  }
1567 
1568  return bytestream2_tell(&gb);
1569 }
1570 
1571 static void adpcm_flush(AVCodecContext *avctx)
1572 {
1573  ADPCMDecodeContext *c = avctx->priv_data;
1574  c->has_status = 0;
1575 }
1576 
1577 
 1578 static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
 1579                                                         AV_SAMPLE_FMT_NONE };
 1580 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
 1581                                                         AV_SAMPLE_FMT_NONE };
 1582 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
 1583                                                         AV_SAMPLE_FMT_S16P,
 1584                                                         AV_SAMPLE_FMT_NONE };
 1585 
1586 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
1587 AVCodec ff_ ## name_ ## _decoder = { \
1588  .name = #name_, \
1589  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
1590  .type = AVMEDIA_TYPE_AUDIO, \
1591  .id = id_, \
1592  .priv_data_size = sizeof(ADPCMDecodeContext), \
1593  .init = adpcm_decode_init, \
1594  .decode = adpcm_decode_frame, \
1595  .flush = adpcm_flush, \
1596  .capabilities = AV_CODEC_CAP_DR1, \
1597  .sample_fmts = sample_fmts_, \
1598 }
1599 
1600 /* Note: Do not forget to add new entries to the Makefile as well. */
1601 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
1602 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
1603 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
1604 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
1605 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
1606 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
1607 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
1608 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
1609 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
1610 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
1611 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
1612 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
1613 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
1614 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
1615 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
1616 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
1617 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
1618 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
1619 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
1620 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
1621 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
1622 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
1623 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
1624 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
1625 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
1626 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
1627 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
1628 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
1629 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
1630 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
1631 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
1632 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1511
const char const char void * val
Definition: avisynth_c.h:634
ADPCMChannelStatus status[10]
Definition: adpcm.c:87
float v
const char * s
Definition: avisynth_c.h:631
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
Definition: adpcm.c:304
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
#define THP_GET16(g)
const int16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:260
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
const char * g
Definition: vf_curves.c:108
static short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
Definition: adpcm.c:283
int size
Definition: avcodec.h:1424
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:1578
#define sample
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2299
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:245
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2270
uint8_t
#define av_cold
Definition: attributes.h:74
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:92
float delta
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:1571
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:418
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1617
static const int xa_adpcm_table[5][2]
Definition: adpcm.c:60
ADPCM tables.
static AVFrame * frame
uint8_t * data
Definition: avcodec.h:1423
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:212
const uint8_t * buffer
Definition: bytestream.h:34
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples that will be decoded from the packet.
Definition: adpcm.c:486
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
bitstream reader API header.
ptrdiff_t size
Definition: opengl_enc.c:101
static const uint8_t header[24]
Definition: sdr2.c:67
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2996
#define av_log(a,...)
unsigned m
Definition: audioconvert.c:187
static void predictor(uint8_t *src, int size)
Definition: exr.c:220
enum AVCodecID id
Definition: avcodec.h:3486
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
ADPCM encoder/decoder common header.
static short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
Definition: adpcm.c:324
static const int ea_adpcm_table[]
Definition: adpcm.c:68
#define AVERROR(e)
Definition: error.h:43
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
static const struct endianess table[]
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
Definition: adpcm.c:217
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
GLsizei count
Definition: opengl_enc.c:109
#define FFMAX(a, b)
Definition: common.h:79
Libavcodec external API header.
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:194
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:338
#define FFMIN(a, b)
Definition: common.h:81
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
int vqa_version
VQA version.
Definition: adpcm.c:88
int32_t
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:1580
float u
int n
Definition: avisynth_c.h:547
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
#define FF_ARRAY_ELEMS(a)
static short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
Definition: adpcm.c:168
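The core of the IMA family, shown here in simplified form (a sketch of the classic IMA step, not a verbatim copy of the body at adpcm.c:168): the low three bits of the nibble scale the current step into a difference, bit 3 gives the sign, and ff_adpcm_index_table adapts step_index for the next sample.
static int16_t ima_expand_sketch(ADPCMChannelStatus *c, int nibble)
{
    int step      = ff_adpcm_step_table[c->step_index];
    int diff      = step >> 3;              /* rounding term (+step/8) */
    int predictor = c->predictor;

    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;
    if (nibble & 8) predictor -= diff;
    else            predictor += diff;

    c->predictor  = av_clip_int16(predictor);
    c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return c->predictor;
}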
static const float pred[4]
Definition: siprdata.h:259
static const int swf_index_tables[4][16]
Definition: adpcm.c:77
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:59
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:441
static short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:243
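For contrast, a hedged sketch of the Microsoft ADPCM recurrence (again not the exact body at adpcm.c:243): a two-tap predictor built from the per-block coefficients, corrected by the signed nibble times idelta, with idelta itself adapted through ff_adpcm_AdaptationTable and clamped to a minimum of 16.
static int16_t ms_expand_sketch(ADPCMChannelStatus *c, int nibble)
{
    int predictor = (c->sample1 * c->coeff1 + c->sample2 * c->coeff2) / 64;

    predictor += ((nibble & 8) ? nibble - 16 : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta  = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;
    return c->sample1;
}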
main external API structure.
Definition: avcodec.h:1502
#define DK3_GET_NEXT_NIBBLE()
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:1040
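The ff_get_buffer() contract as seen from an audio decoder, in skeleton form (a hypothetical fixed-size codec, not adpcm_decode_frame itself): set frame->nb_samples before the call, then write into frame->data[0] for interleaved output or frame->extended_data[ch] for planar output.
static int decode_frame_sketch(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    int16_t *samples;
    int ret;

    if (avctx->channels <= 0)
        return AVERROR(EINVAL);

    /* hypothetical layout: the packet already holds interleaved S16 samples */
    frame->nb_samples = avpkt->size / (2 * avctx->channels);
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    samples = (int16_t *)frame->data[0];

    memcpy(samples, avpkt->data, frame->nb_samples * 2 * avctx->channels);

    *got_frame_ptr = 1;
    return avpkt->size;
}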
void * buf
Definition: avisynth_c.h:553
int extradata_size
Definition: avcodec.h:1618
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:410
static short adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:262
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:138
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:272
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
uint8_t level
Definition: svq3.c:150
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
common internal api header.
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:679
signed 16 bits
Definition: samplefmt.h:62
static double c[64]
unsigned bps
Definition: movenc.c:1335
void * priv_data
Definition: avcodec.h:1544
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int channels
number of audio channels
Definition: avcodec.h:2263
static const double coeff[2][5]
Definition: vf_owdenoise.c:71
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:1582
int16_t step_index
Definition: adpcm.h:35
signed 16 bits, planar
Definition: samplefmt.h:68
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:215
AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1400
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
for (j = 16; j > 0; --j)
#define tb
Definition: regdef.h:68
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:1586