FFmpeg
adpcmenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 #include "avcodec.h"
26 #include "put_bits.h"
27 #include "bytestream.h"
28 #include "adpcm.h"
29 #include "adpcm_data.h"
30 #include "internal.h"
31 
32 /**
33  * @file
34  * ADPCM encoders
35  * See ADPCM decoder reference documents for codec information.
36  */
37 
38 typedef struct TrellisPath { /* one backward link in the trellis (Viterbi) path chain */
 39  int nibble; /* ADPCM nibble emitted at this step of the path */
 40  int prev; /* index of the preceding TrellisPath entry in the paths array */
 41 } TrellisPath;
42 
43 typedef struct TrellisNode { /* one live candidate encoder state on the trellis frontier */
 44  uint32_t ssd; /* accumulated sum of squared differences (distortion) for this state */
 45  int path; /* index of this node's TrellisPath entry (head of its back-chain) */
 46  int sample1; /* most recently decoded sample for this state */
 47  int sample2; /* previous decoded sample (used by the MS-ADPCM predictor) */
 48  int step; /* codec-dependent: step_index (IMA), step (Yamaha), or idelta (MS) */
 49 } TrellisNode;
50 
51 typedef struct ADPCMEncodeContext {
58 
59 #define FREEZE_INTERVAL 128
60 
61 static av_cold int adpcm_encode_close(AVCodecContext *avctx);
62 
64 {
65  ADPCMEncodeContext *s = avctx->priv_data;
66  uint8_t *extradata;
67  int i;
68  int ret = AVERROR(ENOMEM);
69 
70  if (avctx->channels > 2) {
71  av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
72  return AVERROR(EINVAL);
73  }
74 
75  if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
76  av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
77  return AVERROR(EINVAL);
78  }
79 
80  if (avctx->trellis && avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_SSI) {
81  /*
82  * The current trellis implementation doesn't work for extended
83  * runs of samples without periodic resets. Disallow it.
84  */
85  av_log(avctx, AV_LOG_ERROR, "trellis not supported\n");
86  return AVERROR_PATCHWELCOME;
87  }
88 
89  if (avctx->trellis) {
90  int frontier = 1 << avctx->trellis;
91  int max_paths = frontier * FREEZE_INTERVAL;
92  FF_ALLOC_OR_GOTO(avctx, s->paths,
93  max_paths * sizeof(*s->paths), error);
94  FF_ALLOC_OR_GOTO(avctx, s->node_buf,
95  2 * frontier * sizeof(*s->node_buf), error);
96  FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
97  2 * frontier * sizeof(*s->nodep_buf), error);
98  FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
99  65536 * sizeof(*s->trellis_hash), error);
100  }
101 
103 
104  switch (avctx->codec->id) {
106  /* each 16 bits sample gives one nibble
107  and we have 4 bytes per channel overhead */
108  avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
109  (4 * avctx->channels) + 1;
110  /* seems frame_size isn't taken into account...
111  have to buffer the samples :-( */
112  avctx->block_align = BLKSIZE;
113  avctx->bits_per_coded_sample = 4;
114  break;
116  avctx->frame_size = 64;
117  avctx->block_align = 34 * avctx->channels;
118  break;
120  /* each 16 bits sample gives one nibble
121  and we have 7 bytes per channel overhead */
122  avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
123  avctx->bits_per_coded_sample = 4;
124  avctx->block_align = BLKSIZE;
126  goto error;
127  avctx->extradata_size = 32;
128  extradata = avctx->extradata;
129  bytestream_put_le16(&extradata, avctx->frame_size);
130  bytestream_put_le16(&extradata, 7); /* wNumCoef */
131  for (i = 0; i < 7; i++) {
132  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
133  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
134  }
135  break;
137  avctx->frame_size = BLKSIZE * 2 / avctx->channels;
138  avctx->block_align = BLKSIZE;
139  break;
141  if (avctx->sample_rate != 11025 &&
142  avctx->sample_rate != 22050 &&
143  avctx->sample_rate != 44100) {
144  av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
145  "22050 or 44100\n");
146  ret = AVERROR(EINVAL);
147  goto error;
148  }
149  avctx->frame_size = 512 * (avctx->sample_rate / 11025);
150  break;
152  avctx->frame_size = BLKSIZE * 2 / avctx->channels;
153  avctx->block_align = BLKSIZE;
154  break;
155  default:
156  ret = AVERROR(EINVAL);
157  goto error;
158  }
159 
160  return 0;
161 error:
162  return ret;
163 }
164 
166 {
167  ADPCMEncodeContext *s = avctx->priv_data;
168  av_freep(&s->paths);
169  av_freep(&s->node_buf);
170  av_freep(&s->nodep_buf);
171  av_freep(&s->trellis_hash);
172 
173  return 0;
174 }
175 
176 
178  int16_t sample)
179 {
180  int delta = sample - c->prev_sample;
181  int nibble = FFMIN(7, abs(delta) * 4 /
182  ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
183  c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
184  ff_adpcm_yamaha_difflookup[nibble]) / 8);
185  c->prev_sample = av_clip_int16(c->prev_sample);
186  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
187  return nibble;
188 }
189 
191  int16_t sample)
192 {
193  int delta = sample - c->prev_sample;
194  int diff, step = ff_adpcm_step_table[c->step_index];
195  int nibble = 8*(delta < 0);
196 
197  delta= abs(delta);
198  diff = delta + (step >> 3);
199 
200  if (delta >= step) {
201  nibble |= 4;
202  delta -= step;
203  }
204  step >>= 1;
205  if (delta >= step) {
206  nibble |= 2;
207  delta -= step;
208  }
209  step >>= 1;
210  if (delta >= step) {
211  nibble |= 1;
212  delta -= step;
213  }
214  diff -= delta;
215 
216  if (nibble & 8)
217  c->prev_sample -= diff;
218  else
219  c->prev_sample += diff;
220 
221  c->prev_sample = av_clip_int16(c->prev_sample);
222  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
223 
224  return nibble;
225 }
226 
228  int16_t sample)
229 {
230  int predictor, nibble, bias;
231 
232  predictor = (((c->sample1) * (c->coeff1)) +
233  (( c->sample2) * (c->coeff2))) / 64;
234 
235  nibble = sample - predictor;
236  if (nibble >= 0)
237  bias = c->idelta / 2;
238  else
239  bias = -c->idelta / 2;
240 
241  nibble = (nibble + bias) / c->idelta;
242  nibble = av_clip_intp2(nibble, 3) & 0x0F;
243 
244  predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
245 
246  c->sample2 = c->sample1;
247  c->sample1 = av_clip_int16(predictor);
248 
249  c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
250  if (c->idelta < 16)
251  c->idelta = 16;
252 
253  return nibble;
254 }
255 
257  int16_t sample)
258 {
259  int nibble, delta;
260 
261  if (!c->step) {
262  c->predictor = 0;
263  c->step = 127;
264  }
265 
266  delta = sample - c->predictor;
267 
268  nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
269 
270  c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
271  c->predictor = av_clip_int16(c->predictor);
272  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
273  c->step = av_clip(c->step, 127, 24576);
274 
275  return nibble;
276 }
277 
279  const int16_t *samples, uint8_t *dst,
280  ADPCMChannelStatus *c, int n, int stride)
281 {
282  //FIXME 6% faster if frontier is a compile-time constant
283  ADPCMEncodeContext *s = avctx->priv_data;
284  const int frontier = 1 << avctx->trellis;
285  const int version = avctx->codec->id;
286  TrellisPath *paths = s->paths, *p;
287  TrellisNode *node_buf = s->node_buf;
288  TrellisNode **nodep_buf = s->nodep_buf;
289  TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
290  TrellisNode **nodes_next = nodep_buf + frontier;
291  int pathn = 0, froze = -1, i, j, k, generation = 0;
292  uint8_t *hash = s->trellis_hash;
293  memset(hash, 0xff, 65536 * sizeof(*hash));
294 
295  memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
296  nodes[0] = node_buf + frontier;
297  nodes[0]->ssd = 0;
298  nodes[0]->path = 0;
299  nodes[0]->step = c->step_index;
300  nodes[0]->sample1 = c->sample1;
301  nodes[0]->sample2 = c->sample2;
305  nodes[0]->sample1 = c->prev_sample;
307  nodes[0]->step = c->idelta;
309  if (c->step == 0) {
310  nodes[0]->step = 127;
311  nodes[0]->sample1 = 0;
312  } else {
313  nodes[0]->step = c->step;
314  nodes[0]->sample1 = c->predictor;
315  }
316  }
317 
318  for (i = 0; i < n; i++) {
319  TrellisNode *t = node_buf + frontier*(i&1);
320  TrellisNode **u;
321  int sample = samples[i * stride];
322  int heap_pos = 0;
323  memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
324  for (j = 0; j < frontier && nodes[j]; j++) {
325  // higher j have higher ssd already, so they're likely
326  // to yield a suboptimal next sample too
327  const int range = (j < frontier / 2) ? 1 : 0;
328  const int step = nodes[j]->step;
329  int nidx;
330  if (version == AV_CODEC_ID_ADPCM_MS) {
331  const int predictor = ((nodes[j]->sample1 * c->coeff1) +
332  (nodes[j]->sample2 * c->coeff2)) / 64;
333  const int div = (sample - predictor) / step;
334  const int nmin = av_clip(div-range, -8, 6);
335  const int nmax = av_clip(div+range, -7, 7);
336  for (nidx = nmin; nidx <= nmax; nidx++) {
337  const int nibble = nidx & 0xf;
338  int dec_sample = predictor + nidx * step;
339 #define STORE_NODE(NAME, STEP_INDEX)\
340  int d;\
341  uint32_t ssd;\
342  int pos;\
343  TrellisNode *u;\
344  uint8_t *h;\
345  dec_sample = av_clip_int16(dec_sample);\
346  d = sample - dec_sample;\
347  ssd = nodes[j]->ssd + d*(unsigned)d;\
348  /* Check for wraparound, skip such samples completely. \
349  * Note, changing ssd to a 64 bit variable would be \
350  * simpler, avoiding this check, but it's slower on \
351  * x86 32 bit at the moment. */\
352  if (ssd < nodes[j]->ssd)\
353  goto next_##NAME;\
354  /* Collapse any two states with the same previous sample value. \
355  * One could also distinguish states by step and by 2nd to last
356  * sample, but the effects of that are negligible.
357  * Since nodes in the previous generation are iterated
358  * through a heap, they're roughly ordered from better to
359  * worse, but not strictly ordered. Therefore, an earlier
360  * node with the same sample value is better in most cases
361  * (and thus the current is skipped), but not strictly
362  * in all cases. Only skipping samples where ssd >=
363  * ssd of the earlier node with the same sample gives
364  * slightly worse quality, though, for some reason. */ \
365  h = &hash[(uint16_t) dec_sample];\
366  if (*h == generation)\
367  goto next_##NAME;\
368  if (heap_pos < frontier) {\
369  pos = heap_pos++;\
370  } else {\
371  /* Try to replace one of the leaf nodes with the new \
372  * one, but try a different slot each time. */\
373  pos = (frontier >> 1) +\
374  (heap_pos & ((frontier >> 1) - 1));\
375  if (ssd > nodes_next[pos]->ssd)\
376  goto next_##NAME;\
377  heap_pos++;\
378  }\
379  *h = generation;\
380  u = nodes_next[pos];\
381  if (!u) {\
382  av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
383  u = t++;\
384  nodes_next[pos] = u;\
385  u->path = pathn++;\
386  }\
387  u->ssd = ssd;\
388  u->step = STEP_INDEX;\
389  u->sample2 = nodes[j]->sample1;\
390  u->sample1 = dec_sample;\
391  paths[u->path].nibble = nibble;\
392  paths[u->path].prev = nodes[j]->path;\
393  /* Sift the newly inserted node up in the heap to \
394  * restore the heap property. */\
395  while (pos > 0) {\
396  int parent = (pos - 1) >> 1;\
397  if (nodes_next[parent]->ssd <= ssd)\
398  break;\
399  FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
400  pos = parent;\
401  }\
402  next_##NAME:;
403  STORE_NODE(ms, FFMAX(16,
404  (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
405  }
406  } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
409 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
410  const int predictor = nodes[j]->sample1;\
411  const int div = (sample - predictor) * 4 / STEP_TABLE;\
412  int nmin = av_clip(div - range, -7, 6);\
413  int nmax = av_clip(div + range, -6, 7);\
414  if (nmin <= 0)\
415  nmin--; /* distinguish -0 from +0 */\
416  if (nmax < 0)\
417  nmax--;\
418  for (nidx = nmin; nidx <= nmax; nidx++) {\
419  const int nibble = nidx < 0 ? 7 - nidx : nidx;\
420  int dec_sample = predictor +\
421  (STEP_TABLE *\
422  ff_adpcm_yamaha_difflookup[nibble]) / 8;\
423  STORE_NODE(NAME, STEP_INDEX);\
424  }
426  av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
427  } else { //AV_CODEC_ID_ADPCM_YAMAHA
428  LOOP_NODES(yamaha, step,
429  av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
430  127, 24576));
431 #undef LOOP_NODES
432 #undef STORE_NODE
433  }
434  }
435 
436  u = nodes;
437  nodes = nodes_next;
438  nodes_next = u;
439 
440  generation++;
441  if (generation == 255) {
442  memset(hash, 0xff, 65536 * sizeof(*hash));
443  generation = 0;
444  }
445 
446  // prevent overflow
447  if (nodes[0]->ssd > (1 << 28)) {
448  for (j = 1; j < frontier && nodes[j]; j++)
449  nodes[j]->ssd -= nodes[0]->ssd;
450  nodes[0]->ssd = 0;
451  }
452 
453  // merge old paths to save memory
454  if (i == froze + FREEZE_INTERVAL) {
455  p = &paths[nodes[0]->path];
456  for (k = i; k > froze; k--) {
457  dst[k] = p->nibble;
458  p = &paths[p->prev];
459  }
460  froze = i;
461  pathn = 0;
462  // other nodes might use paths that don't coincide with the frozen one.
463  // checking which nodes do so is too slow, so just kill them all.
464  // this also slightly improves quality, but I don't know why.
465  memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
466  }
467  }
468 
469  p = &paths[nodes[0]->path];
470  for (i = n - 1; i > froze; i--) {
471  dst[i] = p->nibble;
472  p = &paths[p->prev];
473  }
474 
475  c->predictor = nodes[0]->sample1;
476  c->sample1 = nodes[0]->sample1;
477  c->sample2 = nodes[0]->sample2;
478  c->step_index = nodes[0]->step;
479  c->step = nodes[0]->step;
480  c->idelta = nodes[0]->step;
481 }
482 
483 static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
484  const AVFrame *frame, int *got_packet_ptr)
485 {
486  int n, i, ch, st, pkt_size, ret;
487  const int16_t *samples;
488  int16_t **samples_p;
489  uint8_t *dst;
490  ADPCMEncodeContext *c = avctx->priv_data;
491  uint8_t *buf;
492 
493  samples = (const int16_t *)frame->data[0];
494  samples_p = (int16_t **)frame->extended_data;
495  st = avctx->channels == 2;
496 
497  if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF)
498  pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
499  else if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_SSI)
500  pkt_size = (frame->nb_samples * avctx->channels) / 2;
501  else
502  pkt_size = avctx->block_align;
503  if ((ret = ff_alloc_packet2(avctx, avpkt, pkt_size, 0)) < 0)
504  return ret;
505  dst = avpkt->data;
506 
507  switch(avctx->codec->id) {
509  {
510  int blocks, j;
511 
512  blocks = (frame->nb_samples - 1) / 8;
513 
514  for (ch = 0; ch < avctx->channels; ch++) {
515  ADPCMChannelStatus *status = &c->status[ch];
516  status->prev_sample = samples_p[ch][0];
517  /* status->step_index = 0;
518  XXX: not sure how to init the state machine */
519  bytestream_put_le16(&dst, status->prev_sample);
520  *dst++ = status->step_index;
521  *dst++ = 0; /* unknown */
522  }
523 
524  /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
525  if (avctx->trellis > 0) {
526  FF_ALLOC_ARRAY_OR_GOTO(avctx, buf, avctx->channels, blocks * 8, error);
527  for (ch = 0; ch < avctx->channels; ch++) {
528  adpcm_compress_trellis(avctx, &samples_p[ch][1],
529  buf + ch * blocks * 8, &c->status[ch],
530  blocks * 8, 1);
531  }
532  for (i = 0; i < blocks; i++) {
533  for (ch = 0; ch < avctx->channels; ch++) {
534  uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
535  for (j = 0; j < 8; j += 2)
536  *dst++ = buf1[j] | (buf1[j + 1] << 4);
537  }
538  }
539  av_free(buf);
540  } else {
541  for (i = 0; i < blocks; i++) {
542  for (ch = 0; ch < avctx->channels; ch++) {
543  ADPCMChannelStatus *status = &c->status[ch];
544  const int16_t *smp = &samples_p[ch][1 + i * 8];
545  for (j = 0; j < 8; j += 2) {
547  v |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
548  *dst++ = v;
549  }
550  }
551  }
552  }
553  break;
554  }
556  {
557  PutBitContext pb;
558  init_put_bits(&pb, dst, pkt_size);
559 
560  for (ch = 0; ch < avctx->channels; ch++) {
561  ADPCMChannelStatus *status = &c->status[ch];
562  put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
563  put_bits(&pb, 7, status->step_index);
564  if (avctx->trellis > 0) {
565  uint8_t buf[64];
566  adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
567  64, 1);
568  for (i = 0; i < 64; i++)
569  put_bits(&pb, 4, buf[i ^ 1]);
570  status->prev_sample = status->predictor;
571  } else {
572  for (i = 0; i < 64; i += 2) {
573  int t1, t2;
574  t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i ]);
575  t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
576  put_bits(&pb, 4, t2);
577  put_bits(&pb, 4, t1);
578  }
579  }
580  }
581 
582  flush_put_bits(&pb);
583  break;
584  }
586  {
587  PutBitContext pb;
588  init_put_bits(&pb, dst, pkt_size);
589 
590  av_assert0(avctx->trellis == 0);
591 
592  for (i = 0; i < frame->nb_samples; i++) {
593  for (ch = 0; ch < avctx->channels; ch++) {
594  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
595  }
596  }
597 
598  flush_put_bits(&pb);
599  break;
600  }
602  {
603  PutBitContext pb;
604  init_put_bits(&pb, dst, pkt_size);
605 
606  n = frame->nb_samples - 1;
607 
608  // store AdpcmCodeSize
609  put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
610 
611  // init the encoder state
612  for (i = 0; i < avctx->channels; i++) {
613  // clip step so it fits 6 bits
614  c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
615  put_sbits(&pb, 16, samples[i]);
616  put_bits(&pb, 6, c->status[i].step_index);
617  c->status[i].prev_sample = samples[i];
618  }
619 
620  if (avctx->trellis > 0) {
621  FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
622  adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
623  &c->status[0], n, avctx->channels);
624  if (avctx->channels == 2)
625  adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
626  buf + n, &c->status[1], n,
627  avctx->channels);
628  for (i = 0; i < n; i++) {
629  put_bits(&pb, 4, buf[i]);
630  if (avctx->channels == 2)
631  put_bits(&pb, 4, buf[n + i]);
632  }
633  av_free(buf);
634  } else {
635  for (i = 1; i < frame->nb_samples; i++) {
636  put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
637  samples[avctx->channels * i]));
638  if (avctx->channels == 2)
639  put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
640  samples[2 * i + 1]));
641  }
642  }
643  flush_put_bits(&pb);
644  break;
645  }
647  for (i = 0; i < avctx->channels; i++) {
648  int predictor = 0;
649  *dst++ = predictor;
650  c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
651  c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
652  }
653  for (i = 0; i < avctx->channels; i++) {
654  if (c->status[i].idelta < 16)
655  c->status[i].idelta = 16;
656  bytestream_put_le16(&dst, c->status[i].idelta);
657  }
658  for (i = 0; i < avctx->channels; i++)
659  c->status[i].sample2= *samples++;
660  for (i = 0; i < avctx->channels; i++) {
661  c->status[i].sample1 = *samples++;
662  bytestream_put_le16(&dst, c->status[i].sample1);
663  }
664  for (i = 0; i < avctx->channels; i++)
665  bytestream_put_le16(&dst, c->status[i].sample2);
666 
667  if (avctx->trellis > 0) {
668  n = avctx->block_align - 7 * avctx->channels;
669  FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
670  if (avctx->channels == 1) {
671  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
672  avctx->channels);
673  for (i = 0; i < n; i += 2)
674  *dst++ = (buf[i] << 4) | buf[i + 1];
675  } else {
676  adpcm_compress_trellis(avctx, samples, buf,
677  &c->status[0], n, avctx->channels);
678  adpcm_compress_trellis(avctx, samples + 1, buf + n,
679  &c->status[1], n, avctx->channels);
680  for (i = 0; i < n; i++)
681  *dst++ = (buf[i] << 4) | buf[n + i];
682  }
683  av_free(buf);
684  } else {
685  for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
686  int nibble;
687  nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
688  nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
689  *dst++ = nibble;
690  }
691  }
692  break;
694  n = frame->nb_samples / 2;
695  if (avctx->trellis > 0) {
696  FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
697  n *= 2;
698  if (avctx->channels == 1) {
699  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
700  avctx->channels);
701  for (i = 0; i < n; i += 2)
702  *dst++ = buf[i] | (buf[i + 1] << 4);
703  } else {
704  adpcm_compress_trellis(avctx, samples, buf,
705  &c->status[0], n, avctx->channels);
706  adpcm_compress_trellis(avctx, samples + 1, buf + n,
707  &c->status[1], n, avctx->channels);
708  for (i = 0; i < n; i++)
709  *dst++ = buf[i] | (buf[n + i] << 4);
710  }
711  av_free(buf);
712  } else
713  for (n *= avctx->channels; n > 0; n--) {
714  int nibble;
715  nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
716  nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
717  *dst++ = nibble;
718  }
719  break;
720  default:
721  return AVERROR(EINVAL);
722  }
723 
724  avpkt->size = pkt_size;
725  *got_packet_ptr = 1;
726  return 0;
727 error:
728  return AVERROR(ENOMEM);
729 }
730 
731 static const enum AVSampleFormat sample_fmts[] = {
733 };
734 
735 static const enum AVSampleFormat sample_fmts_p[] = {
737 };
738 
/* Expands to one AVCodec encoder registration; all ADPCM variants share the
 * same init/encode/close callbacks and the ADPCMEncodeContext private data. */
739 #define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_) \
740 AVCodec ff_ ## name_ ## _encoder = { \
741  .name = #name_, \
742  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
743  .type = AVMEDIA_TYPE_AUDIO, \
744  .id = id_, \
745  .priv_data_size = sizeof(ADPCMEncodeContext), \
746  .init = adpcm_encode_init, \
747  .encode2 = adpcm_encode_frame, \
748  .close = adpcm_encode_close, \
749  .sample_fmts = sample_fmts_, \
750  .capabilities = capabilities_, \
751  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
752 }
753 
/* Instantiate the concrete encoders. Planar-sample codecs (QT, WAV) use
 * sample_fmts_p (AV_SAMPLE_FMT_S16P); the rest use interleaved sample_fmts. */
754 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, sample_fmts_p, 0, "ADPCM IMA QuickTime");
755 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_SSI, adpcm_ima_ssi, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Simon & Schuster Interactive");
756 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0, "ADPCM IMA WAV");
757 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, sample_fmts, 0, "ADPCM Microsoft");
758 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF, adpcm_swf, sample_fmts, 0, "ADPCM Shockwave Flash");
759 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, sample_fmts, 0, "ADPCM Yamaha");
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:29
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1206
adpcm_yamaha_compress_sample
static uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:256
stride
int stride
Definition: mace.c:144
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:346
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:340
status
They must not be accessed directly. The fifo field contains the frames that are queued in the input for processing by the filter. The status_in and status_out fields contain the queued status (EOF or error) of the link.
TrellisNode::sample1
int sample1
Definition: adpcmenc.c:46
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
LOOP_NODES
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1186
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:716
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
TrellisNode::path
int path
Definition: adpcmenc.c:45
put_sbits
static void put_sbits(PutBitContext *pb, int n, int32_t value)
Definition: put_bits.h:240
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
TrellisNode::sample2
int sample2
Definition: adpcmenc.c:47
TrellisNode::step
int step
Definition: adpcmenc.c:48
t1
#define t1
Definition: regdef.h:29
ADPCM_ENCODER
#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_)
Definition: adpcmenc.c:724
hash
uint8_t hash[HASH_SIZE]
Definition: movenc.c:57
ADPCMEncodeContext::nodep_buf
TrellisNode ** nodep_buf
Definition: adpcmenc.c:55
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
U
#define U(x)
Definition: vp56_arith.h:37
STORE_NODE
#define STORE_NODE(NAME, STEP_INDEX)
TrellisNode
Definition: adpcmenc.c:43
ADPCMEncodeContext::status
ADPCMChannelStatus status[6]
Definition: adpcmenc.c:52
ADPCMEncodeContext::paths
TrellisPath * paths
Definition: adpcmenc.c:53
ADPCMEncodeContext::node_buf
TrellisNode * node_buf
Definition: adpcmenc.c:54
TrellisNode::ssd
uint32_t ssd
Definition: adpcmenc.c:44
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
TrellisPath::nibble
int nibble
Definition: adpcmenc.c:39
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
PutBitContext
Definition: put_bits.h:35
adpcm_compress_trellis
static void adpcm_compress_trellis(AVCodecContext *avctx, const int16_t *samples, uint8_t *dst, ADPCMChannelStatus *c, int n, int stride)
Definition: adpcmenc.c:278
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
if
if(ret)
Definition: filter_design.txt:179
TrellisPath
Definition: aaccoder.c:188
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:354
abs
#define abs(x)
Definition: cuda_runtime.h:35
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1475
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
adpcm.h
av_get_bits_per_sample
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
Definition: utils.c:1574
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
AVPacket::size
int size
Definition: packet.h:356
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
sample
#define sample
Definition: flacdsp_template.c:44
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:353
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
version
version
Definition: libkvazaar.c:292
FREEZE_INTERVAL
#define FREEZE_INTERVAL
Definition: adpcmenc.c:59
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:1187
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1750
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
BLKSIZE
#define BLKSIZE
Definition: adpcm.h:31
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
ADPCMEncodeContext::trellis_hash
uint8_t * trellis_hash
Definition: adpcmenc.c:56
delta
float delta
Definition: vorbis_enc_data.h:457
adpcm_ima_compress_sample
static uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:177
uint8_t
uint8_t
Definition: audio_convert.c:194
TrellisPath::prev
int prev
Definition: aaccoder.c:190
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
adpcm_encode_frame
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Definition: adpcmenc.c:468
avcodec.h
ret
ret
Definition: filter_design.txt:187
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1223
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ADPCMEncodeContext
Definition: adpcmenc.c:51
sample_fmts_p
static enum AVSampleFormat sample_fmts_p[]
Definition: adpcmenc.c:720
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
AVCodecContext
main external API structure.
Definition: avcodec.h:526
t2
#define t2
Definition: regdef.h:30
ima
#define ima
Definition: vf_colormatrix.c:110
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
FF_ALLOC_ARRAY_OR_GOTO
#define FF_ALLOC_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
Definition: internal.h:158
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
FF_ALLOC_OR_GOTO
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:140
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:384
adpcm_encode_init
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
Definition: adpcmenc.c:63
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:341
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
adpcm_ima_qt_compress_sample
static uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:190
bytestream.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AV_CODEC_CAP_SMALL_LAST_FRAME
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:80
ff_alloc_packet2
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
put_bits.h
ADPCMChannelStatus
Definition: adpcm.h:33
adpcm_ms_compress_sample
static uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:227
adpcm_encode_close
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
Definition: adpcmenc.c:165