FFmpeg — libavcodec/adpcmenc.c: ADPCM audio encoders (annotated source listing).
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 #include "avcodec.h"
26 #include "put_bits.h"
27 #include "bytestream.h"
28 #include "adpcm.h"
29 #include "adpcm_data.h"
30 #include "internal.h"
31 
32 /**
33  * @file
34  * ADPCM encoders
35  * See ADPCM decoder reference documents for codec information.
36  */
37 
/* One step of a candidate encoding path in the trellis search.
 * Paths are stored as a backward-linked list inside ADPCMEncodeContext.paths. */
typedef struct TrellisPath {
    int nibble; /* ADPCM code emitted at this step (written to dst when the path is frozen) */
    int prev;   /* index of the previous step's TrellisPath in the paths[] array */
} TrellisPath;
42 
/* One surviving state of the trellis search frontier.
 * The per-codec meaning of `step` varies: IMA step index, MS idelta,
 * or Yamaha step size (see how it is seeded in adpcm_compress_trellis). */
typedef struct TrellisNode {
    uint32_t ssd;  /* accumulated sum of squared differences vs. the input */
    int path;      /* index into paths[] of the last TrellisPath of this node */
    int sample1;   /* most recent reconstructed sample */
    int sample2;   /* second most recent reconstructed sample */
    int step;      /* codec-specific quantizer state (step index / idelta / step) */
} TrellisNode;
50 
51 typedef struct ADPCMEncodeContext {
58 
59 #define FREEZE_INTERVAL 128
60 
62 {
63  ADPCMEncodeContext *s = avctx->priv_data;
64  uint8_t *extradata;
65  int i;
66 
67  if (avctx->channels > 2) {
68  av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
69  return AVERROR(EINVAL);
70  }
71 
72  if (avctx->trellis) {
73  int frontier, max_paths;
74 
75  if ((unsigned)avctx->trellis > 16U) {
76  av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
77  return AVERROR(EINVAL);
78  }
79 
80  if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_SSI ||
81  avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_APM ||
82  avctx->codec->id == AV_CODEC_ID_ADPCM_ARGO) {
83  /*
84  * The current trellis implementation doesn't work for extended
85  * runs of samples without periodic resets. Disallow it.
86  */
87  av_log(avctx, AV_LOG_ERROR, "trellis not supported\n");
88  return AVERROR_PATCHWELCOME;
89  }
90 
91  frontier = 1 << avctx->trellis;
92  max_paths = frontier * FREEZE_INTERVAL;
93  if (!FF_ALLOC_TYPED_ARRAY(s->paths, max_paths) ||
94  !FF_ALLOC_TYPED_ARRAY(s->node_buf, 2 * frontier) ||
95  !FF_ALLOC_TYPED_ARRAY(s->nodep_buf, 2 * frontier) ||
97  return AVERROR(ENOMEM);
98  }
99 
101 
102  switch (avctx->codec->id) {
104  /* each 16 bits sample gives one nibble
105  and we have 4 bytes per channel overhead */
106  avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
107  (4 * avctx->channels) + 1;
108  /* seems frame_size isn't taken into account...
109  have to buffer the samples :-( */
110  avctx->block_align = BLKSIZE;
111  avctx->bits_per_coded_sample = 4;
112  break;
114  avctx->frame_size = 64;
115  avctx->block_align = 34 * avctx->channels;
116  break;
118  /* each 16 bits sample gives one nibble
119  and we have 7 bytes per channel overhead */
120  avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
121  avctx->bits_per_coded_sample = 4;
122  avctx->block_align = BLKSIZE;
124  return AVERROR(ENOMEM);
125  avctx->extradata_size = 32;
126  extradata = avctx->extradata;
127  bytestream_put_le16(&extradata, avctx->frame_size);
128  bytestream_put_le16(&extradata, 7); /* wNumCoef */
129  for (i = 0; i < 7; i++) {
130  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
131  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
132  }
133  break;
135  avctx->frame_size = BLKSIZE * 2 / avctx->channels;
136  avctx->block_align = BLKSIZE;
137  break;
139  if (avctx->sample_rate != 11025 &&
140  avctx->sample_rate != 22050 &&
141  avctx->sample_rate != 44100) {
142  av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
143  "22050 or 44100\n");
144  return AVERROR(EINVAL);
145  }
146  avctx->frame_size = 512 * (avctx->sample_rate / 11025);
147  break;
149  avctx->frame_size = BLKSIZE * 2 / avctx->channels;
150  avctx->block_align = BLKSIZE;
151  break;
153  avctx->frame_size = BLKSIZE * 2 / avctx->channels;
154  avctx->block_align = BLKSIZE;
155 
156  if (!(avctx->extradata = av_mallocz(28 + AV_INPUT_BUFFER_PADDING_SIZE)))
157  return AVERROR(ENOMEM);
158  avctx->extradata_size = 28;
159  break;
161  avctx->frame_size = 32;
162  avctx->block_align = 17 * avctx->channels;
163  break;
164  default:
165  return AVERROR(EINVAL);
166  }
167 
168  return 0;
169 }
170 
172 {
173  ADPCMEncodeContext *s = avctx->priv_data;
174  av_freep(&s->paths);
175  av_freep(&s->node_buf);
176  av_freep(&s->nodep_buf);
177  av_freep(&s->trellis_hash);
178 
179  return 0;
180 }
181 
182 
184  int16_t sample)
185 {
186  int delta = sample - c->prev_sample;
187  int nibble = FFMIN(7, abs(delta) * 4 /
188  ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
191  c->prev_sample = av_clip_int16(c->prev_sample);
192  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
193  return nibble;
194 }
195 
197  int16_t sample)
198 {
199  int delta = sample - c->prev_sample;
201  int nibble = 8*(delta < 0);
202 
203  delta= abs(delta);
204  diff = delta + (step >> 3);
205 
206  if (delta >= step) {
207  nibble |= 4;
208  delta -= step;
209  }
210  step >>= 1;
211  if (delta >= step) {
212  nibble |= 2;
213  delta -= step;
214  }
215  step >>= 1;
216  if (delta >= step) {
217  nibble |= 1;
218  delta -= step;
219  }
220  diff -= delta;
221 
222  if (nibble & 8)
223  c->prev_sample -= diff;
224  else
225  c->prev_sample += diff;
226 
227  c->prev_sample = av_clip_int16(c->prev_sample);
228  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
229 
230  return nibble;
231 }
232 
234  int16_t sample)
235 {
236  int predictor, nibble, bias;
237 
238  predictor = (((c->sample1) * (c->coeff1)) +
239  (( c->sample2) * (c->coeff2))) / 64;
240 
241  nibble = sample - predictor;
242  if (nibble >= 0)
243  bias = c->idelta / 2;
244  else
245  bias = -c->idelta / 2;
246 
247  nibble = (nibble + bias) / c->idelta;
248  nibble = av_clip_intp2(nibble, 3) & 0x0F;
249 
250  predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
251 
252  c->sample2 = c->sample1;
253  c->sample1 = av_clip_int16(predictor);
254 
255  c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
256  if (c->idelta < 16)
257  c->idelta = 16;
258 
259  return nibble;
260 }
261 
263  int16_t sample)
264 {
265  int nibble, delta;
266 
267  if (!c->step) {
268  c->predictor = 0;
269  c->step = 127;
270  }
271 
272  delta = sample - c->predictor;
273 
274  nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
275 
276  c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
277  c->predictor = av_clip_int16(c->predictor);
278  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
279  c->step = av_clip(c->step, 127, 24576);
280 
281  return nibble;
282 }
283 
285  const int16_t *samples, uint8_t *dst,
286  ADPCMChannelStatus *c, int n, int stride)
287 {
288  //FIXME 6% faster if frontier is a compile-time constant
289  ADPCMEncodeContext *s = avctx->priv_data;
290  const int frontier = 1 << avctx->trellis;
291  const int version = avctx->codec->id;
292  TrellisPath *paths = s->paths, *p;
293  TrellisNode *node_buf = s->node_buf;
294  TrellisNode **nodep_buf = s->nodep_buf;
295  TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
296  TrellisNode **nodes_next = nodep_buf + frontier;
297  int pathn = 0, froze = -1, i, j, k, generation = 0;
298  uint8_t *hash = s->trellis_hash;
299  memset(hash, 0xff, 65536 * sizeof(*hash));
300 
301  memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
302  nodes[0] = node_buf + frontier;
303  nodes[0]->ssd = 0;
304  nodes[0]->path = 0;
305  nodes[0]->step = c->step_index;
306  nodes[0]->sample1 = c->sample1;
307  nodes[0]->sample2 = c->sample2;
308  if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
309  version == AV_CODEC_ID_ADPCM_IMA_QT ||
310  version == AV_CODEC_ID_ADPCM_SWF)
311  nodes[0]->sample1 = c->prev_sample;
312  if (version == AV_CODEC_ID_ADPCM_MS)
313  nodes[0]->step = c->idelta;
314  if (version == AV_CODEC_ID_ADPCM_YAMAHA) {
315  if (c->step == 0) {
316  nodes[0]->step = 127;
317  nodes[0]->sample1 = 0;
318  } else {
319  nodes[0]->step = c->step;
320  nodes[0]->sample1 = c->predictor;
321  }
322  }
323 
324  for (i = 0; i < n; i++) {
325  TrellisNode *t = node_buf + frontier*(i&1);
326  TrellisNode **u;
327  int sample = samples[i * stride];
328  int heap_pos = 0;
329  memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
330  for (j = 0; j < frontier && nodes[j]; j++) {
331  // higher j have higher ssd already, so they're likely
332  // to yield a suboptimal next sample too
333  const int range = (j < frontier / 2) ? 1 : 0;
334  const int step = nodes[j]->step;
335  int nidx;
336  if (version == AV_CODEC_ID_ADPCM_MS) {
337  const int predictor = ((nodes[j]->sample1 * c->coeff1) +
338  (nodes[j]->sample2 * c->coeff2)) / 64;
339  const int div = (sample - predictor) / step;
340  const int nmin = av_clip(div-range, -8, 6);
341  const int nmax = av_clip(div+range, -7, 7);
342  for (nidx = nmin; nidx <= nmax; nidx++) {
343  const int nibble = nidx & 0xf;
344  int dec_sample = predictor + nidx * step;
345 #define STORE_NODE(NAME, STEP_INDEX)\
346  int d;\
347  uint32_t ssd;\
348  int pos;\
349  TrellisNode *u;\
350  uint8_t *h;\
351  dec_sample = av_clip_int16(dec_sample);\
352  d = sample - dec_sample;\
353  ssd = nodes[j]->ssd + d*(unsigned)d;\
354  /* Check for wraparound, skip such samples completely. \
355  * Note, changing ssd to a 64 bit variable would be \
356  * simpler, avoiding this check, but it's slower on \
357  * x86 32 bit at the moment. */\
358  if (ssd < nodes[j]->ssd)\
359  goto next_##NAME;\
360  /* Collapse any two states with the same previous sample value. \
361  * One could also distinguish states by step and by 2nd to last
362  * sample, but the effects of that are negligible.
363  * Since nodes in the previous generation are iterated
364  * through a heap, they're roughly ordered from better to
365  * worse, but not strictly ordered. Therefore, an earlier
366  * node with the same sample value is better in most cases
367  * (and thus the current is skipped), but not strictly
368  * in all cases. Only skipping samples where ssd >=
369  * ssd of the earlier node with the same sample gives
370  * slightly worse quality, though, for some reason. */ \
371  h = &hash[(uint16_t) dec_sample];\
372  if (*h == generation)\
373  goto next_##NAME;\
374  if (heap_pos < frontier) {\
375  pos = heap_pos++;\
376  } else {\
377  /* Try to replace one of the leaf nodes with the new \
378  * one, but try a different slot each time. */\
379  pos = (frontier >> 1) +\
380  (heap_pos & ((frontier >> 1) - 1));\
381  if (ssd > nodes_next[pos]->ssd)\
382  goto next_##NAME;\
383  heap_pos++;\
384  }\
385  *h = generation;\
386  u = nodes_next[pos];\
387  if (!u) {\
388  av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
389  u = t++;\
390  nodes_next[pos] = u;\
391  u->path = pathn++;\
392  }\
393  u->ssd = ssd;\
394  u->step = STEP_INDEX;\
395  u->sample2 = nodes[j]->sample1;\
396  u->sample1 = dec_sample;\
397  paths[u->path].nibble = nibble;\
398  paths[u->path].prev = nodes[j]->path;\
399  /* Sift the newly inserted node up in the heap to \
400  * restore the heap property. */\
401  while (pos > 0) {\
402  int parent = (pos - 1) >> 1;\
403  if (nodes_next[parent]->ssd <= ssd)\
404  break;\
405  FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
406  pos = parent;\
407  }\
408  next_##NAME:;
409  STORE_NODE(ms, FFMAX(16,
410  (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
411  }
412  } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
413  version == AV_CODEC_ID_ADPCM_IMA_QT ||
414  version == AV_CODEC_ID_ADPCM_SWF) {
415 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
416  const int predictor = nodes[j]->sample1;\
417  const int div = (sample - predictor) * 4 / STEP_TABLE;\
418  int nmin = av_clip(div - range, -7, 6);\
419  int nmax = av_clip(div + range, -6, 7);\
420  if (nmin <= 0)\
421  nmin--; /* distinguish -0 from +0 */\
422  if (nmax < 0)\
423  nmax--;\
424  for (nidx = nmin; nidx <= nmax; nidx++) {\
425  const int nibble = nidx < 0 ? 7 - nidx : nidx;\
426  int dec_sample = predictor +\
427  (STEP_TABLE *\
428  ff_adpcm_yamaha_difflookup[nibble]) / 8;\
429  STORE_NODE(NAME, STEP_INDEX);\
430  }
432  av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
433  } else { //AV_CODEC_ID_ADPCM_YAMAHA
434  LOOP_NODES(yamaha, step,
435  av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
436  127, 24576));
437 #undef LOOP_NODES
438 #undef STORE_NODE
439  }
440  }
441 
442  u = nodes;
443  nodes = nodes_next;
444  nodes_next = u;
445 
446  generation++;
447  if (generation == 255) {
448  memset(hash, 0xff, 65536 * sizeof(*hash));
449  generation = 0;
450  }
451 
452  // prevent overflow
453  if (nodes[0]->ssd > (1 << 28)) {
454  for (j = 1; j < frontier && nodes[j]; j++)
455  nodes[j]->ssd -= nodes[0]->ssd;
456  nodes[0]->ssd = 0;
457  }
458 
459  // merge old paths to save memory
460  if (i == froze + FREEZE_INTERVAL) {
461  p = &paths[nodes[0]->path];
462  for (k = i; k > froze; k--) {
463  dst[k] = p->nibble;
464  p = &paths[p->prev];
465  }
466  froze = i;
467  pathn = 0;
468  // other nodes might use paths that don't coincide with the frozen one.
469  // checking which nodes do so is too slow, so just kill them all.
470  // this also slightly improves quality, but I don't know why.
471  memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
472  }
473  }
474 
475  p = &paths[nodes[0]->path];
476  for (i = n - 1; i > froze; i--) {
477  dst[i] = p->nibble;
478  p = &paths[p->prev];
479  }
480 
481  c->predictor = nodes[0]->sample1;
482  c->sample1 = nodes[0]->sample1;
483  c->sample2 = nodes[0]->sample2;
484  c->step_index = nodes[0]->step;
485  c->step = nodes[0]->step;
486  c->idelta = nodes[0]->step;
487 }
488 
489 static inline int adpcm_argo_compress_nibble(const ADPCMChannelStatus *cs, int16_t s,
490  int shift, int flag)
491 {
492  int nibble;
493 
494  if (flag)
495  nibble = 4 * s - 8 * cs->sample1 + 4 * cs->sample2;
496  else
497  nibble = 4 * s - 4 * cs->sample1;
498 
499  return (nibble >> shift) & 0x0F;
500 }
501 
503  const int16_t *samples, int nsamples,
504  int shift, int flag)
505 {
506  int64_t error = 0;
507 
508  if (pb) {
509  put_bits(pb, 4, shift - 2);
510  put_bits(pb, 1, 0);
511  put_bits(pb, 1, !!flag);
512  put_bits(pb, 2, 0);
513  }
514 
515  for (int n = 0; n < nsamples; n++) {
516  /* Compress the nibble, then expand it to see how much precision we've lost. */
517  int nibble = adpcm_argo_compress_nibble(cs, samples[n], shift, flag);
518  int16_t sample = ff_adpcm_argo_expand_nibble(cs, nibble, shift, flag);
519 
520  error += abs(samples[n] - sample);
521 
522  if (pb)
523  put_bits(pb, 4, nibble);
524  }
525 
526  return error;
527 }
528 
529 static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
530  const AVFrame *frame, int *got_packet_ptr)
531 {
532  int n, i, ch, st, pkt_size, ret;
533  const int16_t *samples;
534  int16_t **samples_p;
535  uint8_t *dst;
536  ADPCMEncodeContext *c = avctx->priv_data;
537  uint8_t *buf;
538 
539  samples = (const int16_t *)frame->data[0];
540  samples_p = (int16_t **)frame->extended_data;
541  st = avctx->channels == 2;
542 
543  if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF)
544  pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
545  else if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_SSI ||
547  pkt_size = (frame->nb_samples * avctx->channels) / 2;
548  else
549  pkt_size = avctx->block_align;
550  if ((ret = ff_alloc_packet2(avctx, avpkt, pkt_size, 0)) < 0)
551  return ret;
552  dst = avpkt->data;
553 
554  switch(avctx->codec->id) {
556  {
557  int blocks, j;
558 
559  blocks = (frame->nb_samples - 1) / 8;
560 
561  for (ch = 0; ch < avctx->channels; ch++) {
563  status->prev_sample = samples_p[ch][0];
564  /* status->step_index = 0;
565  XXX: not sure how to init the state machine */
566  bytestream_put_le16(&dst, status->prev_sample);
567  *dst++ = status->step_index;
568  *dst++ = 0; /* unknown */
569  }
570 
571  /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
572  if (avctx->trellis > 0) {
573  if (!FF_ALLOC_TYPED_ARRAY(buf, avctx->channels * blocks * 8))
574  return AVERROR(ENOMEM);
575  for (ch = 0; ch < avctx->channels; ch++) {
576  adpcm_compress_trellis(avctx, &samples_p[ch][1],
577  buf + ch * blocks * 8, &c->status[ch],
578  blocks * 8, 1);
579  }
580  for (i = 0; i < blocks; i++) {
581  for (ch = 0; ch < avctx->channels; ch++) {
582  uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
583  for (j = 0; j < 8; j += 2)
584  *dst++ = buf1[j] | (buf1[j + 1] << 4);
585  }
586  }
587  av_free(buf);
588  } else {
589  for (i = 0; i < blocks; i++) {
590  for (ch = 0; ch < avctx->channels; ch++) {
592  const int16_t *smp = &samples_p[ch][1 + i * 8];
593  for (j = 0; j < 8; j += 2) {
594  uint8_t v = adpcm_ima_compress_sample(status, smp[j ]);
595  v |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
596  *dst++ = v;
597  }
598  }
599  }
600  }
601  break;
602  }
604  {
605  PutBitContext pb;
606  init_put_bits(&pb, dst, pkt_size);
607 
608  for (ch = 0; ch < avctx->channels; ch++) {
610  put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
611  put_bits(&pb, 7, status->step_index);
612  if (avctx->trellis > 0) {
613  uint8_t buf[64];
614  adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
615  64, 1);
616  for (i = 0; i < 64; i++)
617  put_bits(&pb, 4, buf[i ^ 1]);
618  status->prev_sample = status->predictor;
619  } else {
620  for (i = 0; i < 64; i += 2) {
621  int t1, t2;
622  t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i ]);
623  t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
624  put_bits(&pb, 4, t2);
625  put_bits(&pb, 4, t1);
626  }
627  }
628  }
629 
630  flush_put_bits(&pb);
631  break;
632  }
634  {
635  PutBitContext pb;
636  init_put_bits(&pb, dst, pkt_size);
637 
638  av_assert0(avctx->trellis == 0);
639 
640  for (i = 0; i < frame->nb_samples; i++) {
641  for (ch = 0; ch < avctx->channels; ch++) {
642  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
643  }
644  }
645 
646  flush_put_bits(&pb);
647  break;
648  }
650  {
651  PutBitContext pb;
652  init_put_bits(&pb, dst, pkt_size);
653 
654  n = frame->nb_samples - 1;
655 
656  // store AdpcmCodeSize
657  put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
658 
659  // init the encoder state
660  for (i = 0; i < avctx->channels; i++) {
661  // clip step so it fits 6 bits
662  c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
663  put_sbits(&pb, 16, samples[i]);
664  put_bits(&pb, 6, c->status[i].step_index);
665  c->status[i].prev_sample = samples[i];
666  }
667 
668  if (avctx->trellis > 0) {
669  if (!(buf = av_malloc(2 * n)))
670  return AVERROR(ENOMEM);
671  adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
672  &c->status[0], n, avctx->channels);
673  if (avctx->channels == 2)
674  adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
675  buf + n, &c->status[1], n,
676  avctx->channels);
677  for (i = 0; i < n; i++) {
678  put_bits(&pb, 4, buf[i]);
679  if (avctx->channels == 2)
680  put_bits(&pb, 4, buf[n + i]);
681  }
682  av_free(buf);
683  } else {
684  for (i = 1; i < frame->nb_samples; i++) {
686  samples[avctx->channels * i]));
687  if (avctx->channels == 2)
689  samples[2 * i + 1]));
690  }
691  }
692  flush_put_bits(&pb);
693  break;
694  }
696  for (i = 0; i < avctx->channels; i++) {
697  int predictor = 0;
698  *dst++ = predictor;
699  c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
700  c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
701  }
702  for (i = 0; i < avctx->channels; i++) {
703  if (c->status[i].idelta < 16)
704  c->status[i].idelta = 16;
705  bytestream_put_le16(&dst, c->status[i].idelta);
706  }
707  for (i = 0; i < avctx->channels; i++)
708  c->status[i].sample2= *samples++;
709  for (i = 0; i < avctx->channels; i++) {
710  c->status[i].sample1 = *samples++;
711  bytestream_put_le16(&dst, c->status[i].sample1);
712  }
713  for (i = 0; i < avctx->channels; i++)
714  bytestream_put_le16(&dst, c->status[i].sample2);
715 
716  if (avctx->trellis > 0) {
717  n = avctx->block_align - 7 * avctx->channels;
718  if (!(buf = av_malloc(2 * n)))
719  return AVERROR(ENOMEM);
720  if (avctx->channels == 1) {
721  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
722  avctx->channels);
723  for (i = 0; i < n; i += 2)
724  *dst++ = (buf[i] << 4) | buf[i + 1];
725  } else {
726  adpcm_compress_trellis(avctx, samples, buf,
727  &c->status[0], n, avctx->channels);
728  adpcm_compress_trellis(avctx, samples + 1, buf + n,
729  &c->status[1], n, avctx->channels);
730  for (i = 0; i < n; i++)
731  *dst++ = (buf[i] << 4) | buf[n + i];
732  }
733  av_free(buf);
734  } else {
735  for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
736  int nibble;
737  nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
738  nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
739  *dst++ = nibble;
740  }
741  }
742  break;
744  n = frame->nb_samples / 2;
745  if (avctx->trellis > 0) {
746  if (!(buf = av_malloc(2 * n * 2)))
747  return AVERROR(ENOMEM);
748  n *= 2;
749  if (avctx->channels == 1) {
750  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
751  avctx->channels);
752  for (i = 0; i < n; i += 2)
753  *dst++ = buf[i] | (buf[i + 1] << 4);
754  } else {
755  adpcm_compress_trellis(avctx, samples, buf,
756  &c->status[0], n, avctx->channels);
757  adpcm_compress_trellis(avctx, samples + 1, buf + n,
758  &c->status[1], n, avctx->channels);
759  for (i = 0; i < n; i++)
760  *dst++ = buf[i] | (buf[n + i] << 4);
761  }
762  av_free(buf);
763  } else
764  for (n *= avctx->channels; n > 0; n--) {
765  int nibble;
766  nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
767  nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
768  *dst++ = nibble;
769  }
770  break;
772  {
773  PutBitContext pb;
774  init_put_bits(&pb, dst, pkt_size);
775 
776  av_assert0(avctx->trellis == 0);
777 
778  for (n = frame->nb_samples / 2; n > 0; n--) {
779  for (ch = 0; ch < avctx->channels; ch++) {
780  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
781  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, samples[st]));
782  }
783  samples += avctx->channels;
784  }
785 
786  flush_put_bits(&pb);
787  break;
788  }
790  {
791  PutBitContext pb;
792  init_put_bits(&pb, dst, pkt_size);
793 
794  av_assert0(frame->nb_samples == 32);
795 
796  for (ch = 0; ch < avctx->channels; ch++) {
797  int64_t error = INT64_MAX, tmperr = INT64_MAX;
798  int shift = 2, flag = 0;
799  int saved1 = c->status[ch].sample1;
800  int saved2 = c->status[ch].sample2;
801 
802  /* Find the optimal coefficients, bail early if we find a perfect result. */
803  for (int s = 2; s < 18 && tmperr != 0; s++) {
804  for (int f = 0; f < 2 && tmperr != 0; f++) {
805  c->status[ch].sample1 = saved1;
806  c->status[ch].sample2 = saved2;
807  tmperr = adpcm_argo_compress_block(c->status + ch, NULL, samples_p[ch],
808  frame->nb_samples, s, f);
809  if (tmperr < error) {
810  shift = s;
811  flag = f;
812  error = tmperr;
813  }
814  }
815  }
816 
817  /* Now actually do the encode. */
818  c->status[ch].sample1 = saved1;
819  c->status[ch].sample2 = saved2;
820  adpcm_argo_compress_block(c->status + ch, &pb, samples_p[ch],
821  frame->nb_samples, shift, flag);
822  }
823 
824  flush_put_bits(&pb);
825  break;
826  }
827  default:
828  return AVERROR(EINVAL);
829  }
830 
831  avpkt->size = pkt_size;
832  *got_packet_ptr = 1;
833  return 0;
834 }
835 
836 static const enum AVSampleFormat sample_fmts[] = {
838 };
839 
840 static const enum AVSampleFormat sample_fmts_p[] = {
842 };
843 
/*
 * Expand to the AVCodec definition for one ADPCM encoder variant.
 * All variants share the same init/encode/close entry points and differ
 * only in codec id, registered name, sample formats and capabilities.
 */
#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_) \
AVCodec ff_ ## name_ ## _encoder = {                                       \
    .name           = #name_,                                              \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),                    \
    .type           = AVMEDIA_TYPE_AUDIO,                                  \
    .id             = id_,                                                 \
    .priv_data_size = sizeof(ADPCMEncodeContext),                          \
    .init           = adpcm_encode_init,                                   \
    .encode2        = adpcm_encode_frame,                                  \
    .close          = adpcm_encode_close,                                  \
    .sample_fmts    = sample_fmts_,                                        \
    .capabilities   = capabilities_,                                       \
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,                           \
}
858 
859 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_ARGO, adpcm_argo, sample_fmts_p, 0, "ADPCM Argonaut Games");
860 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_APM, adpcm_ima_apm, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Ubisoft APM");
861 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, sample_fmts_p, 0, "ADPCM IMA QuickTime");
862 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_SSI, adpcm_ima_ssi, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Simon & Schuster Interactive");
863 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0, "ADPCM IMA WAV");
864 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, sample_fmts, 0, "ADPCM Microsoft");
865 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF, adpcm_swf, sample_fmts, 0, "ADPCM Shockwave Flash");
866 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, sample_fmts, 0, "ADPCM Yamaha");
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:535
#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_)
Definition: adpcmenc.c:844
int sample1
Definition: adpcmenc.c:46
int path
Definition: adpcmenc.c:45
version
Definition: libkvazaar.c:287
static int shift(int a, int b)
Definition: sonic.c:82
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
Definition: adpcmenc.c:61
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
static void put_sbits(PutBitContext *pb, int n, int32_t value)
Definition: put_bits.h:258
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:211
static uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:233
#define BLKSIZE
Definition: adpcm.h:31
#define ima
int size
Definition: packet.h:364
static uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:196
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:140
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
Definition: adpcmenc.c:171
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static void error(const char *err)
#define sample
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:1223
uint8_t * trellis_hash
Definition: adpcmenc.c:56
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:262
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
float delta
#define f(width, name)
Definition: cbs_vp9.c:255
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
ADPCM tables.
uint8_t * data
Definition: packet.h:363
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1750
#define av_log(a,...)
uint8_t hash[HASH_SIZE]
Definition: movenc.c:57
#define U(x)
Definition: vp56_arith.h:37
uint32_t ssd
Definition: adpcmenc.c:44
enum AVCodecID id
Definition: codec.h:204
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
Definition: utils.c:1563
ADPCM encoder/decoder common header.
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:692
#define STORE_NODE(NAME, STEP_INDEX)
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
#define t1
Definition: regdef.h:29
#define FFMAX(a, b)
Definition: common.h:94
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
#define FREEZE_INTERVAL
Definition: adpcmenc.c:59
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:80
static uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:183
#define FFMIN(a, b)
Definition: common.h:96
TrellisNode ** nodep_buf
Definition: adpcmenc.c:55
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
static void adpcm_compress_trellis(AVCodecContext *avctx, const int16_t *samples, uint8_t *dst, ADPCMChannelStatus *c, int n, int stride)
Definition: adpcmenc.c:284
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Definition: adpcmenc.c:529
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
TrellisPath * paths
Definition: adpcmenc.c:53
int sample2
Definition: adpcmenc.c:47
if(ret)
TrellisNode * node_buf
Definition: adpcmenc.c:54
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1206
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
enum AVCodecID codec_id
Definition: avcodec.h:536
int sample_rate
samples per second
Definition: avcodec.h:1186
#define abs(x)
Definition: cuda_runtime.h:35
main external API structure.
Definition: avcodec.h:526
int nibble
Definition: adpcmenc.c:39
int extradata_size
Definition: avcodec.h:628
static int adpcm_argo_compress_nibble(const ADPCMChannelStatus *cs, int16_t s, int shift, int flag)
Definition: adpcmenc.c:489
int step
Definition: adpcmenc.c:48
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:115
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
signed 16 bits
Definition: samplefmt.h:61
#define flag(name)
Definition: cbs_av1.c:552
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)
static int64_t adpcm_argo_compress_block(ADPCMChannelStatus *cs, PutBitContext *pb, const int16_t *samples, int nsamples, int shift, int flag)
Definition: adpcmenc.c:502
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
int trellis
trellis RD quantization
Definition: avcodec.h:1475
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
void * priv_data
Definition: avcodec.h:553
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
int channels
number of audio channels
Definition: avcodec.h:1187
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:836
Filter the word “frame” indicates either a video frame or a group of audio samples
#define av_freep(p)
int16_t step_index
Definition: adpcm.h:35
signed 16 bits, planar
Definition: samplefmt.h:67
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
ADPCMChannelStatus status[6]
Definition: adpcmenc.c:52
This structure stores compressed data.
Definition: packet.h:340
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
for(j=16;j >0;--j)
static enum AVSampleFormat sample_fmts_p[]
Definition: adpcmenc.c:840
#define t2
Definition: regdef.h:30
int i
Definition: input.c:407
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
bitstream writer API