FFmpeg
adpcmenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 #include "libavutil/opt.h"
26 
27 #include "avcodec.h"
28 #include "put_bits.h"
29 #include "bytestream.h"
30 #include "adpcm.h"
31 #include "adpcm_data.h"
32 #include "encode.h"
33 #include "internal.h"
34 
35 /**
36  * @file
37  * ADPCM encoders
38  * See ADPCM decoder reference documents for codec information.
39  */
40 
/**
 * One step of a candidate encoding path in the trellis search.
 * Paths are stored as a backward-linked list so the best path can be
 * reconstructed by walking prev links from the winning end node.
 */
typedef struct TrellisPath {
    int nibble; /**< ADPCM nibble emitted at this step */
    int prev;   /**< index of the preceding TrellisPath entry */
} TrellisPath;
45 
/**
 * One live state of the trellis frontier.
 * The nodes[] array is kept heap-ordered by ssd (see adpcm_compress_trellis).
 */
typedef struct TrellisNode {
    uint32_t ssd; /**< accumulated sum of squared differences (coding error) */
    int path;     /**< index into the TrellisPath array for backtracking */
    int sample1;  /**< most recently decoded sample in this state */
    int sample2;  /**< previous decoded sample (used by the MS ADPCM predictor) */
    int step;     /**< codec-dependent step state: step index, idelta, or step */
} TrellisNode;
53 
54 typedef struct ADPCMEncodeContext {
55  AVClass *class;
57 
62  uint8_t *trellis_hash;
64 
65 #define FREEZE_INTERVAL 128
66 
68 {
69  ADPCMEncodeContext *s = avctx->priv_data;
70  uint8_t *extradata;
71  int i;
72 
73  if (avctx->channels > 2) {
74  av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
75  return AVERROR(EINVAL);
76  }
77 
78  /*
79  * AMV's block size has to match that of the corresponding video
80  * stream. Relax the POT requirement.
81  */
82  if (avctx->codec->id != AV_CODEC_ID_ADPCM_IMA_AMV &&
83  (s->block_size & (s->block_size - 1))) {
84  av_log(avctx, AV_LOG_ERROR, "block size must be power of 2\n");
85  return AVERROR(EINVAL);
86  }
87 
88  if (avctx->trellis) {
89  int frontier, max_paths;
90 
91  if ((unsigned)avctx->trellis > 16U) {
92  av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
93  return AVERROR(EINVAL);
94  }
95 
96  if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_SSI ||
97  avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_APM ||
98  avctx->codec->id == AV_CODEC_ID_ADPCM_ARGO ||
99  avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_WS) {
100  /*
101  * The current trellis implementation doesn't work for extended
102  * runs of samples without periodic resets. Disallow it.
103  */
104  av_log(avctx, AV_LOG_ERROR, "trellis not supported\n");
105  return AVERROR_PATCHWELCOME;
106  }
107 
108  frontier = 1 << avctx->trellis;
109  max_paths = frontier * FREEZE_INTERVAL;
110  if (!FF_ALLOC_TYPED_ARRAY(s->paths, max_paths) ||
111  !FF_ALLOC_TYPED_ARRAY(s->node_buf, 2 * frontier) ||
112  !FF_ALLOC_TYPED_ARRAY(s->nodep_buf, 2 * frontier) ||
113  !FF_ALLOC_TYPED_ARRAY(s->trellis_hash, 65536))
114  return AVERROR(ENOMEM);
115  }
116 
118 
119  switch (avctx->codec->id) {
121  /* each 16 bits sample gives one nibble
122  and we have 4 bytes per channel overhead */
123  avctx->frame_size = (s->block_size - 4 * avctx->channels) * 8 /
124  (4 * avctx->channels) + 1;
125  /* seems frame_size isn't taken into account...
126  have to buffer the samples :-( */
127  avctx->block_align = s->block_size;
128  avctx->bits_per_coded_sample = 4;
129  break;
131  avctx->frame_size = 64;
132  avctx->block_align = 34 * avctx->channels;
133  break;
135  /* each 16 bits sample gives one nibble
136  and we have 7 bytes per channel overhead */
137  avctx->frame_size = (s->block_size - 7 * avctx->channels) * 2 / avctx->channels + 2;
138  avctx->bits_per_coded_sample = 4;
139  avctx->block_align = s->block_size;
141  return AVERROR(ENOMEM);
142  avctx->extradata_size = 32;
143  extradata = avctx->extradata;
144  bytestream_put_le16(&extradata, avctx->frame_size);
145  bytestream_put_le16(&extradata, 7); /* wNumCoef */
146  for (i = 0; i < 7; i++) {
147  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
148  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
149  }
150  break;
152  avctx->frame_size = s->block_size * 2 / avctx->channels;
153  avctx->block_align = s->block_size;
154  break;
156  if (avctx->sample_rate != 11025 &&
157  avctx->sample_rate != 22050 &&
158  avctx->sample_rate != 44100) {
159  av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
160  "22050 or 44100\n");
161  return AVERROR(EINVAL);
162  }
163  avctx->frame_size = 4096; /* Hardcoded according to the SWF spec. */
164  avctx->block_align = (2 + avctx->channels * (22 + 4 * (avctx->frame_size - 1)) + 7) / 8;
165  break;
168  avctx->frame_size = s->block_size * 2 / avctx->channels;
169  avctx->block_align = s->block_size;
170  break;
172  if (avctx->sample_rate != 22050) {
173  av_log(avctx, AV_LOG_ERROR, "Sample rate must be 22050\n");
174  return AVERROR(EINVAL);
175  }
176 
177  if (avctx->channels != 1) {
178  av_log(avctx, AV_LOG_ERROR, "Only mono is supported\n");
179  return AVERROR(EINVAL);
180  }
181 
182  avctx->frame_size = s->block_size;
183  avctx->block_align = 8 + (FFALIGN(avctx->frame_size, 2) / 2);
184  break;
186  avctx->frame_size = s->block_size * 2 / avctx->channels;
187  avctx->block_align = s->block_size;
188 
189  if (!(avctx->extradata = av_mallocz(28 + AV_INPUT_BUFFER_PADDING_SIZE)))
190  return AVERROR(ENOMEM);
191  avctx->extradata_size = 28;
192  break;
194  avctx->frame_size = 32;
195  avctx->block_align = 17 * avctx->channels;
196  break;
198  /* each 16 bits sample gives one nibble */
199  avctx->frame_size = s->block_size * 2 / avctx->channels;
200  avctx->block_align = s->block_size;
201  break;
202  default:
203  return AVERROR(EINVAL);
204  }
205 
206  return 0;
207 }
208 
210 {
211  ADPCMEncodeContext *s = avctx->priv_data;
212  av_freep(&s->paths);
213  av_freep(&s->node_buf);
214  av_freep(&s->nodep_buf);
215  av_freep(&s->trellis_hash);
216 
217  return 0;
218 }
219 
220 
222  int16_t sample)
223 {
224  int delta = sample - c->prev_sample;
225  int nibble = FFMIN(7, abs(delta) * 4 /
226  ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
227  c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
228  ff_adpcm_yamaha_difflookup[nibble]) / 8);
229  c->prev_sample = av_clip_int16(c->prev_sample);
230  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
231  return nibble;
232 }
233 
235 {
236  const int delta = sample - c->prev_sample;
237  const int step = ff_adpcm_step_table[c->step_index];
238  const int sign = (delta < 0) * 8;
239 
240  int nibble = FFMIN(abs(delta) * 4 / step, 7);
241  int diff = (step * nibble) >> 2;
242  if (sign)
243  diff = -diff;
244 
245  nibble = sign | nibble;
246 
247  c->prev_sample += diff;
248  c->prev_sample = av_clip_int16(c->prev_sample);
249  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
250  return nibble;
251 }
252 
254  int16_t sample)
255 {
256  int delta = sample - c->prev_sample;
257  int diff, step = ff_adpcm_step_table[c->step_index];
258  int nibble = 8*(delta < 0);
259 
260  delta= abs(delta);
261  diff = delta + (step >> 3);
262 
263  if (delta >= step) {
264  nibble |= 4;
265  delta -= step;
266  }
267  step >>= 1;
268  if (delta >= step) {
269  nibble |= 2;
270  delta -= step;
271  }
272  step >>= 1;
273  if (delta >= step) {
274  nibble |= 1;
275  delta -= step;
276  }
277  diff -= delta;
278 
279  if (nibble & 8)
280  c->prev_sample -= diff;
281  else
282  c->prev_sample += diff;
283 
284  c->prev_sample = av_clip_int16(c->prev_sample);
285  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
286 
287  return nibble;
288 }
289 
291  int16_t sample)
292 {
293  int predictor, nibble, bias;
294 
295  predictor = (((c->sample1) * (c->coeff1)) +
296  (( c->sample2) * (c->coeff2))) / 64;
297 
298  nibble = sample - predictor;
299  if (nibble >= 0)
300  bias = c->idelta / 2;
301  else
302  bias = -c->idelta / 2;
303 
304  nibble = (nibble + bias) / c->idelta;
305  nibble = av_clip_intp2(nibble, 3) & 0x0F;
306 
307  predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
308 
309  c->sample2 = c->sample1;
310  c->sample1 = av_clip_int16(predictor);
311 
312  c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
313  if (c->idelta < 16)
314  c->idelta = 16;
315 
316  return nibble;
317 }
318 
320  int16_t sample)
321 {
322  int nibble, delta;
323 
324  if (!c->step) {
325  c->predictor = 0;
326  c->step = 127;
327  }
328 
329  delta = sample - c->predictor;
330 
331  nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
332 
333  c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
334  c->predictor = av_clip_int16(c->predictor);
335  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
336  c->step = av_clip(c->step, 127, 24576);
337 
338  return nibble;
339 }
340 
342  const int16_t *samples, uint8_t *dst,
343  ADPCMChannelStatus *c, int n, int stride)
344 {
345  //FIXME 6% faster if frontier is a compile-time constant
346  ADPCMEncodeContext *s = avctx->priv_data;
347  const int frontier = 1 << avctx->trellis;
348  const int version = avctx->codec->id;
349  TrellisPath *paths = s->paths, *p;
350  TrellisNode *node_buf = s->node_buf;
351  TrellisNode **nodep_buf = s->nodep_buf;
352  TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
353  TrellisNode **nodes_next = nodep_buf + frontier;
354  int pathn = 0, froze = -1, i, j, k, generation = 0;
355  uint8_t *hash = s->trellis_hash;
356  memset(hash, 0xff, 65536 * sizeof(*hash));
357 
358  memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
359  nodes[0] = node_buf + frontier;
360  nodes[0]->ssd = 0;
361  nodes[0]->path = 0;
362  nodes[0]->step = c->step_index;
363  nodes[0]->sample1 = c->sample1;
364  nodes[0]->sample2 = c->sample2;
369  nodes[0]->sample1 = c->prev_sample;
371  nodes[0]->step = c->idelta;
373  if (c->step == 0) {
374  nodes[0]->step = 127;
375  nodes[0]->sample1 = 0;
376  } else {
377  nodes[0]->step = c->step;
378  nodes[0]->sample1 = c->predictor;
379  }
380  }
381 
382  for (i = 0; i < n; i++) {
383  TrellisNode *t = node_buf + frontier*(i&1);
384  TrellisNode **u;
385  int sample = samples[i * stride];
386  int heap_pos = 0;
387  memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
388  for (j = 0; j < frontier && nodes[j]; j++) {
389  // higher j have higher ssd already, so they're likely
390  // to yield a suboptimal next sample too
391  const int range = (j < frontier / 2) ? 1 : 0;
392  const int step = nodes[j]->step;
393  int nidx;
394  if (version == AV_CODEC_ID_ADPCM_MS) {
395  const int predictor = ((nodes[j]->sample1 * c->coeff1) +
396  (nodes[j]->sample2 * c->coeff2)) / 64;
397  const int div = (sample - predictor) / step;
398  const int nmin = av_clip(div-range, -8, 6);
399  const int nmax = av_clip(div+range, -7, 7);
400  for (nidx = nmin; nidx <= nmax; nidx++) {
401  const int nibble = nidx & 0xf;
402  int dec_sample = predictor + nidx * step;
403 #define STORE_NODE(NAME, STEP_INDEX)\
404  int d;\
405  uint32_t ssd;\
406  int pos;\
407  TrellisNode *u;\
408  uint8_t *h;\
409  dec_sample = av_clip_int16(dec_sample);\
410  d = sample - dec_sample;\
411  ssd = nodes[j]->ssd + d*(unsigned)d;\
412  /* Check for wraparound, skip such samples completely. \
413  * Note, changing ssd to a 64 bit variable would be \
414  * simpler, avoiding this check, but it's slower on \
415  * x86 32 bit at the moment. */\
416  if (ssd < nodes[j]->ssd)\
417  goto next_##NAME;\
418  /* Collapse any two states with the same previous sample value. \
419  * One could also distinguish states by step and by 2nd to last
420  * sample, but the effects of that are negligible.
421  * Since nodes in the previous generation are iterated
422  * through a heap, they're roughly ordered from better to
423  * worse, but not strictly ordered. Therefore, an earlier
424  * node with the same sample value is better in most cases
425  * (and thus the current is skipped), but not strictly
426  * in all cases. Only skipping samples where ssd >=
427  * ssd of the earlier node with the same sample gives
428  * slightly worse quality, though, for some reason. */ \
429  h = &hash[(uint16_t) dec_sample];\
430  if (*h == generation)\
431  goto next_##NAME;\
432  if (heap_pos < frontier) {\
433  pos = heap_pos++;\
434  } else {\
435  /* Try to replace one of the leaf nodes with the new \
436  * one, but try a different slot each time. */\
437  pos = (frontier >> 1) +\
438  (heap_pos & ((frontier >> 1) - 1));\
439  if (ssd > nodes_next[pos]->ssd)\
440  goto next_##NAME;\
441  heap_pos++;\
442  }\
443  *h = generation;\
444  u = nodes_next[pos];\
445  if (!u) {\
446  av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
447  u = t++;\
448  nodes_next[pos] = u;\
449  u->path = pathn++;\
450  }\
451  u->ssd = ssd;\
452  u->step = STEP_INDEX;\
453  u->sample2 = nodes[j]->sample1;\
454  u->sample1 = dec_sample;\
455  paths[u->path].nibble = nibble;\
456  paths[u->path].prev = nodes[j]->path;\
457  /* Sift the newly inserted node up in the heap to \
458  * restore the heap property. */\
459  while (pos > 0) {\
460  int parent = (pos - 1) >> 1;\
461  if (nodes_next[parent]->ssd <= ssd)\
462  break;\
463  FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
464  pos = parent;\
465  }\
466  next_##NAME:;
467  STORE_NODE(ms, FFMAX(16,
468  (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
469  }
470  } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
474 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
475  const int predictor = nodes[j]->sample1;\
476  const int div = (sample - predictor) * 4 / STEP_TABLE;\
477  int nmin = av_clip(div - range, -7, 6);\
478  int nmax = av_clip(div + range, -6, 7);\
479  if (nmin <= 0)\
480  nmin--; /* distinguish -0 from +0 */\
481  if (nmax < 0)\
482  nmax--;\
483  for (nidx = nmin; nidx <= nmax; nidx++) {\
484  const int nibble = nidx < 0 ? 7 - nidx : nidx;\
485  int dec_sample = predictor +\
486  (STEP_TABLE *\
487  ff_adpcm_yamaha_difflookup[nibble]) / 8;\
488  STORE_NODE(NAME, STEP_INDEX);\
489  }
491  av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
492  } else { //AV_CODEC_ID_ADPCM_YAMAHA
493  LOOP_NODES(yamaha, step,
494  av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
495  127, 24576));
496 #undef LOOP_NODES
497 #undef STORE_NODE
498  }
499  }
500 
501  u = nodes;
502  nodes = nodes_next;
503  nodes_next = u;
504 
505  generation++;
506  if (generation == 255) {
507  memset(hash, 0xff, 65536 * sizeof(*hash));
508  generation = 0;
509  }
510 
511  // prevent overflow
512  if (nodes[0]->ssd > (1 << 28)) {
513  for (j = 1; j < frontier && nodes[j]; j++)
514  nodes[j]->ssd -= nodes[0]->ssd;
515  nodes[0]->ssd = 0;
516  }
517 
518  // merge old paths to save memory
519  if (i == froze + FREEZE_INTERVAL) {
520  p = &paths[nodes[0]->path];
521  for (k = i; k > froze; k--) {
522  dst[k] = p->nibble;
523  p = &paths[p->prev];
524  }
525  froze = i;
526  pathn = 0;
527  // other nodes might use paths that don't coincide with the frozen one.
528  // checking which nodes do so is too slow, so just kill them all.
529  // this also slightly improves quality, but I don't know why.
530  memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
531  }
532  }
533 
534  p = &paths[nodes[0]->path];
535  for (i = n - 1; i > froze; i--) {
536  dst[i] = p->nibble;
537  p = &paths[p->prev];
538  }
539 
540  c->predictor = nodes[0]->sample1;
541  c->sample1 = nodes[0]->sample1;
542  c->sample2 = nodes[0]->sample2;
543  c->step_index = nodes[0]->step;
544  c->step = nodes[0]->step;
545  c->idelta = nodes[0]->step;
546 }
547 
548 static inline int adpcm_argo_compress_nibble(const ADPCMChannelStatus *cs, int16_t s,
549  int shift, int flag)
550 {
551  int nibble;
552 
553  if (flag)
554  nibble = 4 * s - 8 * cs->sample1 + 4 * cs->sample2;
555  else
556  nibble = 4 * s - 4 * cs->sample1;
557 
558  return (nibble >> shift) & 0x0F;
559 }
560 
562  const int16_t *samples, int nsamples,
563  int shift, int flag)
564 {
565  int64_t error = 0;
566 
567  if (pb) {
568  put_bits(pb, 4, shift - 2);
569  put_bits(pb, 1, 0);
570  put_bits(pb, 1, !!flag);
571  put_bits(pb, 2, 0);
572  }
573 
574  for (int n = 0; n < nsamples; n++) {
575  /* Compress the nibble, then expand it to see how much precision we've lost. */
576  int nibble = adpcm_argo_compress_nibble(cs, samples[n], shift, flag);
577  int16_t sample = ff_adpcm_argo_expand_nibble(cs, nibble, shift, flag);
578 
579  error += abs(samples[n] - sample);
580 
581  if (pb)
582  put_bits(pb, 4, nibble);
583  }
584 
585  return error;
586 }
587 
588 static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
589  const AVFrame *frame, int *got_packet_ptr)
590 {
591  int n, i, ch, st, pkt_size, ret;
592  const int16_t *samples;
593  int16_t **samples_p;
594  uint8_t *dst;
595  ADPCMEncodeContext *c = avctx->priv_data;
596  uint8_t *buf;
597 
598  samples = (const int16_t *)frame->data[0];
599  samples_p = (int16_t **)frame->extended_data;
600  st = avctx->channels == 2;
601 
602  if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_SSI ||
606  pkt_size = (frame->nb_samples * avctx->channels) / 2;
607  else
608  pkt_size = avctx->block_align;
609  if ((ret = ff_get_encode_buffer(avctx, avpkt, pkt_size, 0)) < 0)
610  return ret;
611  dst = avpkt->data;
612 
613  switch(avctx->codec->id) {
615  {
616  int blocks, j;
617 
618  blocks = (frame->nb_samples - 1) / 8;
619 
620  for (ch = 0; ch < avctx->channels; ch++) {
621  ADPCMChannelStatus *status = &c->status[ch];
622  status->prev_sample = samples_p[ch][0];
623  /* status->step_index = 0;
624  XXX: not sure how to init the state machine */
625  bytestream_put_le16(&dst, status->prev_sample);
626  *dst++ = status->step_index;
627  *dst++ = 0; /* unknown */
628  }
629 
630  /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
631  if (avctx->trellis > 0) {
632  if (!FF_ALLOC_TYPED_ARRAY(buf, avctx->channels * blocks * 8))
633  return AVERROR(ENOMEM);
634  for (ch = 0; ch < avctx->channels; ch++) {
635  adpcm_compress_trellis(avctx, &samples_p[ch][1],
636  buf + ch * blocks * 8, &c->status[ch],
637  blocks * 8, 1);
638  }
639  for (i = 0; i < blocks; i++) {
640  for (ch = 0; ch < avctx->channels; ch++) {
641  uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
642  for (j = 0; j < 8; j += 2)
643  *dst++ = buf1[j] | (buf1[j + 1] << 4);
644  }
645  }
646  av_free(buf);
647  } else {
648  for (i = 0; i < blocks; i++) {
649  for (ch = 0; ch < avctx->channels; ch++) {
650  ADPCMChannelStatus *status = &c->status[ch];
651  const int16_t *smp = &samples_p[ch][1 + i * 8];
652  for (j = 0; j < 8; j += 2) {
653  uint8_t v = adpcm_ima_compress_sample(status, smp[j ]);
654  v |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
655  *dst++ = v;
656  }
657  }
658  }
659  }
660  break;
661  }
663  {
664  PutBitContext pb;
665  init_put_bits(&pb, dst, pkt_size);
666 
667  for (ch = 0; ch < avctx->channels; ch++) {
668  ADPCMChannelStatus *status = &c->status[ch];
669  put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
670  put_bits(&pb, 7, status->step_index);
671  if (avctx->trellis > 0) {
672  uint8_t buf[64];
673  adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
674  64, 1);
675  for (i = 0; i < 64; i++)
676  put_bits(&pb, 4, buf[i ^ 1]);
677  status->prev_sample = status->predictor;
678  } else {
679  for (i = 0; i < 64; i += 2) {
680  int t1, t2;
681  t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i ]);
682  t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
683  put_bits(&pb, 4, t2);
684  put_bits(&pb, 4, t1);
685  }
686  }
687  }
688 
689  flush_put_bits(&pb);
690  break;
691  }
693  {
694  PutBitContext pb;
695  init_put_bits(&pb, dst, pkt_size);
696 
697  av_assert0(avctx->trellis == 0);
698 
699  for (i = 0; i < frame->nb_samples; i++) {
700  for (ch = 0; ch < avctx->channels; ch++) {
701  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
702  }
703  }
704 
705  flush_put_bits(&pb);
706  break;
707  }
709  {
710  PutBitContext pb;
711  init_put_bits(&pb, dst, pkt_size);
712 
713  av_assert0(avctx->trellis == 0);
714 
715  for (n = frame->nb_samples / 2; n > 0; n--) {
716  for (ch = 0; ch < avctx->channels; ch++) {
717  put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, *samples++));
718  put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, samples[st]));
719  }
720  samples += avctx->channels;
721  }
722 
723  flush_put_bits(&pb);
724  break;
725  }
727  {
728  PutBitContext pb;
729  init_put_bits(&pb, dst, pkt_size);
730 
731  n = frame->nb_samples - 1;
732 
733  /* NB: This is safe as we don't have AV_CODEC_CAP_SMALL_LAST_FRAME. */
734  av_assert0(n == 4095);
735 
736  // store AdpcmCodeSize
737  put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
738 
739  // init the encoder state
740  for (i = 0; i < avctx->channels; i++) {
741  // clip step so it fits 6 bits
742  c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
743  put_sbits(&pb, 16, samples[i]);
744  put_bits(&pb, 6, c->status[i].step_index);
745  c->status[i].prev_sample = samples[i];
746  }
747 
748  if (avctx->trellis > 0) {
749  uint8_t buf[8190 /* = 2 * n */];
750  adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
751  &c->status[0], n, avctx->channels);
752  if (avctx->channels == 2)
753  adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
754  buf + n, &c->status[1], n,
755  avctx->channels);
756  for (i = 0; i < n; i++) {
757  put_bits(&pb, 4, buf[i]);
758  if (avctx->channels == 2)
759  put_bits(&pb, 4, buf[n + i]);
760  }
761  } else {
762  for (i = 1; i < frame->nb_samples; i++) {
763  put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
764  samples[avctx->channels * i]));
765  if (avctx->channels == 2)
766  put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
767  samples[2 * i + 1]));
768  }
769  }
770  flush_put_bits(&pb);
771  break;
772  }
774  for (i = 0; i < avctx->channels; i++) {
775  int predictor = 0;
776  *dst++ = predictor;
777  c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
778  c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
779  }
780  for (i = 0; i < avctx->channels; i++) {
781  if (c->status[i].idelta < 16)
782  c->status[i].idelta = 16;
783  bytestream_put_le16(&dst, c->status[i].idelta);
784  }
785  for (i = 0; i < avctx->channels; i++)
786  c->status[i].sample2= *samples++;
787  for (i = 0; i < avctx->channels; i++) {
788  c->status[i].sample1 = *samples++;
789  bytestream_put_le16(&dst, c->status[i].sample1);
790  }
791  for (i = 0; i < avctx->channels; i++)
792  bytestream_put_le16(&dst, c->status[i].sample2);
793 
794  if (avctx->trellis > 0) {
795  n = avctx->block_align - 7 * avctx->channels;
796  if (!(buf = av_malloc(2 * n)))
797  return AVERROR(ENOMEM);
798  if (avctx->channels == 1) {
799  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
800  avctx->channels);
801  for (i = 0; i < n; i += 2)
802  *dst++ = (buf[i] << 4) | buf[i + 1];
803  } else {
804  adpcm_compress_trellis(avctx, samples, buf,
805  &c->status[0], n, avctx->channels);
806  adpcm_compress_trellis(avctx, samples + 1, buf + n,
807  &c->status[1], n, avctx->channels);
808  for (i = 0; i < n; i++)
809  *dst++ = (buf[i] << 4) | buf[n + i];
810  }
811  av_free(buf);
812  } else {
813  for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
814  int nibble;
815  nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
816  nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
817  *dst++ = nibble;
818  }
819  }
820  break;
822  n = frame->nb_samples / 2;
823  if (avctx->trellis > 0) {
824  if (!(buf = av_malloc(2 * n * 2)))
825  return AVERROR(ENOMEM);
826  n *= 2;
827  if (avctx->channels == 1) {
828  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
829  avctx->channels);
830  for (i = 0; i < n; i += 2)
831  *dst++ = buf[i] | (buf[i + 1] << 4);
832  } else {
833  adpcm_compress_trellis(avctx, samples, buf,
834  &c->status[0], n, avctx->channels);
835  adpcm_compress_trellis(avctx, samples + 1, buf + n,
836  &c->status[1], n, avctx->channels);
837  for (i = 0; i < n; i++)
838  *dst++ = buf[i] | (buf[n + i] << 4);
839  }
840  av_free(buf);
841  } else
842  for (n *= avctx->channels; n > 0; n--) {
843  int nibble;
844  nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
845  nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
846  *dst++ = nibble;
847  }
848  break;
850  {
851  PutBitContext pb;
852  init_put_bits(&pb, dst, pkt_size);
853 
854  av_assert0(avctx->trellis == 0);
855 
856  for (n = frame->nb_samples / 2; n > 0; n--) {
857  for (ch = 0; ch < avctx->channels; ch++) {
858  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
859  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, samples[st]));
860  }
861  samples += avctx->channels;
862  }
863 
864  flush_put_bits(&pb);
865  break;
866  }
868  {
869  av_assert0(avctx->channels == 1);
870 
871  c->status[0].prev_sample = *samples;
872  bytestream_put_le16(&dst, c->status[0].prev_sample);
873  bytestream_put_byte(&dst, c->status[0].step_index);
874  bytestream_put_byte(&dst, 0);
875  bytestream_put_le32(&dst, avctx->frame_size);
876 
877  if (avctx->trellis > 0) {
878  n = frame->nb_samples >> 1;
879 
880  if (!(buf = av_malloc(2 * n)))
881  return AVERROR(ENOMEM);
882 
883  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], 2 * n, avctx->channels);
884  for (i = 0; i < n; i++)
885  bytestream_put_byte(&dst, (buf[2 * i] << 4) | buf[2 * i + 1]);
886 
887  samples += 2 * n;
888  av_free(buf);
889  } else for (n = frame->nb_samples >> 1; n > 0; n--) {
890  int nibble;
891  nibble = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
892  nibble |= adpcm_ima_compress_sample(&c->status[0], *samples++) & 0x0F;
893  bytestream_put_byte(&dst, nibble);
894  }
895 
896  if (avctx->frame_size & 1) {
897  int nibble = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
898  bytestream_put_byte(&dst, nibble);
899  }
900  break;
901  }
903  {
904  PutBitContext pb;
905  init_put_bits(&pb, dst, pkt_size);
906 
907  av_assert0(frame->nb_samples == 32);
908 
909  for (ch = 0; ch < avctx->channels; ch++) {
910  int64_t error = INT64_MAX, tmperr = INT64_MAX;
911  int shift = 2, flag = 0;
912  int saved1 = c->status[ch].sample1;
913  int saved2 = c->status[ch].sample2;
914 
915  /* Find the optimal coefficients, bail early if we find a perfect result. */
916  for (int s = 2; s < 18 && tmperr != 0; s++) {
917  for (int f = 0; f < 2 && tmperr != 0; f++) {
918  c->status[ch].sample1 = saved1;
919  c->status[ch].sample2 = saved2;
920  tmperr = adpcm_argo_compress_block(c->status + ch, NULL, samples_p[ch],
921  frame->nb_samples, s, f);
922  if (tmperr < error) {
923  shift = s;
924  flag = f;
925  error = tmperr;
926  }
927  }
928  }
929 
930  /* Now actually do the encode. */
931  c->status[ch].sample1 = saved1;
932  c->status[ch].sample2 = saved2;
933  adpcm_argo_compress_block(c->status + ch, &pb, samples_p[ch],
934  frame->nb_samples, shift, flag);
935  }
936 
937  flush_put_bits(&pb);
938  break;
939  }
941  {
942  PutBitContext pb;
943  init_put_bits(&pb, dst, pkt_size);
944 
945  av_assert0(avctx->trellis == 0);
946  for (n = frame->nb_samples / 2; n > 0; n--) {
947  /* stereo: 1 byte (2 samples) for left, 1 byte for right */
948  for (ch = 0; ch < avctx->channels; ch++) {
949  int t1, t2;
950  t1 = adpcm_ima_compress_sample(&c->status[ch], *samples++);
951  t2 = adpcm_ima_compress_sample(&c->status[ch], samples[st]);
952  put_bits(&pb, 4, t2);
953  put_bits(&pb, 4, t1);
954  }
955  samples += avctx->channels;
956  }
958  break;
959  }
960  default:
961  return AVERROR(EINVAL);
962  }
963 
964  *got_packet_ptr = 1;
965  return 0;
966 }
967 
968 static const enum AVSampleFormat sample_fmts[] = {
970 };
971 
972 static const enum AVSampleFormat sample_fmts_p[] = {
974 };
975 
976 static const AVOption options[] = {
977  {
978  .name = "block_size",
979  .help = "set the block size",
980  .offset = offsetof(ADPCMEncodeContext, block_size),
981  .type = AV_OPT_TYPE_INT,
982  .default_val = {.i64 = 1024},
983  .min = 32,
984  .max = 8192, /* Is this a reasonable upper limit? */
986  },
987  { NULL }
988 };
989 
/**
 * Declare the per-codec AVClass (exposing the shared "block_size" option)
 * and the AVCodec encoder definition for one ADPCM variant.
 *
 * @param id_           AV_CODEC_ID_* identifier of the variant
 * @param name_         short codec name, also used to derive the symbol names
 * @param sample_fmts_  supported sample format list (interleaved or planar)
 * @param capabilities_ extra AV_CODEC_CAP_* flags (AV_CODEC_CAP_DR1 is always added)
 * @param long_name_    human-readable codec name
 */
#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_) \
static const AVClass name_ ## _encoder_class = { \
    .class_name = #name_, \
    .item_name  = av_default_item_name, \
    .option     = options, \
    .version    = LIBAVUTIL_VERSION_INT, \
}; \
 \
const AVCodec ff_ ## name_ ## _encoder = { \
    .name           = #name_, \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
    .type           = AVMEDIA_TYPE_AUDIO, \
    .id             = id_, \
    .priv_data_size = sizeof(ADPCMEncodeContext), \
    .init           = adpcm_encode_init, \
    .encode2        = adpcm_encode_frame, \
    .close          = adpcm_encode_close, \
    .sample_fmts    = sample_fmts_, \
    .capabilities   = capabilities_ | AV_CODEC_CAP_DR1, \
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE, \
    .priv_class     = &name_ ## _encoder_class, \
}
1012 
1013 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_ARGO, adpcm_argo, sample_fmts_p, 0, "ADPCM Argonaut Games");
1014 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, sample_fmts, 0, "ADPCM IMA AMV");
1015 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_APM, adpcm_ima_apm, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Ubisoft APM");
1016 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_ALP, adpcm_ima_alp, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA High Voltage Software ALP");
1017 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, sample_fmts_p, 0, "ADPCM IMA QuickTime");
1018 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_SSI, adpcm_ima_ssi, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Simon & Schuster Interactive");
1019 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0, "ADPCM IMA WAV");
1021 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, sample_fmts, 0, "ADPCM Microsoft");
1022 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF, adpcm_swf, sample_fmts, 0, "ADPCM Shockwave Flash");
1023 ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, sample_fmts, 0, "ADPCM Yamaha");
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:30
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1001
adpcm_yamaha_compress_sample
static uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:319
stride
int stride
Definition: mace.c:144
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:358
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:352
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
av_clip
#define av_clip
Definition: common.h:122
TrellisNode::sample1
int sample1
Definition: adpcmenc.c:49
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
LOOP_NODES
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:981
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:953
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:146
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
TrellisNode::path
int path
Definition: adpcmenc.c:48
put_sbits
static void put_sbits(PutBitContext *pb, int n, int32_t value)
Definition: put_bits.h:280
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:220
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:365
TrellisNode::sample2
int sample2
Definition: adpcmenc.c:50
AVOption
AVOption.
Definition: opt.h:248
encode.h
TrellisNode::step
int step
Definition: adpcmenc.c:51
t1
#define t1
Definition: regdef.h:29
ADPCM_ENCODER
#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_)
Definition: adpcmenc.c:975
hash
uint8_t hash[HASH_SIZE]
Definition: movenc.c:57
ADPCMEncodeContext::nodep_buf
TrellisNode ** nodep_buf
Definition: adpcmenc.c:61
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:393
U
#define U(x)
Definition: vp56_arith.h:37
STORE_NODE
#define STORE_NODE(NAME, STEP_INDEX)
TrellisNode
Definition: adpcmenc.c:46
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:102
ADPCMEncodeContext::status
ADPCMChannelStatus status[6]
Definition: adpcmenc.c:58
ADPCMEncodeContext::paths
TrellisPath * paths
Definition: adpcmenc.c:59
ADPCMEncodeContext::node_buf
TrellisNode * node_buf
Definition: adpcmenc.c:60
av_get_bits_per_sample
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
Definition: utils.c:585
TrellisNode::ssd
uint32_t ssd
Definition: adpcmenc.c:47
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
av_cold
#define av_cold
Definition: attributes.h:90
options
static const AVOption options[]
Definition: adpcmenc.c:961
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:486
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:278
TrellisPath::nibble
int nibble
Definition: adpcmenc.c:42
ADPCMEncodeContext::block_size
int block_size
Definition: adpcmenc.c:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
f
#define f(width, name)
Definition: cbs_vp9.c:255
PutBitContext
Definition: put_bits.h:49
adpcm_compress_trellis
static void adpcm_compress_trellis(AVCodecContext *avctx, const int16_t *samples, uint8_t *dst, ADPCMChannelStatus *c, int n, int stride)
Definition: adpcmenc.c:341
AV_OPT_FLAG_AUDIO_PARAM
#define AV_OPT_FLAG_AUDIO_PARAM
Definition: opt.h:280
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:394
if
if(ret)
Definition: filter_design.txt:179
TrellisPath
Definition: aaccoder.c:188
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
av_clip_int16
#define av_clip_int16
Definition: common.h:137
NULL
#define NULL
Definition: coverity.c:32
av_clip_intp2
#define av_clip_intp2
Definition: common.h:143
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:366
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:356
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:394
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:371
abs
#define abs(x)
Definition: cuda_runtime.h:35
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1218
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
adpcm_ima_alp_compress_sample
static uint8_t adpcm_ima_alp_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:234
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
adpcm.h
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:398
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
sample
#define sample
Definition: flacdsp_template.c:44
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:365
AVOption::name
const char * name
Definition: opt.h:249
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
version
version
Definition: libkvazaar.c:307
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:164
FREEZE_INTERVAL
#define FREEZE_INTERVAL
Definition: adpcmenc.c:65
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:982
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
flag
#define flag(name)
Definition: cbs_av1.c:553
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1407
i
int i
Definition: input.c:407
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:485
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
ADPCMEncodeContext::trellis_hash
uint8_t * trellis_hash
Definition: adpcmenc.c:62
delta
float delta
Definition: vorbis_enc_data.h:430
adpcm_ima_compress_sample
static uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:221
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:397
TrellisPath::prev
int prev
Definition: aaccoder.c:190
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:244
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:671
adpcm_argo_compress_block
static int64_t adpcm_argo_compress_block(ADPCMChannelStatus *cs, PutBitContext *pb, const int16_t *samples, int nsamples, int shift, int flag)
Definition: adpcmenc.c:546
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
adpcm_encode_frame
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Definition: adpcmenc.c:573
avcodec.h
ret
ret
Definition: filter_design.txt:187
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1018
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ADPCMEncodeContext
Definition: adpcmenc.c:54
sample_fmts_p
static enum AVSampleFormat sample_fmts_p[]
Definition: adpcmenc.c:957
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:384
t2
#define t2
Definition: regdef.h:30
ima
#define ima
Definition: vf_colormatrix.c:110
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:77
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
shift
static int shift(int a, int b)
Definition: sonic.c:83
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:395
adpcm_encode_init
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
Definition: adpcmenc.c:67
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
adpcm_argo_compress_nibble
static int adpcm_argo_compress_nibble(const ADPCMChannelStatus *cs, int16_t s, int shift, int flag)
Definition: adpcmenc.c:533
AVPacket
This structure stores compressed data.
Definition: packet.h:342
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:411
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:353
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
adpcm_ima_qt_compress_sample
static uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:253
bytestream.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AV_CODEC_CAP_SMALL_LAST_FRAME
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:82
put_bits.h
ADPCMChannelStatus
Definition: adpcm.h:31
adpcm_ms_compress_sample
static uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:290
adpcm_encode_close
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
Definition: adpcmenc.c:209