FFmpeg
apedec.c
1 /*
2  * Monkey's Audio lossless audio decoder
3  * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
4  * based upon libdemac from Dave Chapman.
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <inttypes.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/crc.h"
28 #include "libavutil/opt.h"
29 #include "lossless_audiodsp.h"
30 #include "avcodec.h"
31 #include "bswapdsp.h"
32 #include "bytestream.h"
33 #include "internal.h"
34 #include "get_bits.h"
35 #include "unary.h"
36 
37 /**
38  * @file
39  * Monkey's Audio lossless audio decoder
40  */
41 
42 #define MAX_CHANNELS 2
43 #define MAX_BYTESPERSAMPLE 3
44 
45 #define APE_FRAMECODE_MONO_SILENCE 1
46 #define APE_FRAMECODE_STEREO_SILENCE 3
47 #define APE_FRAMECODE_PSEUDO_STEREO 4
48 
49 #define HISTORY_SIZE 512
50 #define PREDICTOR_ORDER 8
51 /** Total size of all predictor histories */
52 #define PREDICTOR_SIZE 50
53 
54 #define YDELAYA (18 + PREDICTOR_ORDER*4)
55 #define YDELAYB (18 + PREDICTOR_ORDER*3)
56 #define XDELAYA (18 + PREDICTOR_ORDER*2)
57 #define XDELAYB (18 + PREDICTOR_ORDER)
58 
59 #define YADAPTCOEFFSA 18
60 #define XADAPTCOEFFSA 14
61 #define YADAPTCOEFFSB 10
62 #define XADAPTCOEFFSB 5
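/* The *DELAY* and *ADAPTCOEFFS* values above are fixed offsets into APEPredictor.buf;
 * buf advances by one slot per decoded sample and is memmoved back to the start of
 * historybuffer once HISTORY_SIZE slots have been consumed (see the predictor loops below). */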
63 
64 /**
65  * Possible compression levels
66  * @{
67  */
68 enum APECompressionLevel {
69  COMPRESSION_LEVEL_FAST = 1000,
70  COMPRESSION_LEVEL_NORMAL = 2000,
71  COMPRESSION_LEVEL_HIGH = 3000,
72  COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
73  COMPRESSION_LEVEL_INSANE = 5000
74 };
75 /** @} */
76 
77 #define APE_FILTER_LEVELS 3
78 
79 /** Filter orders depending on compression level */
80 static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
81  { 0, 0, 0 },
82  { 16, 0, 0 },
83  { 64, 0, 0 },
84  { 32, 256, 0 },
85  { 16, 256, 1280 }
86 };
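/* Indexed by s->fset = compression_level / 1000 - 1, i.e. the FAST..INSANE levels map to
 * rows 0..4; a zero order terminates the list of active filters for a level. */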
87 
88 /** Filter fraction bits depending on compression level */
89 static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
90  { 0, 0, 0 },
91  { 11, 0, 0 },
92  { 11, 0, 0 },
93  { 10, 13, 0 },
94  { 11, 13, 15 }
95 };
96 
97 
98 /** Filters applied to the decoded data */
99 typedef struct APEFilter {
100  int16_t *coeffs; ///< actual coefficients used in filtering
101  int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting the actual filter coefficients
102  int16_t *historybuffer; ///< filter memory
103  int16_t *delay; ///< filtered values
104 
105  int avg;
106 } APEFilter;
107 
108 typedef struct APERice {
109  uint32_t k;
110  uint32_t ksum;
111 } APERice;
112 
113 typedef struct APERangecoder {
114  uint32_t low; ///< low end of interval
115  uint32_t range; ///< length of interval
116  uint32_t help; ///< bytes_to_follow resp. intermediate value
117  unsigned int buffer; ///< buffer for input/output
118 } APERangecoder;
119 
120 /** Filter histories */
121 typedef struct APEPredictor {
122  int32_t *buf;
123 
124  int32_t lastA[2];
125 
126  int32_t filterA[2];
127  int32_t filterB[2];
128 
129  uint32_t coeffsA[2][4]; ///< adaption coefficients
130  uint32_t coeffsB[2][5]; ///< adaption coefficients
131  int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
132 
133  unsigned int sample_pos;
134 } APEPredictor;
135 
136 /** Decoder context */
137 typedef struct APEContext {
138  AVClass *class; ///< class for AVOptions
139  AVCodecContext *avctx;
140  BswapDSPContext bdsp;
141  LLAudDSPContext adsp;
142  int channels;
143  int samples; ///< samples left to decode in current frame
144  int bps;
145 
146  int fileversion; ///< codec version, very important in decoding process
147  int compression_level; ///< compression levels
148  int fset; ///< which filter set to use (calculated from compression level)
149  int flags; ///< global decoder flags
150 
151  uint32_t CRC; ///< signalled frame CRC
152  uint32_t CRC_state; ///< accumulated CRC
153  int frameflags; ///< frame flags
154  APEPredictor predictor; ///< predictor used for final reconstruction
155 
156  int32_t *decoded_buffer;
157  int decoded_size;
158  int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel
159  int blocks_per_loop; ///< maximum number of samples to decode for each call
160 
161  int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
162 
163  APERangecoder rc; ///< rangecoder used to decode actual values
164  APERice riceX; ///< rice code parameters for the second channel
165  APERice riceY; ///< rice code parameters for the first channel
166  APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
167  GetBitContext gb;
168 
169  uint8_t *data; ///< current frame data
170  uint8_t *data_end; ///< frame data end
171  int data_size; ///< frame data allocated size
172  const uint8_t *ptr; ///< current position in frame data
173 
174  int error;
175 
176  void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
177  void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
178  void (*predictor_decode_mono)(struct APEContext *ctx, int count);
179  void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
180 } APEContext;
181 
182 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
183  int32_t *decoded1, int count);
184 
185 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
186 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
187 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
188 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
189 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
190 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
191 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
192 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
193 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
194 
195 static void predictor_decode_mono_3800(APEContext *ctx, int count);
196 static void predictor_decode_stereo_3800(APEContext *ctx, int count);
197 static void predictor_decode_mono_3930(APEContext *ctx, int count);
198 static void predictor_decode_stereo_3930(APEContext *ctx, int count);
199 static void predictor_decode_mono_3950(APEContext *ctx, int count);
200 static void predictor_decode_stereo_3950(APEContext *ctx, int count);
201 
202 static av_cold int ape_decode_close(AVCodecContext *avctx)
203 {
204  APEContext *s = avctx->priv_data;
205  int i;
206 
207  for (i = 0; i < APE_FILTER_LEVELS; i++)
208  av_freep(&s->filterbuf[i]);
209 
210  av_freep(&s->decoded_buffer);
211  av_freep(&s->data);
212  s->decoded_size = s->data_size = 0;
213 
214  return 0;
215 }
216 
217 static av_cold int ape_decode_init(AVCodecContext *avctx)
218 {
219  APEContext *s = avctx->priv_data;
220  int i;
221 
222  if (avctx->extradata_size != 6) {
223  av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
224  return AVERROR(EINVAL);
225  }
226  if (avctx->channels > 2) {
227  av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
228  return AVERROR(EINVAL);
229  }
230  s->bps = avctx->bits_per_coded_sample;
231  switch (s->bps) {
232  case 8:
233  avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
234  break;
235  case 16:
236  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
237  break;
238  case 24:
239  avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
240  break;
241  default:
242  avpriv_request_sample(avctx,
243  "%d bits per coded sample", s->bps);
244  return AVERROR_PATCHWELCOME;
245  }
246  s->avctx = avctx;
247  s->channels = avctx->channels;
248  s->fileversion = AV_RL16(avctx->extradata);
249  s->compression_level = AV_RL16(avctx->extradata + 2);
250  s->flags = AV_RL16(avctx->extradata + 4);
251 
252  av_log(avctx, AV_LOG_VERBOSE, "Compression Level: %d - Flags: %d\n",
253  s->compression_level, s->flags);
254  if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE ||
255  !s->compression_level ||
256  (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) {
257  av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
258  s->compression_level);
259  return AVERROR_INVALIDDATA;
260  }
261  s->fset = s->compression_level / 1000 - 1;
262  for (i = 0; i < APE_FILTER_LEVELS; i++) {
263  if (!ape_filter_orders[s->fset][i])
264  break;
265  if (!(s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4)))
266  return AVERROR(ENOMEM);
267  }
268 
269  if (s->fileversion < 3860) {
270  s->entropy_decode_mono = entropy_decode_mono_0000;
271  s->entropy_decode_stereo = entropy_decode_stereo_0000;
272  } else if (s->fileversion < 3900) {
273  s->entropy_decode_mono = entropy_decode_mono_3860;
274  s->entropy_decode_stereo = entropy_decode_stereo_3860;
275  } else if (s->fileversion < 3930) {
276  s->entropy_decode_mono = entropy_decode_mono_3900;
277  s->entropy_decode_stereo = entropy_decode_stereo_3900;
278  } else if (s->fileversion < 3990) {
279  s->entropy_decode_mono = entropy_decode_mono_3900;
280  s->entropy_decode_stereo = entropy_decode_stereo_3930;
281  } else {
282  s->entropy_decode_mono = entropy_decode_mono_3990;
283  s->entropy_decode_stereo = entropy_decode_stereo_3990;
284  }
285 
286  if (s->fileversion < 3930) {
287  s->predictor_decode_mono = predictor_decode_mono_3800;
288  s->predictor_decode_stereo = predictor_decode_stereo_3800;
289  } else if (s->fileversion < 3950) {
290  s->predictor_decode_mono = predictor_decode_mono_3930;
291  s->predictor_decode_stereo = predictor_decode_stereo_3930;
292  } else {
293  s->predictor_decode_mono = predictor_decode_mono_3950;
294  s->predictor_decode_stereo = predictor_decode_stereo_3950;
295  }
296 
297  ff_bswapdsp_init(&s->bdsp);
298  ff_llauddsp_init(&s->adsp);
299  avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
300 
301  return 0;
302 }
303 
304 /**
305  * @name APE range decoding functions
306  * @{
307  */
308 
309 #define CODE_BITS 32
310 #define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
311 #define SHIFT_BITS (CODE_BITS - 9)
312 #define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)
313 #define BOTTOM_VALUE (TOP_VALUE >> 8)
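/* With CODE_BITS == 32 these evaluate to EXTRA_BITS == 7, SHIFT_BITS == 23 and
 * BOTTOM_VALUE == 1 << 23: range_dec_normalize() below keeps rc.range above BOTTOM_VALUE
 * by pulling in one coded byte per renormalization step. */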
314 
315 /** Start the decoder */
316 static inline void range_start_decoding(APEContext *ctx)
317 {
318  ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
319  ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
320  ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
321 }
322 
323 /** Perform normalization */
324 static inline void range_dec_normalize(APEContext *ctx)
325 {
326  while (ctx->rc.range <= BOTTOM_VALUE) {
327  ctx->rc.buffer <<= 8;
328  if(ctx->ptr < ctx->data_end) {
329  ctx->rc.buffer += *ctx->ptr;
330  ctx->ptr++;
331  } else {
332  ctx->error = 1;
333  }
334  ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
335  ctx->rc.range <<= 8;
336  }
337 }
338 
339 /**
340  * Calculate cumulative frequency for next symbol. Does NO update!
341  * @param ctx decoder context
342  * @param tot_f is the total frequency or (code_value)1<<shift
343  * @return the cumulative frequency
344  */
345 static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
346 {
347  range_dec_normalize(ctx);
348  ctx->rc.help = ctx->rc.range / tot_f;
349  return ctx->rc.low / ctx->rc.help;
350 }
351 
352 /**
353  * Decode value with given size in bits
354  * @param ctx decoder context
355  * @param shift number of bits to decode
356  */
357 static inline int range_decode_culshift(APEContext *ctx, int shift)
358 {
359  range_dec_normalize(ctx);
360  ctx->rc.help = ctx->rc.range >> shift;
361  return ctx->rc.low / ctx->rc.help;
362 }
363 
364 
365 /**
366  * Update decoding state
367  * @param ctx decoder context
368  * @param sy_f the interval length (frequency of the symbol)
369  * @param lt_f the lower end (frequency sum of < symbols)
370  */
371 static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
372 {
373  ctx->rc.low -= ctx->rc.help * lt_f;
374  ctx->rc.range = ctx->rc.help * sy_f;
375 }
376 
377 /** Decode n bits (n <= 16) without modelling */
378 static inline int range_decode_bits(APEContext *ctx, int n)
379 {
380  int sym = range_decode_culshift(ctx, n);
381  range_decode_update(ctx, 1, sym);
382  return sym;
383 }
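/* Decoding a symbol therefore proceeds in two steps: range_decode_culfreq() (or
 * range_decode_culshift() for power-of-two totals) returns the cumulative frequency
 * cf = rc.low / (rc.range / tot_f); the caller picks the symbol whose interval
 * [lt_f, lt_f + sy_f) contains cf and calls range_decode_update() to shrink the coder
 * state to that interval, as done in range_decode_bits() above and range_get_symbol() below. */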
384 
385 
386 #define MODEL_ELEMENTS 64
387 
388 /**
389  * Fixed probabilities for symbols in Monkey Audio version 3.97
390  */
391 static const uint16_t counts_3970[22] = {
392  0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
393  62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
394  65450, 65469, 65480, 65487, 65491, 65493,
395 };
396 
397 /**
398  * Probability ranges for symbols in Monkey Audio version 3.97
399  */
400 static const uint16_t counts_diff_3970[21] = {
401  14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
402  1104, 677, 415, 248, 150, 89, 54, 31,
403  19, 11, 7, 4, 2,
404 };
405 
406 /**
407  * Fixed probabilities for symbols in Monkey Audio version 3.98
408  */
409 static const uint16_t counts_3980[22] = {
410  0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
411  64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
412  65485, 65488, 65490, 65491, 65492, 65493,
413 };
414 
415 /**
416  * Probability ranges for symbols in Monkey Audio version 3.98
417  */
418 static const uint16_t counts_diff_3980[21] = {
419  19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
420  261, 119, 65, 31, 19, 10, 6, 3,
421  3, 2, 1, 1, 1,
422 };
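/* In both table pairs counts_diff[i] == counts[i + 1] - counts[i]; the remaining
 * probability mass up to 1 << 16 is mapped linearly onto the rare symbols 21..63 by
 * range_get_symbol(), with symbol MODEL_ELEMENTS - 1 acting as the escape that the
 * value decoders expand with extra raw bits. */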
423 
424 /**
425  * Decode symbol
426  * @param ctx decoder context
427  * @param counts probability range start position
428  * @param counts_diff probability range widths
429  */
430 static inline int range_get_symbol(APEContext *ctx,
431  const uint16_t counts[],
432  const uint16_t counts_diff[])
433 {
434  int symbol, cf;
435 
436  cf = range_decode_culshift(ctx, 16);
437 
438  if(cf > 65492){
439  symbol= cf - 65535 + 63;
440  range_decode_update(ctx, 1, cf);
441  if(cf > 65535)
442  ctx->error=1;
443  return symbol;
444  }
445  /* figure out the symbol inefficiently; a binary search would be much better */
446  for (symbol = 0; counts[symbol + 1] <= cf; symbol++);
447 
448  range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
449 
450  return symbol;
451 }
452 /** @} */ // group rangecoder
453 
454 static inline void update_rice(APERice *rice, unsigned int x)
455 {
456  int lim = rice->k ? (1 << (rice->k + 4)) : 0;
457  rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
458 
459  if (rice->ksum < lim)
460  rice->k--;
461  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
462  rice->k++;
463 }
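/* rice->ksum is, in effect, a moving average of (x + 1) / 2 over roughly the last 32
 * samples, scaled by 32; k is then nudged so that 1 << (k + 4) <= ksum < 1 << (k + 5),
 * i.e. k tracks the magnitude of recently decoded values. */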
464 
465 static inline int get_rice_ook(GetBitContext *gb, int k)
466 {
467  unsigned int x;
468 
469  x = get_unary(gb, 1, get_bits_left(gb));
470 
471  if (k)
472  x = (x << k) | get_bits(gb, k);
473 
474  return x;
475 }
476 
477 static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
478  APERice *rice)
479 {
480  unsigned int x, overflow;
481 
482  overflow = get_unary(gb, 1, get_bits_left(gb));
483 
484  if (ctx->fileversion > 3880) {
485  while (overflow >= 16) {
486  overflow -= 16;
487  rice->k += 4;
488  }
489  }
490 
491  if (!rice->k)
492  x = overflow;
493  else if(rice->k <= MIN_CACHE_BITS) {
494  x = (overflow << rice->k) + get_bits(gb, rice->k);
495  } else {
496  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
497  ctx->error = 1;
498  return AVERROR_INVALIDDATA;
499  }
500  rice->ksum += x - (rice->ksum + 8 >> 4);
501  if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0))
502  rice->k--;
503  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
504  rice->k++;
505 
506  /* Convert to signed */
507  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
508 }
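/* The "convert to signed" expression ((x >> 1) ^ ((x & 1) - 1)) + 1, also used by the
 * other value decoders below, undoes a zigzag-style mapping:
 * x = 0, 1, 2, 3, 4, ... becomes 0, 1, -1, 2, -2, ... */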
509 
510 static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice)
511 {
512  unsigned int x, overflow;
513  int tmpk;
514 
515  overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);
516 
517  if (overflow == (MODEL_ELEMENTS - 1)) {
518  tmpk = range_decode_bits(ctx, 5);
519  overflow = 0;
520  } else
521  tmpk = (rice->k < 1) ? 0 : rice->k - 1;
522 
523  if (tmpk <= 16 || ctx->fileversion < 3910) {
524  if (tmpk > 23) {
525  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
526  return AVERROR_INVALIDDATA;
527  }
528  x = range_decode_bits(ctx, tmpk);
529  } else if (tmpk <= 31) {
530  x = range_decode_bits(ctx, 16);
531  x |= (range_decode_bits(ctx, tmpk - 16) << 16);
532  } else {
533  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
534  return AVERROR_INVALIDDATA;
535  }
536  x += overflow << tmpk;
537 
538  update_rice(rice, x);
539 
540  /* Convert to signed */
541  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
542 }
543 
544 static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
545 {
546  unsigned int x, overflow;
547  int base, pivot;
548 
549  pivot = rice->ksum >> 5;
550  if (pivot == 0)
551  pivot = 1;
552 
553  overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);
554 
555  if (overflow == (MODEL_ELEMENTS - 1)) {
556  overflow = (unsigned)range_decode_bits(ctx, 16) << 16;
557  overflow |= range_decode_bits(ctx, 16);
558  }
559 
560  if (pivot < 0x10000) {
561  base = range_decode_culfreq(ctx, pivot);
562  range_decode_update(ctx, 1, base);
563  } else {
564  int base_hi = pivot, base_lo;
565  int bbits = 0;
566 
567  while (base_hi & ~0xFFFF) {
568  base_hi >>= 1;
569  bbits++;
570  }
571  base_hi = range_decode_culfreq(ctx, base_hi + 1);
572  range_decode_update(ctx, 1, base_hi);
573  base_lo = range_decode_culfreq(ctx, 1 << bbits);
574  range_decode_update(ctx, 1, base_lo);
575 
576  base = (base_hi << bbits) + base_lo;
577  }
578 
579  x = base + overflow * pivot;
580 
581  update_rice(rice, x);
582 
583  /* Convert to signed */
584  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
585 }
586 
587 static int get_k(int ksum)
588 {
589  return av_log2(ksum) + !!ksum;
590 }
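/* get_k() returns the bit length of ksum (0 for ksum == 0), i.e. av_log2(ksum) + 1 for
 * nonzero input; decode_array_0000() below uses it to derive the Rice parameter from a
 * running sum of recent values. */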
591 
592 static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
593  int32_t *out, APERice *rice, int blockstodecode)
594 {
595  int i;
596  unsigned ksummax, ksummin;
597 
598  rice->ksum = 0;
599  for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
600  out[i] = get_rice_ook(&ctx->gb, 10);
601  rice->ksum += out[i];
602  }
603 
604  if (blockstodecode <= 5)
605  goto end;
606 
607  rice->k = get_k(rice->ksum / 10);
608  if (rice->k >= 24)
609  return;
610  for (; i < FFMIN(blockstodecode, 64); i++) {
611  out[i] = get_rice_ook(&ctx->gb, rice->k);
612  rice->ksum += out[i];
613  rice->k = get_k(rice->ksum / ((i + 1) * 2));
614  if (rice->k >= 24)
615  return;
616  }
617 
618  if (blockstodecode <= 64)
619  goto end;
620 
621  rice->k = get_k(rice->ksum >> 7);
622  ksummax = 1 << rice->k + 7;
623  ksummin = rice->k ? (1 << rice->k + 6) : 0;
624  for (; i < blockstodecode; i++) {
625  if (get_bits_left(&ctx->gb) < 1) {
626  ctx->error = 1;
627  return;
628  }
629  out[i] = get_rice_ook(&ctx->gb, rice->k);
630  rice->ksum += out[i] - (unsigned)out[i - 64];
631  while (rice->ksum < ksummin) {
632  rice->k--;
633  ksummin = rice->k ? ksummin >> 1 : 0;
634  ksummax >>= 1;
635  }
636  while (rice->ksum >= ksummax) {
637  rice->k++;
638  if (rice->k > 24)
639  return;
640  ksummax <<= 1;
641  ksummin = ksummin ? ksummin << 1 : 128;
642  }
643  }
644 
645 end:
646  for (i = 0; i < blockstodecode; i++)
647  out[i] = ((out[i] >> 1) ^ ((out[i] & 1) - 1)) + 1;
648 }
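/* decode_array_0000() bootstraps the Rice parameter in three phases: the first 5 values
 * are read with a fixed k = 10, values 5..63 use k derived from the running mean, and
 * from value 64 on a sliding 64-sample window with the ksummin/ksummax hysteresis
 * thresholds keeps k in step with the signal. */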
649 
650 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
651 {
652  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
653  blockstodecode);
654 }
655 
656 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
657 {
658  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
659  blockstodecode);
660  decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX,
661  blockstodecode);
662 }
663 
664 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
665 {
666  int32_t *decoded0 = ctx->decoded[0];
667 
668  while (blockstodecode--)
669  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
670 }
671 
672 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
673 {
674  int32_t *decoded0 = ctx->decoded[0];
675  int32_t *decoded1 = ctx->decoded[1];
676  int blocks = blockstodecode;
677 
678  while (blockstodecode--)
679  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
680  while (blocks--)
681  *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
682 }
683 
684 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
685 {
686  int32_t *decoded0 = ctx->decoded[0];
687 
688  while (blockstodecode--)
689  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
690 }
691 
692 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
693 {
694  int32_t *decoded0 = ctx->decoded[0];
695  int32_t *decoded1 = ctx->decoded[1];
696  int blocks = blockstodecode;
697 
698  while (blockstodecode--)
699  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
700  range_dec_normalize(ctx);
701  // because of some implementation peculiarities we need to backpedal here
702  ctx->ptr -= 1;
703  range_start_decoding(ctx);
704  while (blocks--)
705  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
706 }
707 
708 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
709 {
710  int32_t *decoded0 = ctx->decoded[0];
711  int32_t *decoded1 = ctx->decoded[1];
712 
713  while (blockstodecode--) {
714  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
715  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
716  }
717 }
718 
719 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
720 {
721  int32_t *decoded0 = ctx->decoded[0];
722 
723  while (blockstodecode--)
724  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
725 }
726 
727 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
728 {
729  int32_t *decoded0 = ctx->decoded[0];
730  int32_t *decoded1 = ctx->decoded[1];
731 
732  while (blockstodecode--) {
733  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
734  *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX);
735  }
736 }
737 
738 static int init_entropy_decoder(APEContext *ctx)
739 {
740  /* Read the CRC */
741  if (ctx->fileversion >= 3900) {
742  if (ctx->data_end - ctx->ptr < 6)
743  return AVERROR_INVALIDDATA;
744  ctx->CRC = bytestream_get_be32(&ctx->ptr);
745  } else {
746  ctx->CRC = get_bits_long(&ctx->gb, 32);
747  }
748 
749  /* Read the frame flags if they exist */
750  ctx->frameflags = 0;
751  ctx->CRC_state = UINT32_MAX;
752  if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
753  ctx->CRC &= ~0x80000000;
754 
755  if (ctx->data_end - ctx->ptr < 6)
756  return AVERROR_INVALIDDATA;
757  ctx->frameflags = bytestream_get_be32(&ctx->ptr);
758  }
759 
760  /* Initialize the rice structs */
761  ctx->riceX.k = 10;
762  ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
763  ctx->riceY.k = 10;
764  ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;
765 
766  if (ctx->fileversion >= 3900) {
767  /* The first 8 bits of input are ignored. */
768  ctx->ptr++;
769 
770  range_start_decoding(ctx);
771  }
772 
773  return 0;
774 }
775 
777  375,
778 };
779 
780 static const int32_t initial_coeffs_a_3800[3] = {
781  64, 115, 64,
782 };
783 
784 static const int32_t initial_coeffs_b_3800[2] = {
785  740, 0
786 };
787 
788 static const int32_t initial_coeffs_3930[4] = {
789  360, 317, -109, 98
790 };
791 
792 static void init_predictor_decoder(APEContext *ctx)
793 {
794  APEPredictor *p = &ctx->predictor;
795 
796  /* Zero the history buffers */
797  memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
798  p->buf = p->historybuffer;
799 
800  /* Initialize and zero the coefficients */
801  if (ctx->fileversion < 3930) {
802  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
803  memcpy(p->coeffsA[0], initial_coeffs_fast_3320,
804  sizeof(initial_coeffs_fast_3320));
805  memcpy(p->coeffsA[1], initial_coeffs_fast_3320,
806  sizeof(initial_coeffs_fast_3320));
807  } else {
808  memcpy(p->coeffsA[0], initial_coeffs_a_3800,
809  sizeof(initial_coeffs_a_3800));
810  memcpy(p->coeffsA[1], initial_coeffs_a_3800,
811  sizeof(initial_coeffs_a_3800));
812  }
813  } else {
814  memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930));
815  memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930));
816  }
817  memset(p->coeffsB, 0, sizeof(p->coeffsB));
818  if (ctx->fileversion < 3930) {
819  memcpy(p->coeffsB[0], initial_coeffs_b_3800,
820  sizeof(initial_coeffs_b_3800));
821  memcpy(p->coeffsB[1], initial_coeffs_b_3800,
822  sizeof(initial_coeffs_b_3800));
823  }
824 
825  p->filterA[0] = p->filterA[1] = 0;
826  p->filterB[0] = p->filterB[1] = 0;
827  p->lastA[0] = p->lastA[1] = 0;
828 
829  p->sample_pos = 0;
830 }
831 
832 /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
833 static inline int APESIGN(int32_t x) {
834  return (x < 0) - (x > 0);
835 }
836 
837 static av_always_inline int filter_fast_3320(APEPredictor *p,
838  const int decoded, const int filter,
839  const int delayA)
840 {
841  int32_t predictionA;
842 
843  p->buf[delayA] = p->lastA[filter];
844  if (p->sample_pos < 3) {
845  p->lastA[filter] = decoded;
846  p->filterA[filter] = decoded;
847  return decoded;
848  }
849 
850  predictionA = p->buf[delayA] * 2U - p->buf[delayA - 1];
851  p->lastA[filter] = decoded + ((int32_t)(predictionA * p->coeffsA[filter][0]) >> 9);
852 
853  if ((decoded ^ predictionA) > 0)
854  p->coeffsA[filter][0]++;
855  else
856  p->coeffsA[filter][0]--;
857 
858  p->filterA[filter] += (unsigned)p->lastA[filter];
859 
860  return p->filterA[filter];
861 }
862 
863 static av_always_inline int filter_3800(APEPredictor *p,
864  const unsigned decoded, const int filter,
865  const int delayA, const int delayB,
866  const int start, const int shift)
867 {
868  int32_t predictionA, predictionB, sign;
869  int32_t d0, d1, d2, d3, d4;
870 
871  p->buf[delayA] = p->lastA[filter];
872  p->buf[delayB] = p->filterB[filter];
873  if (p->sample_pos < start) {
874  predictionA = decoded + p->filterA[filter];
875  p->lastA[filter] = decoded;
876  p->filterB[filter] = decoded;
877  p->filterA[filter] = predictionA;
878  return predictionA;
879  }
880  d2 = p->buf[delayA];
881  d1 = (p->buf[delayA] - p->buf[delayA - 1]) * 2U;
882  d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) * 8U);
883  d3 = p->buf[delayB] * 2U - p->buf[delayB - 1];
884  d4 = p->buf[delayB];
885 
886  predictionA = d0 * p->coeffsA[filter][0] +
887  d1 * p->coeffsA[filter][1] +
888  d2 * p->coeffsA[filter][2];
889 
890  sign = APESIGN(decoded);
891  p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign;
892  p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign;
893  p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign;
894 
895  predictionB = d3 * p->coeffsB[filter][0] -
896  d4 * p->coeffsB[filter][1];
897  p->lastA[filter] = decoded + (predictionA >> 11);
898  sign = APESIGN(p->lastA[filter]);
899  p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign;
900  p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign;
901 
902  p->filterB[filter] = p->lastA[filter] + (predictionB >> shift);
903  p->filterA[filter] = p->filterB[filter] + (unsigned)((int)(p->filterA[filter] * 31U) >> 5);
904 
905  return p->filterA[filter];
906 }
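/* filter_3800() stacks two adaptive predictors: an order-3 stage (coeffsA) fed by
 * differences of the channel's own history and an order-2 stage (coeffsB) fed by the
 * history of its own filtered output; both coefficient sets are nudged by +/- steps
 * derived from the signs of the inputs and of the residual (a sign-LMS style update). */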
907 
908 static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
909 {
910  int i, j;
911  int32_t dotprod, sign;
912  int32_t coeffs[256], delay[256];
913 
914  if (order >= length)
915  return;
916 
917  memset(coeffs, 0, order * sizeof(*coeffs));
918  for (i = 0; i < order; i++)
919  delay[i] = buffer[i];
920  for (i = order; i < length; i++) {
921  dotprod = 0;
922  sign = APESIGN(buffer[i]);
923  for (j = 0; j < order; j++) {
924  dotprod += delay[j] * (unsigned)coeffs[j];
925  coeffs[j] += ((delay[j] >> 31) | 1) * sign;
926  }
927  buffer[i] -= dotprod >> shift;
928  for (j = 0; j < order - 1; j++)
929  delay[j] = delay[j + 1];
930  delay[order - 1] = buffer[i];
931  }
932 }
933 
934 static void long_filter_ehigh_3830(int32_t *buffer, int length)
935 {
936  int i, j;
937  int32_t dotprod, sign;
938  int32_t delay[8] = { 0 };
939  uint32_t coeffs[8] = { 0 };
940 
941  for (i = 0; i < length; i++) {
942  dotprod = 0;
943  sign = APESIGN(buffer[i]);
944  for (j = 7; j >= 0; j--) {
945  dotprod += delay[j] * coeffs[j];
946  coeffs[j] += ((delay[j] >> 31) | 1) * sign;
947  }
948  for (j = 7; j > 0; j--)
949  delay[j] = delay[j - 1];
950  delay[0] = buffer[i];
951  buffer[i] -= dotprod >> 9;
952  }
953 }
954 
955 static void predictor_decode_stereo_3800(APEContext *ctx, int count)
956 {
957  APEPredictor *p = &ctx->predictor;
958  int32_t *decoded0 = ctx->decoded[0];
959  int32_t *decoded1 = ctx->decoded[1];
960  int start = 4, shift = 10;
961 
962  if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
963  start = 16;
964  long_filter_high_3800(decoded0, 16, 9, count);
965  long_filter_high_3800(decoded1, 16, 9, count);
966  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
967  int order = 128, shift2 = 11;
968 
969  if (ctx->fileversion >= 3830) {
970  order <<= 1;
971  shift++;
972  shift2++;
973  long_filter_ehigh_3830(decoded0 + order, count - order);
974  long_filter_ehigh_3830(decoded1 + order, count - order);
975  }
976  start = order;
977  long_filter_high_3800(decoded0, order, shift2, count);
978  long_filter_high_3800(decoded1, order, shift2, count);
979  }
980 
981  while (count--) {
982  int X = *decoded0, Y = *decoded1;
983  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
984  *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA);
985  decoded0++;
986  *decoded1 = filter_fast_3320(p, X, 1, XDELAYA);
987  decoded1++;
988  } else {
989  *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB,
990  start, shift);
991  decoded0++;
992  *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB,
993  start, shift);
994  decoded1++;
995  }
996 
997  /* Combined */
998  p->buf++;
999  p->sample_pos++;
1000 
1001  /* Have we filled the history buffer? */
1002  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1003  memmove(p->historybuffer, p->buf,
1004  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1005  p->buf = p->historybuffer;
1006  }
1007  }
1008 }
1009 
1010 static void predictor_decode_mono_3800(APEContext *ctx, int count)
1011 {
1012  APEPredictor *p = &ctx->predictor;
1013  int32_t *decoded0 = ctx->decoded[0];
1014  int start = 4, shift = 10;
1015 
1016  if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
1017  start = 16;
1018  long_filter_high_3800(decoded0, 16, 9, count);
1019  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
1020  int order = 128, shift2 = 11;
1021 
1022  if (ctx->fileversion >= 3830) {
1023  order <<= 1;
1024  shift++;
1025  shift2++;
1026  long_filter_ehigh_3830(decoded0 + order, count - order);
1027  }
1028  start = order;
1029  long_filter_high_3800(decoded0, order, shift2, count);
1030  }
1031 
1032  while (count--) {
1033  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
1034  *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA);
1035  decoded0++;
1036  } else {
1037  *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB,
1038  start, shift);
1039  decoded0++;
1040  }
1041 
1042  /* Combined */
1043  p->buf++;
1044  p->sample_pos++;
1045 
1046  /* Have we filled the history buffer? */
1047  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1048  memmove(p->historybuffer, p->buf,
1049  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1050  p->buf = p->historybuffer;
1051  }
1052  }
1053 }
1054 
1055 static av_always_inline int predictor_update_3930(APEPredictor *p,
1056  const int decoded, const int filter,
1057  const int delayA)
1058 {
1059  int32_t predictionA, sign;
1060  int32_t d0, d1, d2, d3;
1061 
1062  p->buf[delayA] = p->lastA[filter];
1063  d0 = p->buf[delayA ];
1064  d1 = p->buf[delayA ] - p->buf[delayA - 1];
1065  d2 = p->buf[delayA - 1] - p->buf[delayA - 2];
1066  d3 = p->buf[delayA - 2] - p->buf[delayA - 3];
1067 
1068  predictionA = d0 * p->coeffsA[filter][0] +
1069  d1 * p->coeffsA[filter][1] +
1070  d2 * p->coeffsA[filter][2] +
1071  d3 * p->coeffsA[filter][3];
1072 
1073  p->lastA[filter] = decoded + (predictionA >> 9);
1074  p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1075 
1076  sign = APESIGN(decoded);
1077  p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign;
1078  p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign;
1079  p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign;
1080  p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign;
1081 
1082  return p->filterA[filter];
1083 }
1084 
1085 static void predictor_decode_stereo_3930(APEContext *ctx, int count)
1086 {
1087  APEPredictor *p = &ctx->predictor;
1088  int32_t *decoded0 = ctx->decoded[0];
1089  int32_t *decoded1 = ctx->decoded[1];
1090 
1091  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1092 
1093  while (count--) {
1094  /* Predictor Y */
1095  int Y = *decoded1, X = *decoded0;
1096  *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA);
1097  decoded0++;
1098  *decoded1 = predictor_update_3930(p, X, 1, XDELAYA);
1099  decoded1++;
1100 
1101  /* Combined */
1102  p->buf++;
1103 
1104  /* Have we filled the history buffer? */
1105  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1106  memmove(p->historybuffer, p->buf,
1107  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1108  p->buf = p->historybuffer;
1109  }
1110  }
1111 }
1112 
1113 static void predictor_decode_mono_3930(APEContext *ctx, int count)
1114 {
1115  APEPredictor *p = &ctx->predictor;
1116  int32_t *decoded0 = ctx->decoded[0];
1117 
1118  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1119 
1120  while (count--) {
1121  *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA);
1122  decoded0++;
1123 
1124  p->buf++;
1125 
1126  /* Have we filled the history buffer? */
1127  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1128  memmove(p->historybuffer, p->buf,
1129  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1130  p->buf = p->historybuffer;
1131  }
1132  }
1133 }
1134 
1135 static av_always_inline int predictor_update_filter(APEPredictor *p,
1136  const int decoded, const int filter,
1137  const int delayA, const int delayB,
1138  const int adaptA, const int adaptB)
1139 {
1140  int32_t predictionA, predictionB, sign;
1141 
1142  p->buf[delayA] = p->lastA[filter];
1143  p->buf[adaptA] = APESIGN(p->buf[delayA]);
1144  p->buf[delayA - 1] = p->buf[delayA] - (unsigned)p->buf[delayA - 1];
1145  p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);
1146 
1147  predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
1148  p->buf[delayA - 1] * p->coeffsA[filter][1] +
1149  p->buf[delayA - 2] * p->coeffsA[filter][2] +
1150  p->buf[delayA - 3] * p->coeffsA[filter][3];
1151 
1152  /* Apply a scaled first-order filter compression */
1153  p->buf[delayB] = p->filterA[filter ^ 1] - ((int)(p->filterB[filter] * 31U) >> 5);
1154  p->buf[adaptB] = APESIGN(p->buf[delayB]);
1155  p->buf[delayB - 1] = p->buf[delayB] - (unsigned)p->buf[delayB - 1];
1156  p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
1157  p->filterB[filter] = p->filterA[filter ^ 1];
1158 
1159  predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
1160  p->buf[delayB - 1] * p->coeffsB[filter][1] +
1161  p->buf[delayB - 2] * p->coeffsB[filter][2] +
1162  p->buf[delayB - 3] * p->coeffsB[filter][3] +
1163  p->buf[delayB - 4] * p->coeffsB[filter][4];
1164 
1165  p->lastA[filter] = decoded + ((int)((unsigned)predictionA + (predictionB >> 1)) >> 10);
1166  p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1167 
1168  sign = APESIGN(decoded);
1169  p->coeffsA[filter][0] += p->buf[adaptA ] * sign;
1170  p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
1171  p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
1172  p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
1173  p->coeffsB[filter][0] += p->buf[adaptB ] * sign;
1174  p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
1175  p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
1176  p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
1177  p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;
1178 
1179  return p->filterA[filter];
1180 }
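/* In this predictor (used by the 3.95+ decoders) the B stage works across channels:
 * buf[delayB] is built from filterA[filter ^ 1], so each channel is partly predicted
 * from the previously reconstructed opposite channel, removing residual inter-channel
 * correlation. The coefficients adapt by adding sign(history sample) * sign(residual). */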
1181 
1182 static void predictor_decode_stereo_3950(APEContext *ctx, int count)
1183 {
1184  APEPredictor *p = &ctx->predictor;
1185  int32_t *decoded0 = ctx->decoded[0];
1186  int32_t *decoded1 = ctx->decoded[1];
1187 
1188  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1189 
1190  while (count--) {
1191  /* Predictor Y */
1192  *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
1193  YADAPTCOEFFSA, YADAPTCOEFFSB);
1194  decoded0++;
1195  *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
1196  XADAPTCOEFFSA, XADAPTCOEFFSB);
1197  decoded1++;
1198 
1199  /* Combined */
1200  p->buf++;
1201 
1202  /* Have we filled the history buffer? */
1203  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1204  memmove(p->historybuffer, p->buf,
1205  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1206  p->buf = p->historybuffer;
1207  }
1208  }
1209 }
1210 
1211 static void predictor_decode_mono_3950(APEContext *ctx, int count)
1212 {
1213  APEPredictor *p = &ctx->predictor;
1214  int32_t *decoded0 = ctx->decoded[0];
1215  int32_t predictionA, currentA, A, sign;
1216 
1217  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1218 
1219  currentA = p->lastA[0];
1220 
1221  while (count--) {
1222  A = *decoded0;
1223 
1224  p->buf[YDELAYA] = currentA;
1225  p->buf[YDELAYA - 1] = p->buf[YDELAYA] - (unsigned)p->buf[YDELAYA - 1];
1226 
1227  predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
1228  p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
1229  p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
1230  p->buf[YDELAYA - 3] * p->coeffsA[0][3];
1231 
1232  currentA = A + (unsigned)(predictionA >> 10);
1233 
1234  p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
1235  p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);
1236 
1237  sign = APESIGN(A);
1238  p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign;
1239  p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
1240  p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
1241  p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;
1242 
1243  p->buf++;
1244 
1245  /* Have we filled the history buffer? */
1246  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1247  memmove(p->historybuffer, p->buf,
1248  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1249  p->buf = p->historybuffer;
1250  }
1251 
1252  p->filterA[0] = currentA + (unsigned)((int)(p->filterA[0] * 31U) >> 5);
1253  *(decoded0++) = p->filterA[0];
1254  }
1255 
1256  p->lastA[0] = currentA;
1257 }
1258 
1259 static void do_init_filter(APEFilter *f, int16_t *buf, int order)
1260 {
1261  f->coeffs = buf;
1262  f->historybuffer = buf + order;
1263  f->delay = f->historybuffer + order * 2;
1264  f->adaptcoeffs = f->historybuffer + order;
1265 
1266  memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
1267  memset(f->coeffs, 0, order * sizeof(*f->coeffs));
1268  f->avg = 0;
1269 }
1270 
1271 static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
1272 {
1273  do_init_filter(&f[0], buf, order);
1274  do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
1275 }
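/* Each channel's filter occupies order * 3 + HISTORY_SIZE int16_t: [coeffs: order]
 * [history incl. adaptcoeffs: order * 2][room for the sliding delay line: HISTORY_SIZE].
 * The second (stereo) filter starts right after, which is why ape_decode_init()
 * allocates (ape_filter_orders[...] * 3 + HISTORY_SIZE) * 4 bytes per filter level. */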
1276 
1277 static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
1278  int32_t *data, int count, int order, int fracbits)
1279 {
1280  int res;
1281  int absres;
1282 
1283  while (count--) {
1284  /* round fixedpoint scalar product */
1285  res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
1286  f->delay - order,
1287  f->adaptcoeffs - order,
1288  order, APESIGN(*data));
1289  res = (int)(res + (1U << (fracbits - 1))) >> fracbits;
1290  res += (unsigned)*data;
1291  *data++ = res;
1292 
1293  /* Update the output history */
1294  *f->delay++ = av_clip_int16(res);
1295 
1296  if (version < 3980) {
1297  /* Version ??? to < 3.98 files (untested) */
1298  f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
1299  f->adaptcoeffs[-4] >>= 1;
1300  f->adaptcoeffs[-8] >>= 1;
1301  } else {
1302  /* Version 3.98 and later files */
1303 
1304  /* Update the adaption coefficients */
1305  absres = res < 0 ? -(unsigned)res : res;
1306  if (absres)
1307  *f->adaptcoeffs = APESIGN(res) *
1308  (8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3)));
1309  /* equivalent to the following code
1310  if (absres <= f->avg * 4 / 3)
1311  *f->adaptcoeffs = APESIGN(res) * 8;
1312  else if (absres <= f->avg * 3)
1313  *f->adaptcoeffs = APESIGN(res) * 16;
1314  else
1315  *f->adaptcoeffs = APESIGN(res) * 32;
1316  */
1317  else
1318  *f->adaptcoeffs = 0;
1319 
1320  f->avg += (int)(absres - (unsigned)f->avg) / 16;
1321 
1322  f->adaptcoeffs[-1] >>= 1;
1323  f->adaptcoeffs[-2] >>= 1;
1324  f->adaptcoeffs[-8] >>= 1;
1325  }
1326 
1327  f->adaptcoeffs++;
1328 
1329  /* Have we filled the history buffer? */
1330  if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
1331  memmove(f->historybuffer, f->delay - (order * 2),
1332  (order * 2) * sizeof(*f->historybuffer));
1333  f->delay = f->historybuffer + order * 2;
1334  f->adaptcoeffs = f->historybuffer + order;
1335  }
1336  }
1337 }
1338 
1339 static void apply_filter(APEContext *ctx, APEFilter *f,
1340  int32_t *data0, int32_t *data1,
1341  int count, int order, int fracbits)
1342 {
1343  do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
1344  if (data1)
1345  do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
1346 }
1347 
1348 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
1349  int32_t *decoded1, int count)
1350 {
1351  int i;
1352 
1353  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1354  if (!ape_filter_orders[ctx->fset][i])
1355  break;
1356  apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
1357  ape_filter_orders[ctx->fset][i],
1358  ape_filter_fracbits[ctx->fset][i]);
1359  }
1360 }
1361 
1362 static int init_frame_decoder(APEContext *ctx)
1363 {
1364  int i, ret;
1365  if ((ret = init_entropy_decoder(ctx)) < 0)
1366  return ret;
1367  init_predictor_decoder(ctx);
1368 
1369  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1370  if (!ape_filter_orders[ctx->fset][i])
1371  break;
1372  init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
1373  ape_filter_orders[ctx->fset][i]);
1374  }
1375  return 0;
1376 }
1377 
1378 static void ape_unpack_mono(APEContext *ctx, int count)
1379 {
1380  if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
1381  /* We are pure silence, so we're done. */
1382  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
1383  return;
1384  }
1385 
1386  ctx->entropy_decode_mono(ctx, count);
1387  if (ctx->error)
1388  return;
1389 
1390  /* Now apply the predictor decoding */
1391  ctx->predictor_decode_mono(ctx, count);
1392 
1393  /* Pseudo-stereo - just copy left channel to right channel */
1394  if (ctx->channels == 2) {
1395  memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
1396  }
1397 }
1398 
1399 static void ape_unpack_stereo(APEContext *ctx, int count)
1400 {
1401  unsigned left, right;
1402  int32_t *decoded0 = ctx->decoded[0];
1403  int32_t *decoded1 = ctx->decoded[1];
1404 
1405  if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) {
1406  /* We are pure silence, so we're done. */
1407  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
1408  return;
1409  }
1410 
1411  ctx->entropy_decode_stereo(ctx, count);
1412  if (ctx->error)
1413  return;
1414 
1415  /* Now apply the predictor decoding */
1416  ctx->predictor_decode_stereo(ctx, count);
1417 
1418  /* Decorrelate and scale to output depth */
1419  while (count--) {
1420  left = *decoded1 - (unsigned)(*decoded0 / 2);
1421  right = left + *decoded0;
1422 
1423  *(decoded0++) = left;
1424  *(decoded1++) = right;
1425  }
1426 }
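/* The decorrelation above is the inverse of the encoder's mid/side-style transform:
 * decoded0 carries the channel difference X and decoded1 the averaged channel Y, and
 * left = Y - X / 2, right = left + X reconstructs the plain stereo pair in place. */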
1427 
1428 static int ape_decode_frame(AVCodecContext *avctx, void *data,
1429  int *got_frame_ptr, AVPacket *avpkt)
1430 {
1431  AVFrame *frame = data;
1432  const uint8_t *buf = avpkt->data;
1433  APEContext *s = avctx->priv_data;
1434  uint8_t *sample8;
1435  int16_t *sample16;
1436  int32_t *sample24;
1437  int i, ch, ret;
1438  int blockstodecode;
1439  uint64_t decoded_buffer_size;
1440 
1441  /* this should never be negative, but bad things will happen if it is, so
1442  check it just to make sure. */
1443  av_assert0(s->samples >= 0);
1444 
1445  if(!s->samples){
1446  uint32_t nblocks, offset;
1447  int buf_size;
1448 
1449  if (!avpkt->size) {
1450  *got_frame_ptr = 0;
1451  return 0;
1452  }
1453  if (avpkt->size < 8) {
1454  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1455  return AVERROR_INVALIDDATA;
1456  }
1457  buf_size = avpkt->size & ~3;
1458  if (buf_size != avpkt->size) {
1459  av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
1460  "extra bytes at the end will be skipped.\n");
1461  }
1462  if (s->fileversion < 3950) // previous versions overread two bytes
1463  buf_size += 2;
1464  av_fast_padded_malloc(&s->data, &s->data_size, buf_size);
1465  if (!s->data)
1466  return AVERROR(ENOMEM);
1467  s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf,
1468  buf_size >> 2);
1469  memset(s->data + (buf_size & ~3), 0, buf_size & 3);
1470  s->ptr = s->data;
1471  s->data_end = s->data + buf_size;
1472 
1473  nblocks = bytestream_get_be32(&s->ptr);
1474  offset = bytestream_get_be32(&s->ptr);
1475  if (s->fileversion >= 3900) {
1476  if (offset > 3) {
1477  av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
1478  av_freep(&s->data);
1479  s->data_size = 0;
1480  return AVERROR_INVALIDDATA;
1481  }
1482  if (s->data_end - s->ptr < offset) {
1483  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1484  return AVERROR_INVALIDDATA;
1485  }
1486  s->ptr += offset;
1487  } else {
1488  if ((ret = init_get_bits8(&s->gb, s->ptr, s->data_end - s->ptr)) < 0)
1489  return ret;
1490  if (s->fileversion > 3800)
1491  skip_bits_long(&s->gb, offset * 8);
1492  else
1493  skip_bits_long(&s->gb, offset);
1494  }
1495 
1496  if (!nblocks || nblocks > INT_MAX / 2 / sizeof(*s->decoded_buffer) - 8) {
1497  av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n",
1498  nblocks);
1499  return AVERROR_INVALIDDATA;
1500  }
1501 
1502  /* Initialize the frame decoder */
1503  if (init_frame_decoder(s) < 0) {
1504  av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
1505  return AVERROR_INVALIDDATA;
1506  }
1507  s->samples = nblocks;
1508  }
1509 
1510  if (!s->data) {
1511  *got_frame_ptr = 0;
1512  return avpkt->size;
1513  }
1514 
1515  blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
1516  // for old files coefficients were not interleaved,
1517  // so we need to decode all of them at once
1518  if (s->fileversion < 3930)
1519  blockstodecode = s->samples;
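/* At most blocks_per_loop samples (the "max_samples" option, 4608 by default) are output
 * per call; s->samples keeps track of what is left of the frame, and the packet is only
 * reported as consumed once the whole frame has been decoded (AV_CODEC_CAP_SUBFRAMES,
 * see the return statement at the end). */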
1520 
1521  /* reallocate decoded sample buffer if needed */
1522  decoded_buffer_size = 2LL * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer);
1523  av_assert0(decoded_buffer_size <= INT_MAX);
1524 
1525  /* get output buffer */
1526  frame->nb_samples = blockstodecode;
1527  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1528  s->samples=0;
1529  return ret;
1530  }
1531 
1532  av_fast_malloc(&s->decoded_buffer, &s->decoded_size, decoded_buffer_size);
1533  if (!s->decoded_buffer)
1534  return AVERROR(ENOMEM);
1535  memset(s->decoded_buffer, 0, decoded_buffer_size);
1536  s->decoded[0] = s->decoded_buffer;
1537  s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
1538 
1539  s->error=0;
1540 
1541  if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
1542  ape_unpack_mono(s, blockstodecode);
1543  else
1544  ape_unpack_stereo(s, blockstodecode);
1545  emms_c();
1546 
1547  if (s->error) {
1548  s->samples=0;
1549  av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
1550  return AVERROR_INVALIDDATA;
1551  }
1552 
1553  switch (s->bps) {
1554  case 8:
1555  for (ch = 0; ch < s->channels; ch++) {
1556  sample8 = (uint8_t *)frame->data[ch];
1557  for (i = 0; i < blockstodecode; i++)
1558  *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
1559  }
1560  break;
1561  case 16:
1562  for (ch = 0; ch < s->channels; ch++) {
1563  sample16 = (int16_t *)frame->data[ch];
1564  for (i = 0; i < blockstodecode; i++)
1565  *sample16++ = s->decoded[ch][i];
1566  }
1567  break;
1568  case 24:
1569  for (ch = 0; ch < s->channels; ch++) {
1570  sample24 = (int32_t *)frame->data[ch];
1571  for (i = 0; i < blockstodecode; i++)
1572  *sample24++ = s->decoded[ch][i] * 256U;
1573  }
1574  break;
1575  }
1576 
1577  s->samples -= blockstodecode;
1578 
1579  if (avctx->err_recognition & AV_EF_CRCCHECK &&
1580  s->fileversion >= 3900 && s->bps < 24) {
1581  uint32_t crc = s->CRC_state;
1582  const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE);
1583  for (i = 0; i < blockstodecode; i++) {
1584  for (ch = 0; ch < s->channels; ch++) {
1585  uint8_t *smp = frame->data[ch] + (i*(s->bps >> 3));
1586  crc = av_crc(crc_tab, crc, smp, s->bps >> 3);
1587  }
1588  }
1589 
1590  if (!s->samples && (~crc >> 1) ^ s->CRC) {
1591  av_log(avctx, AV_LOG_ERROR, "CRC mismatch! Previously decoded "
1592  "frames may have been affected as well.\n");
1593  if (avctx->err_recognition & AV_EF_EXPLODE)
1594  return AVERROR_INVALIDDATA;
1595  }
1596 
1597  s->CRC_state = crc;
1598  }
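/* The running CRC covers the decoded sample bytes as written to the output planes and is
 * only compared after the last samples of the frame (s->samples == 0). The stored frame
 * CRC appears to be kept shifted right by one bit, presumably so the top bit of the field
 * can carry the "frame flags present" marker handled in init_entropy_decoder(); hence the
 * (~crc >> 1) (inversion being the usual CRC-32 finalization) in the comparison above. */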
1599 
1600  *got_frame_ptr = 1;
1601 
1602  return !s->samples ? avpkt->size : 0;
1603 }
1604 
1605 static void ape_flush(AVCodecContext *avctx)
1606 {
1607  APEContext *s = avctx->priv_data;
1608  s->samples= 0;
1609 }
1610 
1611 #define OFFSET(x) offsetof(APEContext, x)
1612 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
1613 static const AVOption options[] = {
1614  { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
1615  { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
1616  { NULL},
1617 };
1618 
1619 static const AVClass ape_decoder_class = {
1620  .class_name = "APE decoder",
1621  .item_name = av_default_item_name,
1622  .option = options,
1623  .version = LIBAVUTIL_VERSION_INT,
1624 };
1625 
1626 AVCodec ff_ape_decoder = {
1627  .name = "ape",
1628  .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
1629  .type = AVMEDIA_TYPE_AUDIO,
1630  .id = AV_CODEC_ID_APE,
1631  .priv_data_size = sizeof(APEContext),
1632  .init = ape_decode_init,
1633  .close = ape_decode_close,
1634  .decode = ape_decode_frame,
1635  .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
1636  AV_CODEC_CAP_DR1,
1637  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1638  .flush = ape_flush,
1639  .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
1640  AV_SAMPLE_FMT_S16P,
1641  AV_SAMPLE_FMT_S32P,
1642  AV_SAMPLE_FMT_NONE },
1643  .priv_class = &ape_decoder_class,
1644 };
static int init_frame_decoder(APEContext *ctx)
Definition: apedec.c:1362
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
static const int32_t initial_coeffs_3930[4]
Definition: apedec.c:788
static void decode_array_0000(APEContext *ctx, GetBitContext *gb, int32_t *out, APERice *rice, int blockstodecode)
Definition: apedec.c:592
int compression_level
compression levels
Definition: apedec.c:147
AVCodec ff_ape_decoder
Definition: apedec.c:1626
#define MODEL_ELEMENTS
Definition: apedec.c:386
#define NULL
Definition: coverity.c:32
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int decoded_size
Definition: apedec.c:157
#define YADAPTCOEFFSB
Definition: apedec.c:61
version
Definition: libkvazaar.c:317
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
static void range_start_decoding(APEContext *ctx)
Start the decoder.
Definition: apedec.c:316
AVOption.
Definition: opt.h:248
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
#define XDELAYA
Definition: apedec.c:56
static void apply_filter(APEContext *ctx, APEFilter *f, int32_t *data0, int32_t *data1, int count, int order, int fracbits)
Definition: apedec.c:1339
int fileversion
codec version, very important in decoding process
Definition: apedec.c:146
static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:656
int32_t filterA[2]
Definition: apedec.c:126
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
void(* entropy_decode_mono)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:176
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define avpriv_request_sample(...)
void(* entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:177
static int APESIGN(int32_t x)
Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero)
Definition: apedec.c:833
static void update_rice(APERice *rice, unsigned int x)
Definition: apedec.c:454
int size
Definition: packet.h:364
static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:692
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
static av_cold int ape_decode_init(AVCodecContext *avctx)
Definition: apedec.c:217
unsigned int buffer
buffer for input/output
Definition: apedec.c:117
int av_log2(unsigned v)
Definition: intmath.c:26
static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
Definition: apedec.c:908
static int init_entropy_decoder(APEContext *ctx)
Definition: apedec.c:738
static void ape_flush(AVCodecContext *avctx)
Definition: apedec.c:1605
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:72
static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
Definition: apedec.c:708
static int get_k(int ksum)
Definition: apedec.c:587
static av_always_inline int predictor_update_3930(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:1055
#define AV_CH_LAYOUT_STEREO
#define OFFSET(x)
Definition: apedec.c:1611
#define XADAPTCOEFFSA
Definition: apedec.c:60
AVCodec.
Definition: codec.h:190
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int16_t * filterbuf[APE_FILTER_LEVELS]
filter memory
Definition: apedec.c:161
static void predictor_decode_mono_3800(APEContext *ctx, int count)
Definition: apedec.c:1010
uint8_t base
Definition: vp3data.h:202
uint32_t CRC_state
accumulated CRC
Definition: apedec.c:152
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int ape_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: apedec.c:1428
Filter histories.
Definition: apedec.c:121
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
int16_t * delay
filtered values
Definition: apedec.c:103
AVOptions.
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
static void do_init_filter(APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1259
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
static const int32_t initial_coeffs_a_3800[3]
Definition: apedec.c:780
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:672
static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:719
static void ape_unpack_mono(APEContext *ctx, int count)
Definition: apedec.c:1378
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
APERangecoder rc
rangecoder used to decode actual values
Definition: apedec.c:163
#define YDELAYB
Definition: apedec.c:55
Public header for CRC hash function implementation.
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS]
Filter fraction bits depending on compression level.
Definition: apedec.c:89
uint8_t * data
Definition: packet.h:363
static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, int32_t *decoded1, int count)
Definition: apedec.c:1348
bitstream reader API header.
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1750
Decoder context.
Definition: apedec.c:137
#define A(x)
Definition: vp56_arith.h:28
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
static const uint16_t counts_3970[22]
Fixed probabilities for symbols in Monkey Audio version 3.97.
Definition: apedec.c:391
static void range_dec_normalize(APEContext *ctx)
Perform normalization.
Definition: apedec.c:324
#define U(x)
Definition: vp56_arith.h:37
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
static const uint16_t counts_diff_3980[21]
Probability ranges for symbols in Monkey Audio version 3.98.
Definition: apedec.c:418
int bps
Definition: apedec.c:144
void(* predictor_decode_mono)(struct APEContext *ctx, int count)
Definition: apedec.c:178
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define YDELAYA
Definition: apedec.c:54
int32_t lastA[2]
Definition: apedec.c:124
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
static av_cold int ape_decode_close(AVCodecContext *avctx)
Definition: apedec.c:202
static int ape_decode_value_3900(APEContext *ctx, APERice *rice)
Definition: apedec.c:510
int32_t historybuffer[HISTORY_SIZE+PREDICTOR_SIZE]
Definition: apedec.c:131
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define XDELAYB
Definition: apedec.c:57
int32_t * decoded_buffer
Definition: apedec.c:156
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
int avg
Definition: apedec.c:105
const char * name
Name of the codec implementation.
Definition: codec.h:197
static int range_decode_culshift(APEContext *ctx, int shift)
Decode value with given size in bits.
Definition: apedec.c:357
#define APE_FILTER_LEVELS
Definition: apedec.c:77
GLsizei count
Definition: opengl_enc.c:108
int error
Definition: apedec.c:174
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
static int range_decode_bits(APEContext *ctx, int n)
Decode n bits (n <= 16) without modelling.
Definition: apedec.c:378
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
audio channel layout utility functions
#define Y
Definition: boxblur.h:38
static void predictor_decode_mono_3930(APEContext *ctx, int count)
Definition: apedec.c:1113
uint8_t * data
current frame data
Definition: apedec.c:169
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS]
Filter orders depending on compression level.
Definition: apedec.c:80
#define FFMIN(a, b)
Definition: common.h:96
signed 32 bits, planar
Definition: samplefmt.h:68
static int get_rice_ook(GetBitContext *gb, int k)
Definition: apedec.c:465
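Rice decoding with parameter k is an unary-coded quotient followed by k raw low bits; a hedged sketch built from the get_unary() and get_bits() readers, not necessarily identical to the function above:

/* Sketch: unary quotient, then k explicit remainder bits. */
static unsigned rice_decode_sketch(GetBitContext *gb, int k)
{
    unsigned x = get_unary(gb, 1, get_bits_left(gb)); /* quotient  */
    if (k)
        x = (x << k) | get_bits(gb, k);               /* remainder */
    return x;
}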
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
static av_always_inline int filter_fast_3320(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:837
AVCodecContext * avctx
Definition: apedec.c:139
static void ape_unpack_stereo(APEContext *ctx, int count)
Definition: apedec.c:1399
const uint8_t * ptr
current position in frame data
Definition: apedec.c:172
int32_t
static int range_decode_culfreq(APEContext *ctx, int tot_f)
Calculate cumulative frequency for next symbol.
Definition: apedec.c:345
AVFormatContext * ctx
Definition: movenc.c:48
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
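A minimal example of the usual av_crc_get_table()/av_crc() pairing for checking a block against an expected CRC-32; the polynomial choice and final XOR here are illustrative, not a statement about what the APE decoder itself uses:

#include "libavutil/crc.h"

/* Returns nonzero if buf matches the expected CRC-32 (IEEE, reflected). */
static int crc_matches(const uint8_t *buf, size_t size, uint32_t expected)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t crc = av_crc(table, UINT32_MAX, buf, size) ^ UINT32_MAX;
    return crc == expected;
}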
request_frame(): called when a frame is wanted on an output; the filter should push a queued frame, or request frames on its inputs until it can produce one (see the FFmpeg filter design documentation).
#define s(width, name)
Definition: cbs_vp9.c:257
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
int32_t(* scalarproduct_and_madd_int16)(int16_t *v1, const int16_t *v2, const int16_t *v3, int len, int mul)
Calculate scalar product of v1 and v2, and v1[i] += v3[i] * mul.
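The DSP hook's contract, written out as a plain C reference loop (a sketch of the behaviour described above, not the optimized FFmpeg routine):

#include <stdint.h>

/* Returns sum(v1[i]*v2[i]) while updating v1[i] += v3[i]*mul in place. */
static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2,
                                                const int16_t *v3, int len,
                                                int mul)
{
    int32_t res = 0;
    for (int i = 0; i < len; i++) {
        res   += v1[i] * v2[i];
        v1[i] += v3[i] * mul;
    }
    return res;
}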
unsigned 8 bits, planar
Definition: samplefmt.h:66
static void predictor_decode_stereo_3930(APEContext *ctx, int count)
Definition: apedec.c:1085
uint32_t ksum
Definition: apedec.c:110
av_cold void ff_llauddsp_init(LLAudDSPContext *c)
uint32_t help
bytes_to_follow, or an intermediate value
Definition: apedec.c:116
uint32_t coeffsA[2][4]
adaptation coefficients
Definition: apedec.c:129
static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:727
#define APE_FRAMECODE_PSEUDO_STEREO
Definition: apedec.c:47
uint32_t range
length of interval
Definition: apedec.c:115
if(ret)
int samples
samples left to decode in current frame
Definition: apedec.c:143
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int fset
which filter set to use (calculated from compression level)
Definition: apedec.c:148
static int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, APERice *rice)
Definition: apedec.c:477
APERice riceX
Rice code parameters for the second channel
Definition: apedec.c:164
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
static void predictor_decode_stereo_3950(APEContext *ctx, int count)
Definition: apedec.c:1182
static void predictor_decode_stereo_3800(APEContext *ctx, int count)
Definition: apedec.c:955
LLAudDSPContext adsp
Definition: apedec.c:141
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
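A hedged usage sketch: wrap a byte buffer in a GetBitContext and read a field; the function name and the 16-bit field are hypothetical:

#include "libavutil/error.h"
#include "get_bits.h"

static int read_header_field(const uint8_t *buf, int size, int *out_value)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size);  /* byte-sized init, validates size */
    if (ret < 0)
        return ret;
    if (get_bits_left(&gb) < 16)
        return AVERROR_INVALIDDATA;
    *out_value = get_bits(&gb, 16);            /* illustrative 16-bit field */
    return 0;
}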
#define APE_FRAMECODE_STEREO_SILENCE
Definition: apedec.c:46
static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1271
int frameflags
frame flags
Definition: apedec.c:153
main external API structure.
Definition: avcodec.h:526
static av_always_inline int filter_3800(APEPredictor *p, const unsigned decoded, const int filter, const int delayA, const int delayB, const int start, const int shift)
Definition: apedec.c:863
static int ape_decode_value_3990(APEContext *ctx, APERice *rice)
Definition: apedec.c:544
uint32_t CRC
signalled frame CRC
Definition: apedec.c:151
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1872
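In an audio decoder the usual pattern is to set frame->nb_samples and then let ff_get_buffer() allocate the channel planes; a short sketch with error handling trimmed to the essentials:

/* Sketch: request an output frame big enough for nb_samples per channel. */
static int alloc_audio_frame(AVCodecContext *avctx, AVFrame *frame,
                             int nb_samples)
{
    frame->nb_samples = nb_samples;
    return ff_get_buffer(avctx, frame, 0);
}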
Snow range coder documentation (bitstream structure and state transition table).
Definition: snow.txt:206
BswapDSPContext bdsp
Definition: apedec.c:140
unsigned int sample_pos
Definition: apedec.c:133
int extradata_size
Definition: avcodec.h:628
static const uint16_t counts_3980[22]
Fixed probabilities for symbols in Monkey's Audio version 3.98.
Definition: apedec.c:409
static int range_get_symbol(APEContext *ctx, const uint16_t counts[], const uint16_t counts_diff[])
Decode symbol.
Definition: apedec.c:430
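Conceptually, decoding a symbol from a cumulative-frequency table means finding the last entry whose cumulative count does not exceed the decoded frequency; a generic sketch (not the FFmpeg search loop) assuming counts[] holds cumulative counts starting at 0:

#include <stdint.h>

/* Return the index sym such that counts[sym] <= cf < counts[sym + 1]. */
static int symbol_from_cumulative(const uint16_t counts[], int nsyms, int cf)
{
    int sym = 0;
    while (sym + 1 < nsyms && cf >= counts[sym + 1])
        sym++;
    return sym;
}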
Describe the class of an AVClass context structure.
Definition: log.h:67
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: codec.h:93
uint32_t low
low end of interval
Definition: apedec.c:114
int flags
global decoder flags
Definition: apedec.c:149
APECompressionLevel
Possible compression levels.
Definition: apedec.c:68
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data...
Definition: avcodec.h:1663
void(* predictor_decode_stereo)(struct APEContext *ctx, int count)
Definition: apedec.c:179
#define EXTRA_BITS
Definition: apedec.c:312
static void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
Update decoding state.
Definition: apedec.c:371
static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:684
uint32_t k
Definition: apedec.c:109
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
#define MAX_CHANNELS
Definition: apedec.c:42
static const int32_t initial_coeffs_fast_3320[1]
Definition: apedec.c:776
libswresample sample format conversion helpers (CONV_FUNC table, swri_audio_convert_alloc()/swri_audio_convert()).
Definition: audioconvert.c:56
Definition: vf_addroi.c:26
#define MIN_CACHE_BITS
Definition: get_bits.h:128
static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
Definition: apedec.c:1277
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
#define PREDICTOR_SIZE
Total size of all predictor histories.
Definition: apedec.c:52
static const uint16_t counts_diff_3970[21]
Probability ranges for symbols in Monkey's Audio version 3.97.
Definition: apedec.c:400
int blocks_per_loop
maximum number of samples to decode for each call
Definition: apedec.c:159
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
int
uint8_t * data_end
frame data end
Definition: apedec.c:170
common internal api header.
APERice riceY
Rice code parameters for the first channel
Definition: apedec.c:165
static const int shift2[6]
Definition: dxa.c:51
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:46
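Limited-length unary decoding simply counts bits until the stop bit (or the length cap) is reached; an illustrative equivalent of the reader named above:

/* Count bits different from stop, reading at most len bits. */
static int unary_decode_sketch(GetBitContext *gb, int stop, int len)
{
    int n = 0;
    while (n < len && get_bits1(gb) != stop)
        n++;
    return n;
}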
APEFilter filters[APE_FILTER_LEVELS][2]
filters used for reconstruction
Definition: apedec.c:166
static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
Definition: apedec.c:1135
int16_t * coeffs
actual coefficients used in filtering
Definition: apedec.c:100
int32_t filterB[2]
Definition: apedec.c:127
#define YADAPTCOEFFSA
Definition: apedec.c:59
#define PAR
Definition: apedec.c:1612
static void init_predictor_decoder(APEContext *ctx)
Definition: apedec.c:792
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
void * priv_data
Definition: avcodec.h:553
static const int32_t initial_coeffs_b_3800[2]
Definition: apedec.c:784
APEPredictor predictor
predictor used for final reconstruction
Definition: apedec.c:154
static const AVClass ape_decoder_class
Definition: apedec.c:1619
int channels
number of audio channels
Definition: avcodec.h:1187
static void long_filter_ehigh_3830(int32_t *buffer, int length)
Definition: apedec.c:934
static void predictor_decode_mono_3950(APEContext *ctx, int count)
Definition: apedec.c:1211
GetBitContext gb
Definition: apedec.c:167
Filters applied to the decoded data.
Definition: apedec.c:99
static const struct PPFilter filters[]
Definition: postprocess.c:134
uint32_t coeffsB[2][5]
adaptation coefficients
Definition: apedec.c:130
#define XADAPTCOEFFSB
Definition: apedec.c:62
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:836
int32_t * decoded[MAX_CHANNELS]
decoded data for each channel
Definition: apedec.c:158
int32_t * buf
Definition: apedec.c:122
FILE * out
Definition: movenc.c:54
#define av_freep(p)
signed 16 bits, planar
Definition: samplefmt.h:67
#define HISTORY_SIZE
Definition: apedec.c:49
#define av_always_inline
Definition: attributes.h:45
int data_size
frame data allocated size
Definition: apedec.c:171
Filter format negotiation: "frame" means either a video frame or a group of audio samples as stored in an AVFrame; query_formats() lists the supported formats for each input and output link (see the FFmpeg filter design documentation on frame references, ownership and permissions).
static const AVOption options[]
Definition: apedec.c:1613
#define AV_CH_LAYOUT_MONO
int16_t * adaptcoeffs
adaptive filter coefficients used for correcting the actual filter coefficients
Definition: apedec.c:101
int channels
Definition: apedec.c:142
#define BOTTOM_VALUE
Definition: apedec.c:313
This structure stores compressed data.
Definition: packet.h:340
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
uint32_t AVCRC
Definition: crc.h:47
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:650
int i
Definition: input.c:407
GLuint buffer
Definition: opengl_enc.c:101
int16_t * historybuffer
filter memory
Definition: apedec.c:102
static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:664