FFmpeg
wmaprodec.c
1 /*
2  * Wmapro compatible decoder
3  * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
4  * Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * @brief wmapro decoder implementation
26  * Wmapro is an MDCT-based codec comparable to WMA standard or AAC.
27  * The decoding therefore consists of the following steps:
28  * - bitstream decoding
29  * - reconstruction of per-channel data
30  * - rescaling and inverse quantization
31  * - IMDCT
32  * - windowing and overlap-add
33  *
34  * The compressed wmapro bitstream is split into individual packets.
35  * Every such packet contains one or more wma frames.
36  * The compressed frames may have a variable length and frames may
37  * cross packet boundaries.
38  * Common to all wmapro frames is the number of samples that are stored in
39  * a frame.
40  * The number of samples and a few other decode flags are stored
41  * as extradata that has to be passed to the decoder.
42  *
43  * The wmapro frames themselves are again split into a variable number of
44  * subframes. Every subframe contains the data for 2^N time domain samples
45  * where N varies between 7 and 12.
46  *
47  * Example wmapro bitstream (in samples):
48  *
49  * ||   packet 0           || packet 1        || packet 2          packets
50  * ---------------------------------------------------
51  * || frame 0      || frame 1       || frame 2     ||               frames
52  * ---------------------------------------------------
53  * ||   |      |   ||   |   |   |   ||             ||               subframes of channel 0
54  * ---------------------------------------------------
55  * ||      |   |   ||   |   |   |   ||             ||               subframes of channel 1
56  * ---------------------------------------------------
57  *
58  * The frame layouts for the individual channels of a wma frame do not need
59  * to be the same.
60  *
61  * However, if the offsets and lengths of several subframes of a frame are the
62  * same, the subframes of the channels can be grouped.
63  * Every group may then use special coding techniques like M/S stereo coding
64  * to improve the compression ratio. These channel transformations do not
65  * need to be applied to a whole subframe. Instead, they can also work on
66  * individual scale factor bands (see below).
67  * The coefficients that carry the audio signal in the frequency domain
68  * are transmitted as Huffman-coded vectors with 4, 2 and 1 elements.
69  * In addition to that, the encoder can switch to a runlevel coding scheme
70  * by transmitting subframe_length / 128 zero coefficients.
71  *
72  * Before the audio signal can be converted to the time domain, the
73  * coefficients have to be rescaled and inverse quantized.
74  * A subframe is therefore split into several scale factor bands that get
75  * scaled individually.
76  * Scale factors are transmitted for every frame, but they might be shared
77  * between the subframes of a channel. Scale factors are initially DPCM-coded.
78  * Once scale factors are shared, the differences are transmitted as run-level
79  * codes.
80  * Every subframe length and offset combination in the frame layout shares a
81  * common quantization factor that can be adjusted for every channel by a
82  * modifier.
83  * After the inverse quantization, the coefficients get processed by an IMDCT.
84  * The resulting values are then windowed with a sine window and the first half
85  * of the values is added to the second half of the output from the previous
86  * subframe in order to reconstruct the output samples. (A small illustrative sketch of the per-band rescaling step follows this comment block.)
87  */
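/* Illustrative sketch (not part of the original wmaprodec.c): a minimal example
 * of the per-band rescaling described above, using the same gain formula that
 * decode_subframe() applies further down (gain in dB = quant_step -
 * (max_scale_factor - scale_factor) * scale_factor_step).  The function and
 * parameter names here are hypothetical; powf() needs <math.h>. */
#if 0
static void rescale_band_sketch(float *coeffs, int start, int end,
                                int quant_step, int scale_factor,
                                int max_scale_factor, int scale_factor_step)
{
    /* exponent (in dB): the quantization step minus the distance of this
     * band's scale factor to the channel's largest scale factor */
    const int   exp  = quant_step -
                       (max_scale_factor - scale_factor) * scale_factor_step;
    const float gain = powf(10.0f, exp / 20.0f);
    int i;

    /* rescale every coefficient of the scale factor band [start, end) */
    for (i = start; i < end; i++)
        coeffs[i] *= gain;
}
#endif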
88 
89 #include <inttypes.h>
90 
91 #include "libavutil/float_dsp.h"
92 #include "libavutil/intfloat.h"
93 #include "libavutil/intreadwrite.h"
94 #include "avcodec.h"
95 #include "internal.h"
96 #include "get_bits.h"
97 #include "put_bits.h"
98 #include "wmaprodata.h"
99 #include "sinewin.h"
100 #include "wma.h"
101 #include "wma_common.h"
102 
103 /** current decoder limitations */
104 #define WMAPRO_MAX_CHANNELS 8 ///< max number of handled channels
105 #define MAX_SUBFRAMES 32 ///< max number of subframes per channel
106 #define MAX_BANDS 29 ///< max number of scale factor bands
107 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
108 
109 #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size
110 #define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size
111 #define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS) ///< minimum block size
112 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size
113 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes
114 
115 
116 #define VLCBITS 9
117 #define SCALEVLCBITS 8
118 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
119 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
120 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
121 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
122 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
123 
124 static VLC sf_vlc; ///< scale factor DPCM vlc
125 static VLC sf_rl_vlc; ///< scale factor run length vlc
126 static VLC vec4_vlc; ///< 4 coefficients per symbol
127 static VLC vec2_vlc; ///< 2 coefficients per symbol
128 static VLC vec1_vlc; ///< 1 coefficient per symbol
129 static VLC coef_vlc[2]; ///< coefficient run length vlc codes
130 static float sin64[33]; ///< sine table for decorrelation
131 
132 /**
133  * @brief frame specific decoder context for a single channel
134  */
135 typedef struct {
136  int16_t prev_block_len; ///< length of the previous block
137  uint8_t transmit_coefs;
138  uint8_t num_subframes;
139  uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
140  uint16_t subframe_offset[MAX_SUBFRAMES]; ///< subframe positions in the current frame
141  uint8_t cur_subframe; ///< current subframe number
142  uint16_t decoded_samples; ///< number of already processed samples
143  uint8_t grouped; ///< channel is part of a group
144  int quant_step; ///< quantization step for the current subframe
145  int8_t reuse_sf; ///< share scale factors between subframes
146  int8_t scale_factor_step; ///< scaling step for the current subframe
147  int max_scale_factor; ///< maximum scale factor for the current subframe
148  int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
149  int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
150  int* scale_factors; ///< pointer to the scale factor values used for decoding
151  uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
152  float* coeffs; ///< pointer to the subframe decode buffer
153  uint16_t num_vec_coeffs; ///< number of vector coded coefficients
154  DECLARE_ALIGNED(32, float, out)[WMAPRO_BLOCK_MAX_SIZE + WMAPRO_BLOCK_MAX_SIZE / 2]; ///< output buffer
155 } WMAProChannelCtx;
156 
157 /**
158  * @brief channel group for channel transformations
159  */
160 typedef struct {
161  uint8_t num_channels; ///< number of channels in the group
162  int8_t transform; ///< transform on / off
163  int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
164  float decorrelation_matrix[WMAPRO_MAX_CHANNELS*WMAPRO_MAX_CHANNELS];
165  float* channel_data[WMAPRO_MAX_CHANNELS]; ///< transformation coefficients
166 } WMAProChannelGrp;
167 
168 /**
169  * @brief main decoder context
170  */
171 typedef struct WMAProDecodeCtx {
172  /* generic decoder variables */
173  AVCodecContext* avctx; ///< codec context for av_log
174  AVFloatDSPContext fdsp;
175  uint8_t frame_data[MAX_FRAMESIZE +
176  FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
177  PutBitContext pb; ///< context for filling the frame_data buffer
178  FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]; ///< MDCT context per block size
179  DECLARE_ALIGNED(32, float, tmp)[WMAPRO_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
180  const float* windows[WMAPRO_BLOCK_SIZES]; ///< windows for the different block sizes
181 
182  /* frame size dependent frame information (set during initialization) */
183  uint32_t decode_flags; ///< used compression features
184  uint8_t len_prefix; ///< frame is prefixed with its length
185  uint8_t dynamic_range_compression; ///< frame contains DRC data
186  uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
187  uint16_t samples_per_frame; ///< number of samples to output
188  uint16_t log2_frame_size;
189  int8_t lfe_channel; ///< lfe channel index
190  uint8_t max_num_subframes;
191  uint8_t subframe_len_bits; ///< number of bits used for the subframe length
192  uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
193  uint16_t min_samples_per_subframe;
194  int8_t num_sfb[WMAPRO_BLOCK_SIZES]; ///< scale factor bands per block size
195  int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
196  int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
197  int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]; ///< subwoofer cutoff values
198 
199  /* packet decode state */
200  GetBitContext pgb; ///< bitstream reader context for the packet
201  int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
202  uint8_t packet_offset; ///< frame offset in the packet
203  uint8_t packet_sequence_number; ///< current packet number
204  int num_saved_bits; ///< saved number of bits
205  int frame_offset; ///< frame offset in the bit reservoir
206  int subframe_offset; ///< subframe offset in the bit reservoir
207  uint8_t packet_loss; ///< set in case of bitstream error
208  uint8_t packet_done; ///< set when a packet is fully decoded
209 
210  /* frame decode state */
211  uint32_t frame_num; ///< current frame number (not used for decoding)
212  GetBitContext gb; ///< bitstream reader context
213  int buf_bit_size; ///< buffer size in bits
214  uint8_t drc_gain; ///< gain for the DRC tool
215  int8_t skip_frame; ///< skip output step
216  int8_t parsed_all_subframes; ///< all subframes decoded?
217 
218  /* subframe/block decode state */
219  int16_t subframe_len; ///< current subframe length
220  int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
221  int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS];
222  int8_t num_bands; ///< number of scale factor bands
223  int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
224  int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
225  uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
226  int8_t esc_len; ///< length of escaped coefficients
227 
228  uint8_t num_chgroups; ///< number of channel groups
229  WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]; ///< channel group information
230 
231  WMAProChannelCtx channel[WMAPRO_MAX_CHANNELS]; ///< per channel data
232 } WMAProDecodeCtx; ///< main decoder context
233 
234 
235 /**
236  *@brief helper function to print the most important members of the context
237  *@param s context
238  */
239 static av_cold void dump_context(WMAProDecodeCtx *s)
240 {
241 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
242 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %"PRIx32"\n", a, b);
243 
244  PRINT("ed sample bit depth", s->bits_per_sample);
245  PRINT_HEX("ed decode flags", s->decode_flags);
246  PRINT("samples per frame", s->samples_per_frame);
247  PRINT("log2 frame size", s->log2_frame_size);
248  PRINT("max num subframes", s->max_num_subframes);
249  PRINT("len prefix", s->len_prefix);
250  PRINT("num channels", s->avctx->channels);
251 }
252 
253 /**
254  *@brief Uninitialize the decoder and free all resources.
255  *@param avctx codec context
256  *@return 0 on success, < 0 otherwise
257  */
258 static av_cold int decode_end(AVCodecContext *avctx)
259 {
260  WMAProDecodeCtx *s = avctx->priv_data;
261  int i;
262 
263  for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
264  ff_mdct_end(&s->mdct_ctx[i]);
265 
266  return 0;
267 }
268 
269 /**
270  *@brief Initialize the decoder.
271  *@param avctx codec context
272  *@return 0 on success, -1 otherwise
273  */
274 static av_cold int decode_init(AVCodecContext *avctx)
275 {
276  WMAProDecodeCtx *s = avctx->priv_data;
277  uint8_t *edata_ptr = avctx->extradata;
278  unsigned int channel_mask;
279  int i, bits;
280  int log2_max_num_subframes;
281  int num_possible_block_sizes;
282 
283  if (!avctx->block_align) {
284  av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
285  return AVERROR(EINVAL);
286  }
287 
288  s->avctx = avctx;
290 
292 
294 
295  if (avctx->extradata_size >= 18) {
296  s->decode_flags = AV_RL16(edata_ptr+14);
297  channel_mask = AV_RL32(edata_ptr+2);
298  s->bits_per_sample = AV_RL16(edata_ptr);
299  /** dump the extradata */
300  for (i = 0; i < avctx->extradata_size; i++)
301  av_dlog(avctx, "[%x] ", avctx->extradata[i]);
302  av_dlog(avctx, "\n");
303 
304  } else {
305  avpriv_request_sample(avctx, "Unknown extradata size");
306  return AVERROR_PATCHWELCOME;
307  }
308 
309  /** generic init */
310  s->log2_frame_size = av_log2(avctx->block_align) + 4;
311  if (s->log2_frame_size > 25) {
312  avpriv_request_sample(avctx, "Large block align");
313  return AVERROR_PATCHWELCOME;
314  }
315 
316  /** frame info */
317  s->skip_frame = 1; /* skip first frame */
318  s->packet_loss = 1;
319  s->len_prefix = (s->decode_flags & 0x40);
320 
321  /** get frame len */
322  bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
323  if (bits > WMAPRO_BLOCK_MAX_BITS) {
324  avpriv_request_sample(avctx, "14-bit block sizes");
325  return AVERROR_PATCHWELCOME;
326  }
327  s->samples_per_frame = 1 << bits;
328 
329  /** subframe info */
330  log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
331  s->max_num_subframes = 1 << log2_max_num_subframes;
332  if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
333  s->max_subframe_len_bit = 1;
334  s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
335 
336  num_possible_block_sizes = log2_max_num_subframes + 1;
337  s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
338  s->dynamic_range_compression = (s->decode_flags & 0x80);
339 
340  if (s->max_num_subframes > MAX_SUBFRAMES) {
341  av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %"PRId8"\n",
342  s->max_num_subframes);
343  return AVERROR_INVALIDDATA;
344  }
345 
346  if (s->min_samples_per_subframe < WMAPRO_BLOCK_MIN_SIZE) {
347  av_log(avctx, AV_LOG_ERROR, "min_samples_per_subframe of %d too small\n",
348  s->min_samples_per_subframe);
349  return AVERROR_INVALIDDATA;
350  }
351 
352  if (s->avctx->sample_rate <= 0) {
353  av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
354  return AVERROR_INVALIDDATA;
355  }
356 
357  if (avctx->channels < 0) {
358  av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
359  avctx->channels);
360  return AVERROR_INVALIDDATA;
361  } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
362  avpriv_request_sample(avctx,
363  "More than %d channels", WMAPRO_MAX_CHANNELS);
364  return AVERROR_PATCHWELCOME;
365  }
366 
367  /** init previous block len */
368  for (i = 0; i < avctx->channels; i++)
369  s->channel[i].prev_block_len = s->samples_per_frame;
370 
371  /** extract lfe channel position */
372  s->lfe_channel = -1;
373 
374  if (channel_mask & 8) {
375  unsigned int mask;
376  for (mask = 1; mask < 16; mask <<= 1) {
377  if (channel_mask & mask)
378  ++s->lfe_channel;
379  }
380  }
381 
382  INIT_VLC_STATIC(&sf_vlc, SCALEVLCBITS, HUFF_SCALE_SIZE,
383  scale_huffbits, 1, 1,
384  scale_huffcodes, 2, 2, 616);
385 
386  INIT_VLC_STATIC(&sf_rl_vlc, VLCBITS, HUFF_SCALE_RL_SIZE,
387  scale_rl_huffbits, 1, 1,
388  scale_rl_huffcodes, 4, 4, 1406);
389 
390  INIT_VLC_STATIC(&coef_vlc[0], VLCBITS, HUFF_COEF0_SIZE,
391  coef0_huffbits, 1, 1,
392  coef0_huffcodes, 4, 4, 2108);
393 
394  INIT_VLC_STATIC(&coef_vlc[1], VLCBITS, HUFF_COEF1_SIZE,
395  coef1_huffbits, 1, 1,
396  coef1_huffcodes, 4, 4, 3912);
397 
398  INIT_VLC_STATIC(&vec4_vlc, VLCBITS, HUFF_VEC4_SIZE,
399  vec4_huffbits, 1, 1,
400  vec4_huffcodes, 2, 2, 604);
401 
402  INIT_VLC_STATIC(&vec2_vlc, VLCBITS, HUFF_VEC2_SIZE,
403  vec2_huffbits, 1, 1,
404  vec2_huffcodes, 2, 2, 562);
405 
406  INIT_VLC_STATIC(&vec1_vlc, VLCBITS, HUFF_VEC1_SIZE,
407  vec1_huffbits, 1, 1,
408  vec1_huffcodes, 2, 2, 562);
409 
410  /** calculate number of scale factor bands and their offsets
411  for every possible block size */
412  for (i = 0; i < num_possible_block_sizes; i++) {
413  int subframe_len = s->samples_per_frame >> i;
414  int x;
415  int band = 1;
416 
417  s->sfb_offsets[i][0] = 0;
418 
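    /* a block of subframe_len coefficients spans 0..sample_rate/2, so the
     * coefficient index for a critical band frequency f is
     * 2 * f * subframe_len / sample_rate; the +2 and &= ~3 below keep every
     * band offset aligned to a multiple of 4 coefficients */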
419  for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {
420  int offset = (subframe_len * 2 * critical_freq[x])
421  / s->avctx->sample_rate + 2;
422  offset &= ~3;
423  if (offset > s->sfb_offsets[i][band - 1])
424  s->sfb_offsets[i][band++] = offset;
425  }
426  s->sfb_offsets[i][band - 1] = subframe_len;
427  s->num_sfb[i] = band - 1;
428  if (s->num_sfb[i] <= 0) {
429  av_log(avctx, AV_LOG_ERROR, "num_sfb invalid\n");
430  return AVERROR_INVALIDDATA;
431  }
432  }
433 
434 
435  /** Scale factors can be shared between blocks of different size
436  as every block has a different scale factor band layout.
437  The matrix sf_offsets is needed to find the correct scale factor.
438  */
439 
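    /* for every band b of block size i, compute the centre position of the
     * band at full-frame resolution and find the band v of every other block
     * size x that contains this position; sf_offsets[i][x][b] then selects
     * which previously transmitted scale factor to reuse */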
440  for (i = 0; i < num_possible_block_sizes; i++) {
441  int b;
442  for (b = 0; b < s->num_sfb[i]; b++) {
443  int x;
444  int offset = ((s->sfb_offsets[i][b]
445  + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
446  for (x = 0; x < num_possible_block_sizes; x++) {
447  int v = 0;
448  while (s->sfb_offsets[x][v + 1] << x < offset) {
449  v++;
450  av_assert0(v < MAX_BANDS);
451  }
452  s->sf_offsets[i][x][b] = v;
453  }
454  }
455  }
456 
457  /** init MDCT, FIXME: only init needed sizes */
458  for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
459  ff_mdct_init(&s->mdct_ctx[i], WMAPRO_BLOCK_MIN_BITS+1+i, 1,
460  1.0 / (1 << (WMAPRO_BLOCK_MIN_BITS + i - 1))
461  / (1 << (s->bits_per_sample - 1)));
462 
463  /** init MDCT windows: simple sine window */
464  for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) {
465  const int win_idx = WMAPRO_BLOCK_MAX_BITS - i;
466  ff_init_ff_sine_windows(win_idx);
467  s->windows[WMAPRO_BLOCK_SIZES - i - 1] = ff_sine_windows[win_idx];
468  }
469 
470  /** calculate subwoofer cutoff values */
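    /* the cutoff is roughly the coefficient index corresponding to 220 Hz
     * (index = 2 * f * block_size / sample_rate), clipped to [4, block_size] */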
471  for (i = 0; i < num_possible_block_sizes; i++) {
472  int block_size = s->samples_per_frame >> i;
473  int cutoff = (440*block_size + 3 * (s->avctx->sample_rate >> 1) - 1)
474  / s->avctx->sample_rate;
475  s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
476  }
477 
478  /** calculate sine values for the decorrelation matrix */
479  for (i = 0; i < 33; i++)
480  sin64[i] = sin(i*M_PI / 64.0);
481 
482  if (avctx->debug & FF_DEBUG_BITSTREAM)
483  dump_context(s);
484 
485  avctx->channel_layout = channel_mask;
486 
487  return 0;
488 }
489 
490 /**
491  *@brief Decode the subframe length.
492  *@param s context
493  *@param offset sample offset in the frame
494  *@return decoded subframe length on success, < 0 in case of an error
495  */
496 static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
497 {
498  int frame_len_shift = 0;
499  int subframe_len;
500 
501  /** no need to read from the bitstream when only one length is possible */
502  if (offset == s->samples_per_frame - s->min_samples_per_subframe)
503  return s->min_samples_per_subframe;
504 
505  if (get_bits_left(&s->gb) < 1)
506  return AVERROR_INVALIDDATA;
507 
508  /** 1 bit indicates if the subframe is of maximum length */
509  if (s->max_subframe_len_bit) {
510  if (get_bits1(&s->gb))
511  frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);
512  } else
513  frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);
514 
515  subframe_len = s->samples_per_frame >> frame_len_shift;
516 
517  /** sanity check the length */
518  if (subframe_len < s->min_samples_per_subframe ||
519  subframe_len > s->samples_per_frame) {
520  av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
521  subframe_len);
522  return AVERROR_INVALIDDATA;
523  }
524  return subframe_len;
525 }
526 
527 /**
528  *@brief Decode how the data in the frame is split into subframes.
529  * Every WMA frame contains the encoded data for a fixed number of
530  * samples per channel. The data for every channel might be split
531  * into several subframes. This function will reconstruct the list of
532  * subframes for every channel.
533  *
534  * If the subframes are not evenly split, the algorithm estimates the
535  * channels with the lowest number of total samples.
536  * Afterwards, for each of these channels a bit is read from the
537  * bitstream that indicates if the channel contains a subframe with the
538  * next subframe size that is going to be read from the bitstream or not.
539  * If a channel contains such a subframe, the subframe size gets added to
540  * the channel's subframe list.
541  * The algorithm repeats these steps until the frame is properly divided
542  * between the individual channels.
543  *
544  *@param s context
545  *@return 0 on success, < 0 in case of an error
546  */
547 static int decode_tilehdr(WMAProDecodeCtx *s)
548 {
549  uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
550  uint8_t contains_subframe[WMAPRO_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
551  int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
552  int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
553  int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
554  int c;
555 
556  /* Should never consume more than 3073 bits (256 iterations for the
557  * while loop when always the minimum amount of 128 samples is subtracted
558  * from missing samples in the 8 channel case).
559  * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
560  */
561 
562  /** reset tiling information */
563  for (c = 0; c < s->avctx->channels; c++)
564  s->channel[c].num_subframes = 0;
565 
566  if (s->max_num_subframes == 1 || get_bits1(&s->gb))
567  fixed_channel_layout = 1;
568 
569  /** loop until the frame data is split between the subframes */
570  do {
571  int subframe_len;
572 
573  /** check which channels contain the subframe */
574  for (c = 0; c < s->avctx->channels; c++) {
575  if (num_samples[c] == min_channel_len) {
576  if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
577  (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
578  contains_subframe[c] = 1;
579  else
580  contains_subframe[c] = get_bits1(&s->gb);
581  } else
582  contains_subframe[c] = 0;
583  }
584 
585  /** get subframe length, subframe_len == 0 is not allowed */
586  if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0)
587  return AVERROR_INVALIDDATA;
588 
589  /** add subframes to the individual channels and find new min_channel_len */
590  min_channel_len += subframe_len;
591  for (c = 0; c < s->avctx->channels; c++) {
592  WMAProChannelCtx* chan = &s->channel[c];
593 
594  if (contains_subframe[c]) {
595  if (chan->num_subframes >= MAX_SUBFRAMES) {
596  av_log(s->avctx, AV_LOG_ERROR,
597  "broken frame: num subframes > 31\n");
598  return AVERROR_INVALIDDATA;
599  }
600  chan->subframe_len[chan->num_subframes] = subframe_len;
601  num_samples[c] += subframe_len;
602  ++chan->num_subframes;
603  if (num_samples[c] > s->samples_per_frame) {
604  av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
605  "channel len > samples_per_frame\n");
606  return AVERROR_INVALIDDATA;
607  }
608  } else if (num_samples[c] <= min_channel_len) {
609  if (num_samples[c] < min_channel_len) {
610  channels_for_cur_subframe = 0;
611  min_channel_len = num_samples[c];
612  }
613  ++channels_for_cur_subframe;
614  }
615  }
616  } while (min_channel_len < s->samples_per_frame);
617 
618  for (c = 0; c < s->avctx->channels; c++) {
619  int i;
620  int offset = 0;
621  for (i = 0; i < s->channel[c].num_subframes; i++) {
622  av_dlog(s->avctx, "frame[%i] channel[%i] subframe[%i]"
623  " len %i\n", s->frame_num, c, i,
624  s->channel[c].subframe_len[i]);
625  s->channel[c].subframe_offset[i] = offset;
626  offset += s->channel[c].subframe_len[i];
627  }
628  }
629 
630  return 0;
631 }
632 
633 /**
634  *@brief Calculate a decorrelation matrix from the bitstream parameters.
635  *@param s codec context
636  *@param chgroup channel group for which the matrix needs to be calculated
637  */
638 static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
639  WMAProChannelGrp *chgroup)
640 {
641  int i;
642  int offset = 0;
643  int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
644  memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
645  s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
646 
647  for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
648  rotation_offset[i] = get_bits(&s->gb, 6);
649 
650  for (i = 0; i < chgroup->num_channels; i++)
651  chgroup->decorrelation_matrix[chgroup->num_channels * i + i] =
652  get_bits1(&s->gb) ? 1.0 : -1.0;
653 
654  for (i = 1; i < chgroup->num_channels; i++) {
655  int x;
656  for (x = 0; x < i; x++) {
657  int y;
658  for (y = 0; y < i + 1; y++) {
659  float v1 = chgroup->decorrelation_matrix[x * chgroup->num_channels + y];
660  float v2 = chgroup->decorrelation_matrix[i * chgroup->num_channels + y];
661  int n = rotation_offset[offset + x];
662  float sinv;
663  float cosv;
664 
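    /* sin64[] stores sin(x) for x = 0..pi/2 in steps of pi/64; the two
     * branches below evaluate sinv = sin(n*pi/64) and cosv = cos(n*pi/64),
     * so every pass applies a Givens rotation by the transmitted angle */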
665  if (n < 32) {
666  sinv = sin64[n];
667  cosv = sin64[32 - n];
668  } else {
669  sinv = sin64[64 - n];
670  cosv = -sin64[n - 32];
671  }
672 
673  chgroup->decorrelation_matrix[y + x * chgroup->num_channels] =
674  (v1 * sinv) - (v2 * cosv);
675  chgroup->decorrelation_matrix[y + i * chgroup->num_channels] =
676  (v1 * cosv) + (v2 * sinv);
677  }
678  }
679  offset += i;
680  }
681 }
682 
683 /**
684  *@brief Decode channel transformation parameters
685  *@param s codec context
686  *@return >= 0 in case of success, < 0 in case of bitstream errors
687  */
688 static int decode_channel_transform(WMAProDecodeCtx* s)
689 {
690  int i;
691  /* should never consume more than 1921 bits for the 8 channel case
692  * 1 + MAX_CHANNELS * (MAX_CHANNELS + 2 + 3 * MAX_CHANNELS * MAX_CHANNELS
693  * + MAX_CHANNELS + MAX_BANDS + 1)
694  */
695 
696  /** in the one channel case channel transforms are pointless */
697  s->num_chgroups = 0;
698  if (s->avctx->channels > 1) {
699  int remaining_channels = s->channels_for_cur_subframe;
700 
701  if (get_bits1(&s->gb)) {
702  avpriv_request_sample(s->avctx,
703  "Channel transform bit");
704  return AVERROR_PATCHWELCOME;
705  }
706 
707  for (s->num_chgroups = 0; remaining_channels &&
708  s->num_chgroups < s->avctx->channels; s->num_chgroups++) {
709  WMAProChannelGrp* chgroup = &s->chgroup[s->num_chgroups];
710  float** channel_data = chgroup->channel_data;
711  chgroup->num_channels = 0;
712  chgroup->transform = 0;
713 
714  /** decode channel mask */
715  if (remaining_channels > 2) {
716  for (i = 0; i < s->channels_for_cur_subframe; i++) {
717  int channel_idx = s->channel_indexes_for_cur_subframe[i];
718  if (!s->channel[channel_idx].grouped
719  && get_bits1(&s->gb)) {
720  ++chgroup->num_channels;
721  s->channel[channel_idx].grouped = 1;
722  *channel_data++ = s->channel[channel_idx].coeffs;
723  }
724  }
725  } else {
726  chgroup->num_channels = remaining_channels;
727  for (i = 0; i < s->channels_for_cur_subframe; i++) {
728  int channel_idx = s->channel_indexes_for_cur_subframe[i];
729  if (!s->channel[channel_idx].grouped)
730  *channel_data++ = s->channel[channel_idx].coeffs;
731  s->channel[channel_idx].grouped = 1;
732  }
733  }
734 
735  /** decode transform type */
736  if (chgroup->num_channels == 2) {
737  if (get_bits1(&s->gb)) {
738  if (get_bits1(&s->gb)) {
739  avpriv_request_sample(s->avctx,
740  "Unknown channel transform type");
741  return AVERROR_PATCHWELCOME;
742  }
743  } else {
744  chgroup->transform = 1;
745  if (s->avctx->channels == 2) {
746  chgroup->decorrelation_matrix[0] = 1.0;
747  chgroup->decorrelation_matrix[1] = -1.0;
748  chgroup->decorrelation_matrix[2] = 1.0;
749  chgroup->decorrelation_matrix[3] = 1.0;
750  } else {
751  /** cos(pi/4) */
752  chgroup->decorrelation_matrix[0] = 0.70703125;
753  chgroup->decorrelation_matrix[1] = -0.70703125;
754  chgroup->decorrelation_matrix[2] = 0.70703125;
755  chgroup->decorrelation_matrix[3] = 0.70703125;
756  }
757  }
758  } else if (chgroup->num_channels > 2) {
759  if (get_bits1(&s->gb)) {
760  chgroup->transform = 1;
761  if (get_bits1(&s->gb)) {
762  decode_decorrelation_matrix(s, chgroup);
763  } else {
764  /** FIXME: more than 6 coupled channels not supported */
765  if (chgroup->num_channels > 6) {
766  avpriv_request_sample(s->avctx,
767  "Coupled channels > 6");
768  } else {
769  memcpy(chgroup->decorrelation_matrix,
770  default_decorrelation[chgroup->num_channels],
771  chgroup->num_channels * chgroup->num_channels *
772  sizeof(*chgroup->decorrelation_matrix));
773  }
774  }
775  }
776  }
777 
778  /** decode transform on / off */
779  if (chgroup->transform) {
780  if (!get_bits1(&s->gb)) {
781  int i;
782  /** transform can be enabled for individual bands */
783  for (i = 0; i < s->num_bands; i++) {
784  chgroup->transform_band[i] = get_bits1(&s->gb);
785  }
786  } else {
787  memset(chgroup->transform_band, 1, s->num_bands);
788  }
789  }
790  remaining_channels -= chgroup->num_channels;
791  }
792  }
793  return 0;
794 }
795 
796 /**
797  *@brief Extract the coefficients from the bitstream.
798  *@param s codec context
799  *@param c current channel number
800  *@return 0 on success, < 0 in case of bitstream errors
801  */
802 static int decode_coeffs(WMAProDecodeCtx *s, int c)
803 {
804  /* Integers 0..15 as single-precision floats. The table saves a
805  costly int to float conversion, and storing the values as
806  integers allows fast sign-flipping. */
807  static const uint32_t fval_tab[16] = {
808  0x00000000, 0x3f800000, 0x40000000, 0x40400000,
809  0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
810  0x41000000, 0x41100000, 0x41200000, 0x41300000,
811  0x41400000, 0x41500000, 0x41600000, 0x41700000,
812  };
813  int vlctable;
814  VLC* vlc;
815  WMAProChannelCtx* ci = &s->channel[c];
816  int rl_mode = 0;
817  int cur_coeff = 0;
818  int num_zeros = 0;
819  const uint16_t* run;
820  const float* level;
821 
822  av_dlog(s->avctx, "decode coefficients for channel %i\n", c);
823 
824  vlctable = get_bits1(&s->gb);
825  vlc = &coef_vlc[vlctable];
826 
827  if (vlctable) {
828  run = coef1_run;
829  level = coef1_level;
830  } else {
831  run = coef0_run;
832  level = coef0_level;
833  }
834 
835  /** decode vector coefficients (consumes up to 167 bits per iteration for
836  4 vector coded large values) */
837  while ((s->transmit_num_vec_coeffs || !rl_mode) &&
838  (cur_coeff + 3 < ci->num_vec_coeffs)) {
839  uint32_t vals[4];
840  int i;
841  unsigned int idx;
842 
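    /* coefficient vectors are coded hierarchically: first a 4-value symbol is
     * tried; its escape value splits the vector into two 2-value symbols, and
     * their escape in turn codes two single values that can be extended
     * further with ff_wma_get_large_val() */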
843  idx = get_vlc2(&s->gb, vec4_vlc.table, VLCBITS, VEC4MAXDEPTH);
844 
845  if (idx == HUFF_VEC4_SIZE - 1) {
846  for (i = 0; i < 4; i += 2) {
847  idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
848  if (idx == HUFF_VEC2_SIZE - 1) {
849  uint32_t v0, v1;
850  v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
851  if (v0 == HUFF_VEC1_SIZE - 1)
852  v0 += ff_wma_get_large_val(&s->gb);
853  v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
854  if (v1 == HUFF_VEC1_SIZE - 1)
855  v1 += ff_wma_get_large_val(&s->gb);
856  vals[i ] = av_float2int(v0);
857  vals[i+1] = av_float2int(v1);
858  } else {
859  vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
860  vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
861  }
862  }
863  } else {
864  vals[0] = fval_tab[ symbol_to_vec4[idx] >> 12 ];
865  vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF];
866  vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF];
867  vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF];
868  }
869 
870  /** decode sign */
871  for (i = 0; i < 4; i++) {
872  if (vals[i]) {
873  uint32_t sign = get_bits1(&s->gb) - 1;
874  AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
875  num_zeros = 0;
876  } else {
877  ci->coeffs[cur_coeff] = 0;
878  /** switch to run level mode when subframe_len / 128 zeros
879  were found in a row */
880  rl_mode |= (++num_zeros > s->subframe_len >> 8);
881  }
882  ++cur_coeff;
883  }
884  }
885 
886  /** decode run level coded coefficients */
887  if (cur_coeff < s->subframe_len) {
888  memset(&ci->coeffs[cur_coeff], 0,
889  sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));
890  if (ff_wma_run_level_decode(s->avctx, &s->gb, vlc,
891  level, run, 1, ci->coeffs,
892  cur_coeff, s->subframe_len,
893  s->subframe_len, s->esc_len, 0))
894  return AVERROR_INVALIDDATA;
895  }
896 
897  return 0;
898 }
899 
900 /**
901  *@brief Extract scale factors from the bitstream.
902  *@param s codec context
903  *@return 0 on success, < 0 in case of bitstream errors
904  */
905 static int decode_scale_factors(WMAProDecodeCtx* s)
906 {
907  int i;
908 
909  /** should never consume more than 5344 bits
910  * MAX_CHANNELS * (1 + MAX_BANDS * 23)
911  */
912 
913  for (i = 0; i < s->channels_for_cur_subframe; i++) {
914  int c = s->channel_indexes_for_cur_subframe[i];
915  int* sf;
916  int* sf_end;
918  sf_end = s->channel[c].scale_factors + s->num_bands;
919 
920  /** resample scale factors for the new block size
921  * as the scale factors might need to be resampled several times
922  * before some new values are transmitted, a backup of the last
923  * transmitted scale factors is kept in saved_scale_factors
924  */
925  if (s->channel[c].reuse_sf) {
926  const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];
927  int b;
928  for (b = 0; b < s->num_bands; b++)
929  s->channel[c].scale_factors[b] =
930  s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
931  }
932 
933  if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {
934 
935  if (!s->channel[c].reuse_sf) {
936  int val;
937  /** decode DPCM coded scale factors */
938  s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
939  val = 45 / s->channel[c].scale_factor_step;
940  for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
941  val += get_vlc2(&s->gb, sf_vlc.table, SCALEVLCBITS, SCALEMAXDEPTH) - 60;
942  *sf = val;
943  }
944  } else {
945  int i;
946  /** run level decode differences to the resampled factors */
947  for (i = 0; i < s->num_bands; i++) {
948  int idx;
949  int skip;
950  int val;
951  int sign;
952 
953  idx = get_vlc2(&s->gb, sf_rl_vlc.table, VLCBITS, SCALERLMAXDEPTH);
954 
955  if (!idx) {
956  uint32_t code = get_bits(&s->gb, 14);
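    /* escape code layout: bits 13..6 = level, bits 5..1 = run, bit 0 = sign */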
957  val = code >> 6;
958  sign = (code & 1) - 1;
959  skip = (code & 0x3f) >> 1;
960  } else if (idx == 1) {
961  break;
962  } else {
963  skip = scale_rl_run[idx];
964  val = scale_rl_level[idx];
965  sign = get_bits1(&s->gb)-1;
966  }
967 
968  i += skip;
969  if (i >= s->num_bands) {
970  av_log(s->avctx, AV_LOG_ERROR,
971  "invalid scale factor coding\n");
972  return AVERROR_INVALIDDATA;
973  }
974  s->channel[c].scale_factors[i] += (val ^ sign) - sign;
975  }
976  }
977  /** swap buffers */
979  s->channel[c].table_idx = s->table_idx;
980  s->channel[c].reuse_sf = 1;
981  }
982 
983  /** calculate new scale factor maximum */
984  s->channel[c].max_scale_factor = *s->channel[c].scale_factors;
985  for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
986  s->channel[c].max_scale_factor =
987  FFMAX(s->channel[c].max_scale_factor, *sf);
988  }
989 
990  }
991  return 0;
992 }
993 
994 /**
995  *@brief Reconstruct the individual channel data.
996  *@param s codec context
997  */
998 static void inverse_channel_transform(WMAProDecodeCtx *s)
999 {
1000  int i;
1001 
1002  for (i = 0; i < s->num_chgroups; i++) {
1003  if (s->chgroup[i].transform) {
1004  float data[WMAPRO_MAX_CHANNELS];
1005  const int num_channels = s->chgroup[i].num_channels;
1006  float** ch_data = s->chgroup[i].channel_data;
1007  float** ch_end = ch_data + num_channels;
1008  const int8_t* tb = s->chgroup[i].transform_band;
1009  int16_t* sfb;
1010 
1011  /** multichannel decorrelation */
1012  for (sfb = s->cur_sfb_offsets;
1013  sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1014  int y;
1015  if (*tb++ == 1) {
1016  /** multiply values with the decorrelation_matrix */
1017  for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
1018  const float* mat = s->chgroup[i].decorrelation_matrix;
1019  const float* data_end = data + num_channels;
1020  float* data_ptr = data;
1021  float** ch;
1022 
1023  for (ch = ch_data; ch < ch_end; ch++)
1024  *data_ptr++ = (*ch)[y];
1025 
1026  for (ch = ch_data; ch < ch_end; ch++) {
1027  float sum = 0;
1028  data_ptr = data;
1029  while (data_ptr < data_end)
1030  sum += *data_ptr++ * *mat++;
1031 
1032  (*ch)[y] = sum;
1033  }
1034  }
1035  } else if (s->avctx->channels == 2) {
1036  int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
1037  s->fdsp.vector_fmul_scalar(ch_data[0] + sfb[0],
1038  ch_data[0] + sfb[0],
1039  181.0 / 128, len);
1040  s->fdsp.vector_fmul_scalar(ch_data[1] + sfb[0],
1041  ch_data[1] + sfb[0],
1042  181.0 / 128, len);
1043  }
1044  }
1045  }
1046  }
1047 }
1048 
1049 /**
1050  *@brief Apply sine window and reconstruct the output buffer.
1051  *@param s codec context
1052  */
1053 static void wmapro_window(WMAProDecodeCtx *s)
1054 {
1055  int i;
1056  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1057  int c = s->channel_indexes_for_cur_subframe[i];
1058  const float* window;
1059  int winlen = s->channel[c].prev_block_len;
1060  float* start = s->channel[c].coeffs - (winlen >> 1);
1061 
1062  if (s->subframe_len < winlen) {
1063  start += (winlen - s->subframe_len) >> 1;
1064  winlen = s->subframe_len;
1065  }
1066 
1067  window = s->windows[av_log2(winlen) - WMAPRO_BLOCK_MIN_BITS];
1068 
1069  winlen >>= 1;
1070 
1071  s->fdsp.vector_fmul_window(start, start, start + winlen,
1072  window, winlen);
1073 
1075  }
1076 }
1077 
1078 /**
1079  *@brief Decode a single subframe (block).
1080  *@param s codec context
1081  *@return 0 on success, < 0 when decoding failed
1082  */
1083 static int decode_subframe(WMAProDecodeCtx *s)
1084 {
1085  int offset = s->samples_per_frame;
1086  int subframe_len = s->samples_per_frame;
1087  int i;
1088  int total_samples = s->samples_per_frame * s->avctx->channels;
1089  int transmit_coeffs = 0;
1090  int cur_subwoofer_cutoff;
1091 
1092  s->subframe_offset = get_bits_count(&s->gb);
1093 
1094  /** reset channel context and find the next block offset and size
1095  == the next block of the channel with the smallest number of
1096  decoded samples
1097  */
1098  for (i = 0; i < s->avctx->channels; i++) {
1099  s->channel[i].grouped = 0;
1100  if (offset > s->channel[i].decoded_samples) {
1101  offset = s->channel[i].decoded_samples;
1102  subframe_len =
1103  s->channel[i].subframe_len[s->channel[i].cur_subframe];
1104  }
1105  }
1106 
1107  av_dlog(s->avctx,
1108  "processing subframe with offset %i len %i\n", offset, subframe_len);
1109 
1110  /** get a list of all channels that contain the estimated block */
1111  s->channels_for_cur_subframe = 0;
1112  for (i = 0; i < s->avctx->channels; i++) {
1113  const int cur_subframe = s->channel[i].cur_subframe;
1114  /** subtract already processed samples */
1115  total_samples -= s->channel[i].decoded_samples;
1116 
1117  /** and count if there are multiple subframes that match our profile */
1118  if (offset == s->channel[i].decoded_samples &&
1119  subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1120  total_samples -= s->channel[i].subframe_len[cur_subframe];
1121  s->channel[i].decoded_samples +=
1122  s->channel[i].subframe_len[cur_subframe];
1123  s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
1124  ++s->channels_for_cur_subframe;
1125  }
1126  }
1127 
1128  /** check if the frame will be complete after processing the
1129  estimated block */
1130  if (!total_samples)
1131  s->parsed_all_subframes = 1;
1132 
1133 
1134  av_dlog(s->avctx, "subframe is part of %i channels\n",
1135  s->channels_for_cur_subframe);
1136 
1137  /** calculate number of scale factor bands and their offsets */
1138  s->table_idx = av_log2(s->samples_per_frame/subframe_len);
1139  s->num_bands = s->num_sfb[s->table_idx];
1140  s->cur_sfb_offsets = s->sfb_offsets[s->table_idx];
1141  cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1142 
1143  /** configure the decoder for the current subframe */
1144  offset += s->samples_per_frame >> 1;
1145 
1146  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1147  int c = s->channel_indexes_for_cur_subframe[i];
1148 
1149  s->channel[c].coeffs = &s->channel[c].out[offset];
1150  }
1151 
1152  s->subframe_len = subframe_len;
1153  s->esc_len = av_log2(s->subframe_len - 1) + 1;
1154 
1155  /** skip extended header if any */
1156  if (get_bits1(&s->gb)) {
1157  int num_fill_bits;
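    /* 2-bit fill-bit count; the value 0 escapes to a 4-bit length field that
     * gives the size of an extended counter, whose value plus one is the
     * number of padding bits to skip */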
1158  if (!(num_fill_bits = get_bits(&s->gb, 2))) {
1159  int len = get_bits(&s->gb, 4);
1160  num_fill_bits = (len ? get_bits(&s->gb, len) : 0) + 1;
1161  }
1162 
1163  if (num_fill_bits >= 0) {
1164  if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
1165  av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n");
1166  return AVERROR_INVALIDDATA;
1167  }
1168 
1169  skip_bits_long(&s->gb, num_fill_bits);
1170  }
1171  }
1172 
1173  /** no idea what the following bit is used for */
1174  if (get_bits1(&s->gb)) {
1175  avpriv_request_sample(s->avctx, "Reserved bit");
1176  return AVERROR_PATCHWELCOME;
1177  }
1178 
1179 
1180  if (decode_channel_transform(s) < 0)
1181  return AVERROR_INVALIDDATA;
1182 
1183 
1184  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1185  int c = s->channel_indexes_for_cur_subframe[i];
1186  if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
1187  transmit_coeffs = 1;
1188  }
1189 
1191  if (transmit_coeffs) {
1192  int step;
1193  int quant_step = 90 * s->bits_per_sample >> 4;
1194 
1195  /** decode number of vector coded coefficients */
1196  if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
1197  int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
1198  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1199  int c = s->channel_indexes_for_cur_subframe[i];
1200  int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
1201  if (num_vec_coeffs > s->subframe_len) {
1202  av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
1203  return AVERROR_INVALIDDATA;
1204  }
1205  av_assert0(num_vec_coeffs + offset <= FF_ARRAY_ELEMS(s->channel[c].out));
1206  s->channel[c].num_vec_coeffs = num_vec_coeffs;
1207  }
1208  } else {
1209  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1210  int c = s->channel_indexes_for_cur_subframe[i];
1211  s->channel[c].num_vec_coeffs = s->subframe_len;
1212  }
1213  }
1214  /** decode quantization step */
1215  step = get_sbits(&s->gb, 6);
1216  quant_step += step;
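    /* the 6-bit signed delta saturates at -32/+31; these two values act as
     * escape codes after which 5-bit chunks are accumulated (every chunk of
     * 31 continues the run) and applied with the sign of the escape */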
1217  if (step == -32 || step == 31) {
1218  const int sign = (step == 31) - 1;
1219  int quant = 0;
1220  while (get_bits_count(&s->gb) + 5 < s->num_saved_bits &&
1221  (step = get_bits(&s->gb, 5)) == 31) {
1222  quant += 31;
1223  }
1224  quant_step += ((quant + step) ^ sign) - sign;
1225  }
1226  if (quant_step < 0) {
1227  av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n");
1228  }
1229 
1230  /** decode quantization step modifiers for every channel */
1231 
1232  if (s->channels_for_cur_subframe == 1) {
1233  s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1234  } else {
1235  int modifier_len = get_bits(&s->gb, 3);
1236  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1237  int c = s->channel_indexes_for_cur_subframe[i];
1238  s->channel[c].quant_step = quant_step;
1239  if (get_bits1(&s->gb)) {
1240  if (modifier_len) {
1241  s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;
1242  } else
1243  ++s->channel[c].quant_step;
1244  }
1245  }
1246  }
1247 
1248  /** decode scale factors */
1249  if (decode_scale_factors(s) < 0)
1250  return AVERROR_INVALIDDATA;
1251  }
1252 
1253  av_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
1254  get_bits_count(&s->gb) - s->subframe_offset);
1255 
1256  /** parse coefficients */
1257  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1258  int c = s->channel_indexes_for_cur_subframe[i];
1259  if (s->channel[c].transmit_coefs &&
1260  get_bits_count(&s->gb) < s->num_saved_bits) {
1261  decode_coeffs(s, c);
1262  } else
1263  memset(s->channel[c].coeffs, 0,
1264  sizeof(*s->channel[c].coeffs) * subframe_len);
1265  }
1266 
1267  av_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
1268  get_bits_count(&s->gb) - s->subframe_offset);
1269 
1270  if (transmit_coeffs) {
1271  FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
1272  /** reconstruct the per channel data */
1273  inverse_channel_transform(s);
1274  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1275  int c = s->channel_indexes_for_cur_subframe[i];
1276  const int* sf = s->channel[c].scale_factors;
1277  int b;
1278 
1279  if (c == s->lfe_channel)
1280  memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
1281  (subframe_len - cur_subwoofer_cutoff));
1282 
1283  /** inverse quantization and rescaling */
1284  for (b = 0; b < s->num_bands; b++) {
1285  const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
1286  const int exp = s->channel[c].quant_step -
1287  (s->channel[c].max_scale_factor - *sf++) *
1288  s->channel[c].scale_factor_step;
1289  const float quant = pow(10.0, exp / 20.0);
1290  int start = s->cur_sfb_offsets[b];
1291  s->fdsp.vector_fmul_scalar(s->tmp + start,
1292  s->channel[c].coeffs + start,
1293  quant, end - start);
1294  }
1295 
1296  /** apply imdct (imdct_half == DCTIV with reverse) */
1297  mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
1298  }
1299  }
1300 
1301  /** window and overlap-add */
1302  wmapro_window(s);
1303 
1304  /** handled one subframe */
1305  for (i = 0; i < s->channels_for_cur_subframe; i++) {
1306  int c = s->channel_indexes_for_cur_subframe[i];
1307  if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1308  av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
1309  return AVERROR_INVALIDDATA;
1310  }
1311  ++s->channel[c].cur_subframe;
1312  }
1313 
1314  return 0;
1315 }
1316 
1317 /**
1318  *@brief Decode one WMA frame.
1319  *@param s codec context
1320  *@return 0 if the trailer bit indicates that this is the last frame,
1321  * 1 if there are additional frames
1322  */
1323 static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
1324 {
1325  AVCodecContext *avctx = s->avctx;
1326  GetBitContext* gb = &s->gb;
1327  int more_frames = 0;
1328  int len = 0;
1329  int i, ret;
1330 
1331  /** get frame length */
1332  if (s->len_prefix)
1333  len = get_bits(gb, s->log2_frame_size);
1334 
1335  av_dlog(s->avctx, "decoding frame with length %x\n", len);
1336 
1337  /** decode tile information */
1338  if (decode_tilehdr(s)) {
1339  s->packet_loss = 1;
1340  return 0;
1341  }
1342 
1343  /** read postproc transform */
1344  if (s->avctx->channels > 1 && get_bits1(gb)) {
1345  if (get_bits1(gb)) {
1346  for (i = 0; i < avctx->channels * avctx->channels; i++)
1347  skip_bits(gb, 4);
1348  }
1349  }
1350 
1351  /** read drc info */
1352  if (s->dynamic_range_compression) {
1353  s->drc_gain = get_bits(gb, 8);
1354  av_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);
1355  }
1356 
1357  /** no idea what these are for, might be the number of samples
1358  that need to be skipped at the beginning or end of a stream */
1359  if (get_bits1(gb)) {
1360  int av_unused skip;
1361 
1362  /** usually true for the first frame */
1363  if (get_bits1(gb)) {
1364  skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1365  av_dlog(s->avctx, "start skip: %i\n", skip);
1366  }
1367 
1368  /** sometimes true for the last frame */
1369  if (get_bits1(gb)) {
1370  skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1371  av_dlog(s->avctx, "end skip: %i\n", skip);
1372  }
1373 
1374  }
1375 
1376  av_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",
1377  get_bits_count(gb) - s->frame_offset);
1378 
1379  /** reset subframe states */
1380  s->parsed_all_subframes = 0;
1381  for (i = 0; i < avctx->channels; i++) {
1382  s->channel[i].decoded_samples = 0;
1383  s->channel[i].cur_subframe = 0;
1384  s->channel[i].reuse_sf = 0;
1385  }
1386 
1387  /** decode all subframes */
1388  while (!s->parsed_all_subframes) {
1389  if (decode_subframe(s) < 0) {
1390  s->packet_loss = 1;
1391  return 0;
1392  }
1393  }
1394 
1395  /* get output buffer */
1396  frame->nb_samples = s->samples_per_frame;
1397  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1398  s->packet_loss = 1;
1399  return 0;
1400  }
1401 
1402  /** copy samples to the output buffer */
1403  for (i = 0; i < avctx->channels; i++)
1404  memcpy(frame->extended_data[i], s->channel[i].out,
1405  s->samples_per_frame * sizeof(*s->channel[i].out));
1406 
1407  for (i = 0; i < avctx->channels; i++) {
1408  /** reuse second half of the IMDCT output for the next frame */
1409  memcpy(&s->channel[i].out[0],
1410  &s->channel[i].out[s->samples_per_frame],
1411  s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
1412  }
1413 
1414  if (s->skip_frame) {
1415  s->skip_frame = 0;
1416  *got_frame_ptr = 0;
1417  av_frame_unref(frame);
1418  } else {
1419  *got_frame_ptr = 1;
1420  }
1421 
1422  if (s->len_prefix) {
1423  if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
1424  /** FIXME: not sure if this is always an error */
1426  "frame[%"PRIu32"] would have to skip %i bits\n",
1427  s->frame_num,
1428  len - (get_bits_count(gb) - s->frame_offset) - 1);
1429  s->packet_loss = 1;
1430  return 0;
1431  }
1432 
1433  /** skip the rest of the frame data */
1434  skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
1435  } else {
1436  while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
1437  }
1438  }
1439 
1440  /** decode trailer bit */
1441  more_frames = get_bits1(gb);
1442 
1443  ++s->frame_num;
1444  return more_frames;
1445 }
1446 
1447 /**
1448  *@brief Calculate remaining input buffer length.
1449  *@param s codec context
1450  *@param gb bitstream reader context
1451  *@return remaining size in bits
1452  */
1453 static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
1454 {
1455  return s->buf_bit_size - get_bits_count(gb);
1456 }
1457 
1458 /**
1459  *@brief Fill the bit reservoir with a (partial) frame.
1460  *@param s codec context
1461  *@param gb bitstream reader context
1462  *@param len length of the partial frame
1463  *@param append decides whether to reset the buffer or not
1464  */
1465 static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
1466  int append)
1467 {
1468  int buflen;
1469 
1470  /** when the frame data does not need to be concatenated, the input buffer
1471  is reset and additional bits from the previous frame are copied
1472  and skipped later so that a fast byte copy is possible */
1473 
1474  if (!append) {
1475  s->frame_offset = get_bits_count(gb) & 7;
476  s->num_saved_bits = s->frame_offset;
477  init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
478  }
1479 
1480  buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
1481 
1482  if (len <= 0 || buflen > MAX_FRAMESIZE) {
1483  avpriv_request_sample(s->avctx, "Too small input buffer");
1484  s->packet_loss = 1;
1485  return;
1486  }
1487 
1488  av_assert0(len <= put_bits_left(&s->pb));
1489 
1490  s->num_saved_bits += len;
1491  if (!append) {
1492  avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
1493  s->num_saved_bits);
1494  } else {
1495  int align = 8 - (get_bits_count(gb) & 7);
1496  align = FFMIN(align, len);
1497  put_bits(&s->pb, align, get_bits(gb, align));
1498  len -= align;
1499  avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len);
1500  }
1501  skip_bits_long(gb, len);
1502 
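    /* flush a copy of the PutBitContext so the bits buffered so far become
     * readable from the frame_data buffer while s->pb itself can still be
     * appended to later */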
1503  {
1504  PutBitContext tmp = s->pb;
1505  flush_put_bits(&tmp);
1506  }
1507 
1508  init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
1509  skip_bits(&s->gb, s->frame_offset);
1510 }
1511 
1512 /**
1513  *@brief Decode a single WMA packet.
1514  *@param avctx codec context
1515  *@param data the output buffer
1516  *@param avpkt input packet
1517  *@return number of bytes that were read from the input buffer
1518  */
1519 static int decode_packet(AVCodecContext *avctx, void *data,
1520  int *got_frame_ptr, AVPacket* avpkt)
1521 {
1522  WMAProDecodeCtx *s = avctx->priv_data;
1523  GetBitContext* gb = &s->pgb;
1524  const uint8_t* buf = avpkt->data;
1525  int buf_size = avpkt->size;
1526  int num_bits_prev_frame;
1527  int packet_sequence_number;
1528 
1529  *got_frame_ptr = 0;
1530 
1531  if (s->packet_done || s->packet_loss) {
1532  s->packet_done = 0;
1533 
1534  /** sanity check for the buffer length */
1535  if (buf_size < avctx->block_align) {
1536  av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
1537  buf_size, avctx->block_align);
1538  return AVERROR_INVALIDDATA;
1539  }
1540 
1541  s->next_packet_start = buf_size - avctx->block_align;
1542  buf_size = avctx->block_align;
1543  s->buf_bit_size = buf_size << 3;
1544 
1545  /** parse packet header */
1546  init_get_bits(gb, buf, s->buf_bit_size);
1547  packet_sequence_number = get_bits(gb, 4);
1548  skip_bits(gb, 2);
1549 
1550  /** get number of bits that need to be added to the previous frame */
1551  num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
1552  av_dlog(avctx, "packet[%d]: nbpf %x\n", avctx->frame_number,
1553  num_bits_prev_frame);
1554 
1555  /** check for packet loss */
1556  if (!s->packet_loss &&
1557  ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1558  s->packet_loss = 1;
1559  av_log(avctx, AV_LOG_ERROR,
1560  "Packet loss detected! seq %"PRIx8" vs %x\n",
1561  s->packet_sequence_number, packet_sequence_number);
1562  }
1563  s->packet_sequence_number = packet_sequence_number;
1564 
1565  if (num_bits_prev_frame > 0) {
1566  int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb);
1567  if (num_bits_prev_frame >= remaining_packet_bits) {
1568  num_bits_prev_frame = remaining_packet_bits;
1569  s->packet_done = 1;
1570  }
1571 
1572  /** append the previous frame data to the remaining data from the
1573  previous packet to create a full frame */
1574  save_bits(s, gb, num_bits_prev_frame, 1);
1575  av_dlog(avctx, "accumulated %x bits of frame data\n",
1576  s->num_saved_bits - s->frame_offset);
1577 
1578  /** decode the cross packet frame if it is valid */
1579  if (!s->packet_loss)
1580  decode_frame(s, data, got_frame_ptr);
1581  } else if (s->num_saved_bits - s->frame_offset) {
1582  av_dlog(avctx, "ignoring %x previously saved bits\n",
1583  s->num_saved_bits - s->frame_offset);
1584  }
1585 
1586  if (s->packet_loss) {
1587  /** reset number of saved bits so that the decoder
1588  does not start to decode incomplete frames in the
1589  s->len_prefix == 0 case */
1590  s->num_saved_bits = 0;
1591  s->packet_loss = 0;
1592  }
1593 
1594  } else {
1595  int frame_size;
1596  s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
1597  init_get_bits(gb, avpkt->data, s->buf_bit_size);
1598  skip_bits(gb, s->packet_offset);
1599  if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size &&
1600  (frame_size = show_bits(gb, s->log2_frame_size)) &&
1601  frame_size <= remaining_bits(s, gb)) {
1602  save_bits(s, gb, frame_size, 0);
1603  if (!s->packet_loss)
1604  s->packet_done = !decode_frame(s, data, got_frame_ptr);
1605  } else if (!s->len_prefix
1606  && s->num_saved_bits > get_bits_count(&s->gb)) {
1607  /** when the frames do not have a length prefix, we don't know
1608  the compressed length of the individual frames
1609  however, we know what part of a new packet belongs to the
1610  previous frame
1611  therefore we save the incoming packet first, then we append
1612  the "previous frame" data from the next packet so that
1613  we get a buffer that only contains full frames */
1614  s->packet_done = !decode_frame(s, data, got_frame_ptr);
1615  } else
1616  s->packet_done = 1;
1617  }
1618 
1619  if (s->packet_done && !s->packet_loss &&
1620  remaining_bits(s, gb) > 0) {
1621  /** save the rest of the data so that it can be decoded
1622  with the next packet */
1623  save_bits(s, gb, remaining_bits(s, gb), 0);
1624  }
1625 
1626  s->packet_offset = get_bits_count(gb) & 7;
1627  if (s->packet_loss)
1628  return AVERROR_INVALIDDATA;
1629 
1630  return get_bits_count(gb) >> 3;
1631 }
1632 
1633 /**
1634  *@brief Clear decoder buffers (for seeking).
1635  *@param avctx codec context
1636  */
1637 static void flush(AVCodecContext *avctx)
1638 {
1639  WMAProDecodeCtx *s = avctx->priv_data;
1640  int i;
1641  /** reset output buffer as a part of it is used during the windowing of a
1642  new frame */
1643  for (i = 0; i < avctx->channels; i++)
1644  memset(s->channel[i].out, 0, s->samples_per_frame *
1645  sizeof(*s->channel[i].out));
1646  s->packet_loss = 1;
1647 }
1648 
1649 
1650 /**
1651  *@brief wmapro decoder
1652  */
1653 AVCodec ff_wmapro_decoder = {
1654  .name = "wmapro",
1655  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
1656  .type = AVMEDIA_TYPE_AUDIO,
1657  .id = AV_CODEC_ID_WMAPRO,
1658  .priv_data_size = sizeof(WMAProDecodeCtx),
1659  .init = decode_init,
1660  .close = decode_end,
1661  .decode = decode_packet,
1662  .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
1663  .flush = flush,
1664  .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1665  AV_SAMPLE_FMT_NONE },
1666 };