#define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)

#define SAMPLES_BUF_SIZE 4096

float scale_factor_inv_table[64];
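MUL() is the encoder's fixed-point multiply: both operands are widened to 64 bits so the product cannot overflow, then shifted back down by FRAC_BITS so the result stays in the same Q format. A minimal standalone sketch (the FRAC_BITS value here is assumed for illustration; the encoder defines its own):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 15  /* assumed fraction width for this sketch */
    #define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)

    int main(void)
    {
        int32_t half    = 1 << (FRAC_BITS - 1);  /* 0.5 in Q15 */
        int32_t quarter = 1 << (FRAC_BITS - 2);  /* 0.25 in Q15 */
        /* 0.5 * 0.25 = 0.125, i.e. 1 << (FRAC_BITS - 3) */
        printf("%d == %d\n", (int)MUL(half, quarter), 1 << (FRAC_BITS - 3));
        return 0;
    }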
av_log(avctx, AV_LOG_ERROR,
       "encoding %d channel(s) is not allowed in mp2\n", channels);
bitrate = bitrate / 1000;
ff_dlog(avctx, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
        bitrate, freq, s->frame_size, table, s->frame_frac_incr);
v = (int)(exp2((3 - i) / 3.0) * (1 << 20));

s->scale_factor_inv_table[i] = exp2(-(3 - i) / 3.0) / (float)(1 << 20);
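Both tables realize the Layer II scale-factor grid: consecutive indices step by a factor of 2^(1/3), i.e. roughly 2 dB, and the 2^20 factor is the fixed-point scaling. In formula form:

    \[
    \mathrm{scale\_factor\_table}[i] = \left\lfloor 2^{(3-i)/3} \cdot 2^{20} \right\rfloor,
    \qquad
    \mathrm{scale\_factor\_inv\_table}[i] = 2^{-(3-i)/3} \cdot 2^{-20}
    \]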
for(j=31;j>=3;j-=2) tab[j] += tab[j - 2];
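Because the loop runs downward, tab[j-2] is still unmodified when it is read, so this pass forms pairwise sums of neighbouring odd inputs, not cumulative sums. Spelled out:

    for (j = 31; j >= 3; j -= 2) {
        int below = tab[j - 2];   /* original value: not yet touched this pass */
        tab[j] += below;          /* tab[j] = orig[j] + orig[j-2] */
    }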
x1 = MUL((t[8] - x2), xp[0]);
x2 = MUL((t[8] + x2), xp[1]);
xr = MUL(t[28],xp[0]);
t[28] = (t[0] - xr);
t[ 0] = (t[0] + xr);

xr = MUL(t[4],xp[1]);
t[ 4] = (t[24] - xr);
t[24] = (t[24] + xr);

xr = MUL(t[20],xp[2]);
t[20] = (t[8] - xr);
t[ 8] = (t[8] + xr);

xr = MUL(t[12],xp[3]);
t[12] = (t[16] - xr);
t[16] = (t[16] + xr);
for (i = 0; i < 4; i++) {
    xr = MUL(tab[30-i*4],xp[0]);
    tab[30-i*4] = (tab[ i*4] - xr);
    tab[ i*4]   = (tab[ i*4] + xr);

    xr = MUL(tab[ 2+i*4],xp[1]);
    tab[ 2+i*4] = (tab[28-i*4] - xr);
    tab[28-i*4] = (tab[28-i*4] + xr);

    xr = MUL(tab[31-i*4],xp[0]);
    tab[31-i*4] = (tab[ 1+i*4] - xr);
    tab[ 1+i*4] = (tab[ 1+i*4] + xr);

    xr = MUL(tab[ 3+i*4],xp[1]);
    tab[ 3+i*4] = (tab[29-i*4] - xr);
    tab[29-i*4] = (tab[29-i*4] + xr);
xr = MUL(t1[0], *xp);
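idct32() is the fast form of the 32-point cosine transform at the heart of the ISO 11172-3 analysis matrixing. For orientation (formula from the spec, not from this file), the subband samples are

    \[
    S_i = \sum_{k=0}^{63} \cos\!\left(\frac{(2i+1)(k-16)\pi}{64}\right) Y_k,
    \qquad i = 0,\dots,31,
    \]

where the Y_k are the windowed partial sums; the folding into tmp1[] shown further below reduces these 64 values to the 32 inputs of the transform.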
#define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS)

sum  = p[0*64] * q[0*64];
sum += p[1*64] * q[1*64];
sum += p[2*64] * q[2*64];
sum += p[3*64] * q[3*64];
sum += p[4*64] * q[4*64];
sum += p[5*64] * q[5*64];
sum += p[6*64] * q[6*64];
sum += p[7*64] * q[7*64];
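The eight unrolled products compute one phase of the 512-tap analysis window: both pointers use a stride of 64, so the 8 taps of a polyphase component sit 64 entries apart in the sample buffer and in the window table. An equivalent loop form (operand types simplified for the sketch):

    static int window_dot(const short *p, const short *q)
    {
        int sum = 0, k;
        for (k = 0; k < 8; k++)
            sum += p[k * 64] * q[k * 64];
        return sum;
    }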
tmp1[0] = tmp[16] >> WSHIFT;
for( i=1; i<=16; i++ ) tmp1[i] = (tmp[i+16]+tmp[16-i]) >> WSHIFT;
for( i=17; i<=31; i++ ) tmp1[i] = (tmp[i+16]-tmp[80-i]) >> WSHIFT;
int *p, vmax, v, n, i, j, k, code;
index = (21 - n) * 3 - 3;
switch(d1 * 5 + d2) {
sf[1] = sf[2] = sf[0];

sf[0] = sf[1] = sf[2];

sf[0] = sf[2] = sf[1];

sf[1] = sf[2] = sf[0];

sf[0], sf[1], sf[2], d1, d2, code);
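In the switch above, d1 and d2 classify the two successive scale-factor differences of a subband into five classes each, and d1 * 5 + d2 enumerates the 25 combinations to select which of the three scale factors are actually transmitted (the case bodies excerpted here). A readable restatement of the classification, matching the scale_diff_table built in MPA_encode_init as I read it there:

    /* Class of the difference between two successive scale factors:
     * 0: diff <= -3, 1: -3 < diff < 0, 2: diff == 0, 3: 0 < diff < 3, 4: diff >= 3 */
    static int sf_diff_class(int diff)
    {
        if (diff <= -3) return 0;
        if (diff <   0) return 1;
        if (diff ==  0) return 2;
        if (diff <   3) return 3;
        return 4;
    }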
#define SB_NOTALLOCATED 0
#define SB_ALLOCATED    1
#define SB_NOMORE       2

int i, ch, b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size;
const unsigned char *alloc;
memcpy(smr, smr1, s->nb_channels * sizeof(short) * SBLIMIT);
current_frame_size = 32;
if (smr[ch][i] > max_smr && subband_status[ch][i] != SB_NOMORE) {
    max_smr = smr[ch][i];
ff_dlog(NULL, "current=%d max=%d max_sb=%d max_ch=%d alloc=%d\n",
        current_frame_size, max_frame_size, max_sb, max_ch,
        bit_alloc[max_ch][max_sb]);
for(i=0;i<max_sb;i++) {
    alloc += 1 << alloc[0];
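The walk alloc += 1 << alloc[0] implies the layout of the Layer II allocation tables as used here: for each subband, entry 0 is the bit width of the allocation field and the following (1 << width) - 1 entries give the quantizer indices selectable in that subband. A sketch of stepping over n subbands (the helper name is illustrative):

    static const unsigned char *skip_subbands(const unsigned char *tab, int n)
    {
        while (n-- > 0)
            tab += 1 << tab[0];   /* width byte + (1 << width) - 1 indices */
        return tab;
    }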
if (current_frame_size + incr <= max_frame_size) {
    /* can increase the allocation of this subband */
    b = ++bit_alloc[max_ch][max_sb];
    current_frame_size += incr;
    /* recompute the smr for the new allocation */
    smr[max_ch][max_sb] = smr1[max_ch][max_sb] - quant_snr[alloc[b]];
    /* stop revisiting this subband once its allocation field is saturated */
    if (b == ((1 << alloc[0]) - 1))
        subband_status[max_ch][max_sb] = SB_NOMORE;
subband_status[max_ch][max_sb] = SB_NOMORE;

*padding = max_frame_size - current_frame_size;
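Taken together, compute_bit_allocation is a greedy marginal allocation: repeatedly pick the (channel, subband) with the worst remaining signal-to-mask ratio, give it one more quantization step, and update its SMR, until the frame budget is exhausted. A compact restatement of the idea, not the exact FFmpeg code: cost_per_step and snr_gain_per_step flatten the per-table lookups (quant_snr[], the allocation tables) and all names are illustrative.

    #include <limits.h>

    #define SBLIMIT_SKETCH 32  /* stand-in for SBLIMIT in this sketch */

    static void greedy_alloc(int nb_ch, int sblimit, int bits_left,
                             int cost_per_step, int snr_gain_per_step,
                             int smr[][SBLIMIT_SKETCH],
                             const int max_steps[][SBLIMIT_SKETCH],
                             int alloc[][SBLIMIT_SKETCH])
    {
        for (;;) {
            int ch, sb, best_ch = -1, best_sb = 0, worst = INT_MIN;
            /* find the (channel, subband) that currently sounds worst */
            for (ch = 0; ch < nb_ch; ch++)
                for (sb = 0; sb < sblimit; sb++)
                    if (alloc[ch][sb] < max_steps[ch][sb] && smr[ch][sb] > worst) {
                        worst = smr[ch][sb];
                        best_ch = ch;
                        best_sb = sb;
                    }
            if (best_ch < 0 || cost_per_step > bits_left)
                break;                                  /* all maxed, or budget gone */
            bits_left -= cost_per_step;
            alloc[best_ch][best_sb]++;                  /* one more quantizer step */
            smr[best_ch][best_sb] -= snr_gain_per_step; /* its need just dropped */
        }
    }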
int i, j, k, l, bit_alloc_bits, b, ch;
j += 1 << bit_alloc_bits;
a = (float)sample * s->scale_factor_inv_table[s->scale_factors[ch][i][k]];
q[m] = (int)((a + 1.0) * steps * 0.5);
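The float path maps each sample, normalized by its scale factor to [-1, 1), onto the steps quantization levels with a half-range offset; the source then clamps values that hit steps so a full-scale sample cannot overflow the range. In formula form:

    \[
    q = \left\lfloor \left(\frac{x}{\mathrm{sf}} + 1\right) \cdot \frac{\mathrm{steps}}{2} \right\rfloor,
    \qquad 0 \le q < \mathrm{steps}
    \]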
if (shift < 0)
    q1 = sample * (1 << -shift);
else
    q1 = sample >> shift;
q1 = (q1 * mult) >> P;

q[m] = (q1 * (unsigned)steps) >> (P + 1);
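In the fixed-point path the division by the scale factor becomes a shift plus a multiply by a precomputed Q_P reciprocal, which is what the statements above compute:

    \[
    \frac{x}{\mathrm{sf}} \approx \bigl(x \gg \mathrm{shift}\bigr) \cdot \mathrm{mult} \cdot 2^{-P}
    \]

with shift = scale_factor_shift[i] and mult = scale_factor_mult[i]; the final line rescales the normalized value onto the steps quantization levels.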
q[0] + steps * (q[1] + steps * q[2]));
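Grouped quantizers (steps = 3, 5 or 9) pack three consecutive quantized samples into one base-steps codeword, which is what the expression above builds:

    \[
    v = q_0 + \mathrm{steps}\cdot q_1 + \mathrm{steps}^2\cdot q_2
    \]

For steps = 3 this puts 27 combinations into a 5-bit field instead of spending 3 x 2 = 6 bits; steps = 5 and steps = 9 fit into 7 and 10 bits respectively.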
j += 1 << bit_alloc_bits;
for(i=0;i<padding;i++)
    put_bits(&s->pb, 1, 0);
const int16_t *samples = (const int16_t *)frame->data[0];
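Since the input is interleaved S16, each channel's analysis filter walks the buffer from its channel offset with a stride equal to the channel count. A sketch of the call pattern into filter(), as the surrounding function suggests:

    for (i = 0; i < s->nb_channels; i++)
        filter(s, i, samples + i, s->nb_channels);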
#define MPA_MAX_CODED_FRAME_SIZE
struct AVFrame
This structure describes decoded (raw) audio or video data.
static void put_bits(PutBitContext *s, int n, unsigned int value)
Write up to 31 bits into a bitstream.
int64_t bit_rate
the average bitrate
unsigned char scale_diff_table[128]
static const unsigned char nb_scale_factors[4]
unsigned short scale_factor_mult[64]
unsigned short total_quant_bits[17]
const int ff_mpa_quant_bits[17]
mpegaudiodata.h: mpeg audio layer common tables.
const int32_t ff_mpa_enwindow[257]
static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
#define av_assert0(cond)
assert() equivalent that is always enabled.
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static const AVCodecDefault mp2_defaults[]
#define av_assert2(cond)
assert() equivalent for speed-critical code; compiled out at default assert levels.
static const int costab32[30]
const int ff_mpa_quant_steps[17]
int scale_factor_table[64]
const uint16_t avpriv_mpa_freq_tab[3]
const unsigned char *const ff_mpa_alloc_tables[5]
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]
mpegaudiotab.h: mpeg audio layer 2 tables.
static void compute_bit_allocation(MpegAudioContext *s, short smr1[MPA_MAX_CHANNELS][SBLIMIT], unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int *padding)
unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT]
static const unsigned short quant_snr[17]
static av_cold int MPA_encode_init(AVCodecContext *avctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void compute_scale_factors(MpegAudioContext *s, unsigned char scale_code[SBLIMIT], unsigned char scale_factors[SBLIMIT][3], int sb_samples[3][12][SBLIMIT], int sblimit)
int initial_padding
Audio only.
static const int bitinv32[32]
static int put_bits_count(PutBitContext *s)
const unsigned char *alloc_table
int8_t scale_factor_shift[64]
channel_layout.h: audio channel layout utility functions.
static void encode_frame(MpegAudioContext *s, unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int padding)
int frame_size
Number of samples per channel in an audio frame.
avcodec.h: Libavcodec external API header.
int samples_offset[MPA_MAX_CHANNELS]
int sample_rate
samples per second
static const float fixed_smr[SBLIMIT]
struct AVCodecContext
main external API structure.
short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]
static void idct32(int *out, int *tab)
uint8_t *data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
internal.h: common internal API header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
mpegaudio.h: mpeg audio declarations for both encoder and decoder.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
const int ff_mpa_sblimit_table[5]
static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT])
int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf)
int channels
number of audio channels
const uint16_t avpriv_mpa_bitrate_tab[2][3][15]
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT]
struct AVPacket
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_NOPTS_VALUE
Undefined timestamp value.