/* excerpt from ps_add_squares_c() */
for (i = 0; i < n; i++)
/* excerpt from ps_mul_pair_single_c() */
for (i = 0; i < n; i++) {
/* excerpts from ps_hybrid_analysis_c() */
for (i = 0; i < n; i++) {
    for (j = 0; j < 6; j++) {
/* excerpt from ps_hybrid_analysis_ileave_c() */
for (j = 0; j < len; j++) {
    out[i][j][0] = L[0][j][i];
    out[i][j][1] = L[1][j][i];
}
/* excerpt from ps_hybrid_synthesis_deint_c() */
for (n = 0; n < len; n++) {
    out[0][n][i] = in[i][n][0];
    out[1][n][i] = in[i][n][1];
}
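Read side by side, the two excerpts above are inverse layout permutations: ps_hybrid_analysis_ileave_c() gathers the planar hybrid-QMF data L[re/im][slot][channel] into the interleaved per-channel buffer out[channel][slot][re/im], while ps_hybrid_synthesis_deint_c() scatters the interleaved samples back out into the two planar real/imaginary planes.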
/* excerpts from ps_decorrelate_c(); the two Q31() constants are the tail of
   the static allpass-link coefficient table */
                              Q31(0.56471812200776f),
                              Q31(0.48954165955695f) };

for (n = 0; n < len; n++) {
        /* inside the loop over the allpass links, index m */
        INTFLOAT link_delay_re       = ap_delay[m][n+2-m][0];
        INTFLOAT link_delay_im       = ap_delay[m][n+2-m][1];
        INTFLOAT fractional_delay_re = Q_fract[m][0];
        INTFLOAT fractional_delay_im = Q_fract[m][1];

        in_re = AAC_MSUB30(link_delay_re, fractional_delay_re,
                           link_delay_im, fractional_delay_im);
        in_im = AAC_MADD30(link_delay_re, fractional_delay_im,
                           link_delay_im, fractional_delay_re);

    out[n][0] = AAC_MUL16(transient_gain[n], in_re);
    out[n][1] = AAC_MUL16(transient_gain[n], in_im);
}
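In the decorrelator excerpt above, each AAC_MSUB30()/AAC_MADD30() pair is one complex multiplication: the stored link-delay sample is rotated by the fractional-delay phasor Q_fract[m]. Expanded with plain arithmetic it computes roughly the following (a sketch of what the macros do; the fixed-point build additionally applies Q30 scaling and rounding):

/* complex product (link_delay_re + i*link_delay_im) * (fractional_delay_re + i*fractional_delay_im) */
in_re = link_delay_re * fractional_delay_re - link_delay_im * fractional_delay_im;  /* real part      */
in_im = link_delay_re * fractional_delay_im + link_delay_im * fractional_delay_re;  /* imaginary part */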
/* excerpt from ps_stereo_interpolate_c() */
for (n = 0; n < len; n++) {
/* excerpts from ps_stereo_interpolate_ipdopd_c() */
INTFLOAT  h00 = h[0][0],        h10 = h[1][0];
INTFLOAT  h01 = h[0][1],        h11 = h[1][1];
INTFLOAT  h02 = h[0][2],        h12 = h[1][2];
INTFLOAT  h03 = h[0][3],        h13 = h[1][3];
UINTFLOAT hs00 = h_step[0][0],  hs10 = h_step[1][0];
UINTFLOAT hs01 = h_step[0][1],  hs11 = h_step[1][1];
UINTFLOAT hs02 = h_step[0][2],  hs12 = h_step[1][2];
UINTFLOAT hs03 = h_step[0][3],  hs13 = h_step[1][3];

for (n = 0; n < len; n++) {
    /* l_re/l_im and r_re/r_im hold the current left/right samples; the hXX
       coefficients are advanced by their hsXX steps once per iteration */
    l[n][0] = AAC_MSUB30_V8(h00, l_re, h02, r_re, h10, l_im, h12, r_im);
    l[n][1] = AAC_MADD30_V8(h00, l_im, h02, r_im, h10, l_re, h13, r_re);
    r[n][0] = AAC_MSUB30_V8(h01, l_re, h03, r_re, h11, l_im, h13, r_im);
    r[n][1] = AAC_MADD30_V8(h01, l_im, h03, r_im, h11, l_re, h13, r_re);
}
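Reading the four-operand macros as sums and differences of four products, the four assignments above apply a per-sample 2x2 complex mixing matrix to the stereo pair. In complex notation (L = l_re + i*l_im, R = r_re + i*r_im; a sketch derived from the code above, not quoted from the spec):

    L' = (h00 + i*h10) * L  +  (h02 + i*h12) * R
    R' = (h01 + i*h11) * L  +  (h03 + i*h13) * R

with the coefficients h00..h13 stepped by hs00..hs13 once per sample, so the mixing matrix is interpolated across the len samples.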
void ff_psdsp_init_arm(PSDSPContext *s)
static void ps_add_squares_c(INTFLOAT *dst, const INTFLOAT(*src)[2], int n)
The reader does not expect b to be semantically unsigned here, and if the code is changed (by maybe adding a division or other), the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a …
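A minimal sketch of the idiom described above (the helper name and its arguments are illustrative, not FFmpeg API):

#include <stdint.h>

/* The variable is declared with the unsigned type (FFmpeg spells it SUINT)
 * but the value it carries is semantically a signed int, so the
 * multiplication wraps instead of hitting signed-overflow undefined
 * behaviour. */
static int32_t mul32_wrap(int32_t a, int32_t b)
{
    unsigned p = (unsigned)a * (unsigned)b;  /* SUINT p = a * b; in FFmpeg style */
    return (int32_t)p;                       /* the result is still read back as signed */
}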
void ff_psdsp_init_aarch64(PSDSPContext *s)
Macro definitions for various function/variable attributes.
#define AAC_MADD28(x, y, a, b)
static void ps_hybrid_synthesis_deint_c(INTFLOAT out[2][38][64], INTFLOAT(*in)[32][2], int i, int len)
av_cold void AAC_RENAME(ff_psdsp_init)(PSDSPContext *s)
#define AAC_MSUB30(x, y, a, b)
filter_frame: For filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, then the filter should push the output frames on the output link immediately. As an exception to the previous rule, if the input frame is enough to produce several output frames, then the filter needs to output only at least one of them per link. The additional frames can be left buffered in the filter.
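A minimal pass-through version of such a filter_frame() callback might look like the sketch below (the context/outputs layout and ff_filter_frame() are the usual libavfilter internals; everything else is illustrative):

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx     = inlink->dst;
    AVFilterLink    *outlink = ctx->outputs[0];

    /* ... examine or modify the frame in place ... */

    /* The input frame is enough to produce output, so push it on the
     * output link immediately rather than buffering it. */
    return ff_filter_frame(outlink, frame);
}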
static void ps_stereo_interpolate_c(INTFLOAT(*l)[2], INTFLOAT(*r)[2], INTFLOAT h[2][4], INTFLOAT h_step[2][4], int len)
static int phi_fract[2][50][2]
#define AAC_MSUB30_V8(x, y, a, b, c, d, e, f)
static void ps_hybrid_analysis_c(INTFLOAT(*out)[2], INTFLOAT(*in)[2], const INTFLOAT(*filter)[8][2], ptrdiff_t stride, int n)
void ff_psdsp_init_x86(PSDSPContext *s)
static void ps_mul_pair_single_c(INTFLOAT(*dst)[2], INTFLOAT(*src0)[2], INTFLOAT *src1, int n)
static void ps_stereo_interpolate_ipdopd_c(INTFLOAT(*l)[2], INTFLOAT(*r)[2], INTFLOAT h[2][4], INTFLOAT h_step[2][4], int len)
void ff_psdsp_init_mips(PSDSPContext *s)
#define AAC_MADD30(x, y, a, b)
#define PS_QMF_TIME_SLOTS
static void ps_hybrid_analysis_ileave_c(INTFLOAT(*out)[32][2], INTFLOAT L[2][38][64], int i, int len)
static void ps_decorrelate_c(INTFLOAT(*out)[2], INTFLOAT(*delay)[2], INTFLOAT(*ap_delay)[PS_QMF_TIME_SLOTS+PS_MAX_AP_DELAY][2], const INTFLOAT phi_fract[2], const INTFLOAT(*Q_fract)[2], const INTFLOAT *transient_gain, INTFLOAT g_decay_slope, int len)
#define AAC_MADD30_V8(x, y, a, b, c, d, e, f)
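The declarations above fit together through the usual FFmpeg DSP pattern: AAC_RENAME(ff_psdsp_init)() fills a PSDSPContext with the C implementations, then the per-architecture initialisers (ff_psdsp_init_arm/aarch64/x86/mips) may override individual pointers with optimised versions. A condensed sketch of that pattern (member names follow psdsp.h as far as I recall; treat this as an illustration, not the exact file contents):

av_cold void AAC_RENAME(ff_psdsp_init)(PSDSPContext *s)
{
    /* install the portable C implementations first */
    s->add_squares            = ps_add_squares_c;
    s->mul_pair_single        = ps_mul_pair_single_c;
    s->hybrid_analysis        = ps_hybrid_analysis_c;
    s->hybrid_analysis_ileave = ps_hybrid_analysis_ileave_c;
    s->hybrid_synthesis_deint = ps_hybrid_synthesis_deint_c;
    s->decorrelate            = ps_decorrelate_c;
    s->stereo_interpolate[0]  = ps_stereo_interpolate_c;
    s->stereo_interpolate[1]  = ps_stereo_interpolate_ipdopd_c;

    /* let architecture-specific code replace selected pointers */
    if (ARCH_ARM)     ff_psdsp_init_arm(s);
    if (ARCH_AARCH64) ff_psdsp_init_aarch64(s);
    if (ARCH_MIPS)    ff_psdsp_init_mips(s);
    if (ARCH_X86)     ff_psdsp_init_x86(s);
}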