    double freq = 2*M_PI/m;

    for (int i = 0; i <= m/4; i++)
        tab[i] = RESCALE(cos(i*freq));
    for (int i = 1; i < m/4; i++)
        tab[m/2 - i] = tab[i];
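/* Note (added for clarity, not in the original file): only the first quarter
 * of the period is computed directly; tab[m/2 - i] = tab[i] mirrors it so
 * that reading the upper half of the table gives tab[m/4 + j] =
 * cos(M_PI/2 - j*freq) = sin(j*freq), letting a single table serve as both
 * the cosine and the sine lookup for the FFT passes. */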
#define INIT_FF_COS_TABS_FUNC(index, size)                                    \
static av_cold void init_cos_tabs_ ## size (void)                             \
{                                                                             \
    init_cos_tabs_idx(index);                                                 \
}

    { init_cos_tabs_16,     AV_ONCE_INIT },
    { init_cos_tabs_32,     AV_ONCE_INIT },
    { init_cos_tabs_64,     AV_ONCE_INIT },
    { init_cos_tabs_128,    AV_ONCE_INIT },
    { init_cos_tabs_256,    AV_ONCE_INIT },
    { init_cos_tabs_512,    AV_ONCE_INIT },
    { init_cos_tabs_1024,   AV_ONCE_INIT },
    { init_cos_tabs_2048,   AV_ONCE_INIT },
    { init_cos_tabs_4096,   AV_ONCE_INIT },
    { init_cos_tabs_8192,   AV_ONCE_INIT },
    { init_cos_tabs_16384,  AV_ONCE_INIT },
    { init_cos_tabs_32768,  AV_ONCE_INIT },
    { init_cos_tabs_65536,  AV_ONCE_INIT },
    { init_cos_tabs_131072, AV_ONCE_INIT },
                   cos_tabs_init_once[index].func);
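/* Clarifying note (not in the original): each table is filled at most once;
 * ff_thread_once() keys on the per-size AV_ONCE_INIT control member, so
 * concurrent initializations of the same transform size cannot race. */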
    BF(tmp[0].re, tmp[1].im, in[1].im, in[2].im);
    BF(tmp[0].im, tmp[1].re, in[1].re, in[2].re);
    /* 64-bit products of the Q31 ff_cos_53 twiddles with the butterfly
     * terms; adding 0x40000000 rounds to nearest before the >> 31 rescale. */
    mtmp[0] = (int64_t)TX_NAME(ff_cos_53)[0].re * tmp[0].re;
    mtmp[1] = (int64_t)TX_NAME(ff_cos_53)[0].im * tmp[0].im;
    mtmp[2] = (int64_t)TX_NAME(ff_cos_53)[1].re * tmp[1].re;
    mtmp[3] = (int64_t)TX_NAME(ff_cos_53)[1].re * tmp[1].im;
    out[1*stride].re = in[0].re - ((mtmp[2] + mtmp[0] + 0x40000000) >> 31);
    out[1*stride].im = in[0].im - ((mtmp[3] - mtmp[1] + 0x40000000) >> 31);
    out[2*stride].re = in[0].re - ((mtmp[2] - mtmp[0] + 0x40000000) >> 31);
    out[2*stride].im = in[0].im - ((mtmp[3] + mtmp[1] + 0x40000000) >> 31);
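/* Illustrative sketch (hypothetical helper, not part of the file): the
 * rounding idiom above is equivalent to this round-to-nearest Q31 rescale. */
static inline int32_t q31_mul_round(int32_t coeff, int32_t x)
{
    int64_t p = (int64_t)coeff * x;           /* exact 64-bit product       */
    return (int32_t)((p + 0x40000000) >> 31); /* add 0.5 ULP, then rescale  */
}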
#define DECL_FFT5(NAME, D0, D1, D2, D3, D4)                                                       \
static av_always_inline void NAME(FFTComplex *out, FFTComplex *in,                                \
                                  ptrdiff_t stride)                                               \
{                                                                                                 \
    FFTComplex z0[4], t[6];                                                                       \
                                                                                                  \
    BF(t[1].im, t[0].re, in[1].re, in[4].re);                                                     \
    BF(t[1].re, t[0].im, in[1].im, in[4].im);                                                     \
    BF(t[3].im, t[2].re, in[2].re, in[3].re);                                                     \
    BF(t[3].re, t[2].im, in[2].im, in[3].im);                                                     \
                                                                                                  \
    out[D0*stride].re = in[0].re + t[0].re + t[2].re;                                             \
    out[D0*stride].im = in[0].im + t[0].im + t[2].im;                                             \
                                                                                                  \
    SMUL(t[4].re, t[0].re, TX_NAME(ff_cos_53)[2].re, TX_NAME(ff_cos_53)[3].re, t[2].re, t[0].re); \
    SMUL(t[4].im, t[0].im, TX_NAME(ff_cos_53)[2].re, TX_NAME(ff_cos_53)[3].re, t[2].im, t[0].im); \
    CMUL(t[5].re, t[1].re, TX_NAME(ff_cos_53)[2].im, TX_NAME(ff_cos_53)[3].im, t[3].re, t[1].re); \
    CMUL(t[5].im, t[1].im, TX_NAME(ff_cos_53)[2].im, TX_NAME(ff_cos_53)[3].im, t[3].im, t[1].im); \
                                                                                                  \
    BF(z0[0].re, z0[3].re, t[0].re, t[1].re);                                                     \
    BF(z0[0].im, z0[3].im, t[0].im, t[1].im);                                                     \
    BF(z0[2].re, z0[1].re, t[4].re, t[5].re);                                                     \
    BF(z0[2].im, z0[1].im, t[4].im, t[5].im);                                                     \
                                                                                                  \
    out[D1*stride].re = in[0].re + z0[3].re;                                                      \
    out[D1*stride].im = in[0].im + z0[0].im;                                                      \
    out[D2*stride].re = in[0].re + z0[2].re;                                                      \
    out[D2*stride].im = in[0].im + z0[1].im;                                                      \
    out[D3*stride].re = in[0].re + z0[1].re;                                                      \
    out[D3*stride].im = in[0].im + z0[2].im;                                                      \
    out[D4*stride].re = in[0].re + z0[0].re;                                                      \
    out[D4*stride].im = in[0].im + z0[3].im;                                                      \
}

    for (int i = 0; i < 5; i++)
        fft3(tmp + i, in + i*3, 5);

    fft5_m1(out, tmp +  0, stride);
    fft5_m2(out, tmp +  5, stride);
    fft5_m3(out, tmp + 10, stride);
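/* Added note: fft15 factors 15 = 3 * 5 in prime-factor style. The loop runs
 * five length-3 transforms on strided input, then the three DECL_FFT5
 * instances (fft5_m1/m2/m3, which appear to differ only in their permuted
 * D0..D4 output offsets) combine them, with no twiddle-multiply stage
 * between the coprime factors. */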
#define BUTTERFLIES(a0,a1,a2,a3) {\
    BF(a2.re, a0.re, a0.re, t5);\
    BF(a3.im, a1.im, a1.im, t3);\
    BF(a3.re, a1.re, a1.re, t4);\
    BF(a2.im, a0.im, a0.im, t6);\
}

#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
    FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
    BF(a2.re, a0.re, r0, t5);\
    BF(a3.im, a1.im, i1, t3);\
    BF(a3.re, a1.re, r1, t4);\
    BF(a2.im, a0.im, i0, t6);\
}

#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\
    CMUL(t1, t2, a2.re, a2.im, wre, -wim);\
    CMUL(t5, t6, a3.re, a3.im, wre,  wim);\
    BUTTERFLIES(a0,a1,a2,a3)\
}

#define TRANSFORM_ZERO(a0,a1,a2,a3) {\
    /* ... */\
    BUTTERFLIES(a0,a1,a2,a3)\
}

/* ... */
static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\
{\
    FFTSample t1, t2, t3, t4, t5, t6;\
    /* ... */\
    const FFTSample *wim = wre+o1;\
    /* ... */\
    TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\
    TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
    /* ... */\
    TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\
    TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
    /* ... */\
}

/* ... */
#define BUTTERFLIES BUTTERFLIES_BIG

#define DECL_FFT(n,n2,n4)\
static void fft##n(FFTComplex *z)\
{\
    /* ... */\
    pass(z,TX_NAME(ff_cos_##n),n4/2);\
}

/* ... */
    BF(t3, t1, z[0].re, z[1].re);
    BF(t8, t6, z[3].re, z[2].re);
    BF(z[2].re, z[0].re, t1, t6);
    BF(t4, t2, z[0].im, z[1].im);
    BF(t7, t5, z[2].im, z[3].im);
    BF(z[3].im, z[1].im, t4, t8);
    BF(z[3].re, z[1].re, t3, t7);
    BF(z[2].im, z[0].im, t2, t5);

/* ... */
    BF(t1, z[5].re, z[4].re, -z[5].re);
    BF(t2, z[5].im, z[4].im, -z[5].im);
    BF(t5, z[7].re, z[6].re, -z[7].re);
    BF(t6, z[7].im, z[6].im, -z[7].im);

/* ... */
    TRANSFORM(z[1],z[5],z[9],z[13],cos_16_1,cos_16_3);
    TRANSFORM(z[3],z[7],z[11],z[15],cos_16_3,cos_16_1);
#define pass pass_big

static void (*const fft_dispatch[])(FFTComplex *) = {
    NULL, fft2, fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512,
    fft1024, fft2048, fft4096, fft8192, fft16384, fft32768, fft65536, fft131072,
};
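/* Added note: the table is indexed by av_log2() of the transform length, so
 * a power-of-two FFT of length m is dispatched as
 * fft_dispatch[av_log2(m)](z); index 0 (length 1) is unused, hence the
 * leading NULL. */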
#define DECL_COMP_FFT(N)                                                      \
static void compound_fft_##N##xM(AVTXContext *s, void *_out,                  \
                                 void *_in, ptrdiff_t stride)                 \
{                                                                             \
    const int m = s->m, *in_map = s->pfatab, *out_map = in_map + N*m;         \
    FFTComplex *in  = _in;                                                    \
    FFTComplex *out = _out;                                                   \
    FFTComplex fft##N##in[N];                                                 \
    void (*fftp)(FFTComplex *z) = fft_dispatch[av_log2(m)];                   \
                                                                              \
    for (int i = 0; i < m; i++) {                                             \
        for (int j = 0; j < N; j++)                                           \
            fft##N##in[j] = in[in_map[i*N + j]];                              \
        fft##N(s->tmp + s->revtab[i], fft##N##in, m);                         \
    }                                                                         \
                                                                              \
    for (int i = 0; i < N; i++)                                               \
        fftp(s->tmp + m*i);                                                   \
                                                                              \
    for (int i = 0; i < N*m; i++)                                             \
        out[i] = s->tmp[out_map[i]];                                          \
}

    int src, dst, *inplace_idx = s->inplace_idx;

    src = *inplace_idx++;

    do {
        /* ... */
        dst = s->revtab[src];
        do {
            /* ... */
            dst = s->revtab[dst];
        } while (dst != src);
        /* ... */
    } while ((src = *inplace_idx++));

    for (int i = 0; i < m; i++)
        out[i] = in[s->revtab[i]];
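/* Added note: the in-place path walks the bit-reversal permutation one cycle
 * at a time: inplace_idx holds one starting index per permutation cycle
 * (0-terminated), and the inner do/while follows revtab[] around the cycle,
 * swapping elements until it returns to the start. The copy loop above it is
 * the simpler out-of-place variant of the same reordering. */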
    for (int i = 0; i < n; i++) {
        /* ... */
        for (int j = 0; j < n; j++) {
            const double factor = phase*i*j;
            const FFTComplex mult = {
                RESCALE(cos(factor)),
                RESCALE(sin(factor)),
            };
            FFTComplex res;
            CMUL3(res, in[j], mult);
            /* ... */
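/* Added note: this is the O(n^2) reference path for lengths with no fast
 * factorization. It evaluates the DFT definition directly,
 *   out[i] = sum_{j=0}^{n-1} in[j] * e^(I * phase * i * j),
 * with phase equal to +/- 2*pi/n depending on the transform direction. */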
#define DECL_COMP_IMDCT(N)                                                    \
static void compound_imdct_##N##xM(AVTXContext *s, void *_dst, void *_src,    \
                                   ptrdiff_t stride)                          \
{                                                                             \
    FFTComplex fft##N##in[N];                                                 \
    FFTComplex *z = _dst, *exp = s->exptab;                                   \
    const int m = s->m, len8 = N*m >> 1;                                      \
    const int *in_map = s->pfatab, *out_map = in_map + N*m;                   \
    const FFTSample *src = _src, *in1, *in2;                                  \
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m)];                    \
                                                                              \
    stride /= sizeof(*src);                                                   \
    in1 = src;                                                                \
    in2 = src + ((N*m*2) - 1) * stride;                                       \
                                                                              \
    for (int i = 0; i < m; i++) {                                             \
        for (int j = 0; j < N; j++) {                                         \
            const int k = in_map[i*N + j];                                    \
            FFTComplex tmp = { in2[-k*stride], in1[k*stride] };               \
            CMUL3(fft##N##in[j], tmp, exp[k >> 1]);                           \
        }                                                                     \
        fft##N(s->tmp + s->revtab[i], fft##N##in, m);                         \
    }                                                                         \
                                                                              \
    for (int i = 0; i < N; i++)                                               \
        fftp(s->tmp + m*i);                                                   \
                                                                              \
    for (int i = 0; i < len8; i++) {                                          \
        const int i0 = len8 + i, i1 = len8 - i - 1;                           \
        const int s0 = out_map[i0], s1 = out_map[i1];                         \
        FFTComplex src1 = { s->tmp[s1].im, s->tmp[s1].re };                   \
        FFTComplex src0 = { s->tmp[s0].im, s->tmp[s0].re };                   \
                                                                              \
        CMUL(z[i1].re, z[i0].im, src1.re, src1.im, exp[i1].im, exp[i1].re);   \
        CMUL(z[i0].re, z[i1].im, src0.re, src0.im, exp[i0].im, exp[i0].re);   \
    }                                                                         \
}

#define DECL_COMP_MDCT(N)                                                     \
static void compound_mdct_##N##xM(AVTXContext *s, void *_dst, void *_src,     \
                                  ptrdiff_t stride)                           \
{                                                                             \
    FFTSample *src = _src, *dst = _dst;                                       \
    FFTComplex *exp = s->exptab, tmp, fft##N##in[N];                          \
    const int m = s->m, len4 = N*m, len3 = len4 * 3, len8 = len4 >> 1;        \
    const int *in_map = s->pfatab, *out_map = in_map + N*m;                   \
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m)];                    \
                                                                              \
    stride /= sizeof(*dst);                                                   \
                                                                              \
    for (int i = 0; i < m; i++) {                                             \
        for (int j = 0; j < N; j++) {                                         \
            const int k = in_map[i*N + j];                                    \
            if (k < len4) {                                                   \
                tmp.re = FOLD(-src[ len4 + k],  src[1*len4 - 1 - k]);         \
                tmp.im = FOLD(-src[ len3 + k], -src[1*len3 - 1 - k]);         \
            } else {                                                          \
                tmp.re = FOLD(-src[ len4 + k], -src[5*len4 - 1 - k]);         \
                tmp.im = FOLD( src[-len4 + k], -src[1*len3 - 1 - k]);         \
            }                                                                 \
            CMUL(fft##N##in[j].im, fft##N##in[j].re, tmp.re, tmp.im,          \
                 exp[k >> 1].re, exp[k >> 1].im);                             \
        }                                                                     \
        fft##N(s->tmp + s->revtab[i], fft##N##in, m);                         \
    }                                                                         \
                                                                              \
    for (int i = 0; i < N; i++)                                               \
        fftp(s->tmp + m*i);                                                   \
                                                                              \
    for (int i = 0; i < len8; i++) {                                          \
        const int i0 = len8 + i, i1 = len8 - i - 1;                           \
        const int s0 = out_map[i0], s1 = out_map[i1];                         \
        FFTComplex src1 = { s->tmp[s1].re, s->tmp[s1].im };                   \
        FFTComplex src0 = { s->tmp[s0].re, s->tmp[s0].im };                   \
                                                                              \
        CMUL(dst[2*i1*stride + stride], dst[2*i0*stride], src0.re, src0.im,   \
             exp[i0].im, exp[i0].re);                                         \
        CMUL(dst[2*i0*stride + stride], dst[2*i1*stride], src1.re, src1.im,   \
             exp[i1].im, exp[i1].re);                                         \
    }                                                                         \
}

/* ... */
    const int m = s->m, len8 = m >> 1;
    stride /= sizeof(*src);
    in1 = src;
    in2 = src + ((m*2) - 1) * stride;

    for (int i = 0; i < m; i++) {
        /* ... */
        CMUL3(z[s->revtab[i]], tmp, exp[i]);
    }

    /* ... */

    for (int i = 0; i < len8; i++) {
        const int i0 = len8 + i, i1 = len8 - i - 1;
        /* ... */
        CMUL(z[i1].re, z[i0].im, src1.re, src1.im, exp[i1].im, exp[i1].re);
        CMUL(z[i0].re, z[i1].im, src0.re, src0.im, exp[i0].im, exp[i0].re);
    }
    const int m = s->m, len4 = m, len3 = len4 * 3, len8 = len4 >> 1;
    /* ... */
    stride /= sizeof(*dst);

    for (int i = 0; i < m; i++) {
        /* ... */
        if (k < len4) {
            tmp.re = FOLD(-src[ len4 + k],  src[1*len4 - 1 - k]);
            tmp.im = FOLD(-src[ len3 + k], -src[1*len3 - 1 - k]);
        } else {
            tmp.re = FOLD(-src[ len4 + k], -src[5*len4 - 1 - k]);
            tmp.im = FOLD( src[-len4 + k], -src[1*len3 - 1 - k]);
        }
        CMUL(/* ... */,
             exp[i].re, exp[i].im);
    }

    /* ... */

    for (int i = 0; i < len8; i++) {
        const int i0 = len8 + i, i1 = len8 - i - 1;
        /* ... */
        CMUL(dst[2*i1*stride + stride], dst[2*i0*stride], src0.re, src0.im,
             exp[i0].im, exp[i0].re);
        CMUL(dst[2*i0*stride + stride], dst[2*i1*stride], src1.re, src1.im,
             exp[i1].im, exp[i1].re);
    }
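/* Added note: the FOLD pairs implement the usual MDCT input folding: the
 * four quarters of the 2*len4-sample window are folded into len4 real values
 * (two per complex point), so a half-length complex FFT plus the exp[]
 * pre- and post-rotations computes the MDCT of the full window. */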
    double scale = s->scale;
    const double phase = M_PI/(4.0*len2);

    stride /= sizeof(*src);

    for (int i = 0; i < len; i++) {
        double sum_d = 0.0;
        double sum_u = 0.0;
        double i_d = phase * (4*len  - 2*i - 1);
        double i_u = phase * (3*len2 + 2*i + 1);
        for (int j = 0; j < len2; j++) {
            double a   = (2 * j + 1);
            double a_d = cos(a * i_d);
            double a_u = cos(a * i_u);
            double val = UNSCALE(src[j*stride]);
            sum_d += a_d * val;
            sum_u += a_u * val;
        }

        dst[i +   0] = RESCALE( sum_d*scale);
        dst[i + len] = RESCALE(-sum_u*scale);
    }
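/* Added note: a direct O(len^2) evaluation of the inverse MDCT, used when no
 * FFT-based path applies. Each output sample is a cosine-weighted sum over
 * all len2 input coefficients; the two halves of the output window come from
 * the "down" (sum_d) and "up" (sum_u) phase sequences. */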
    double scale = s->scale;
    const double phase = M_PI/(4.0*len);

    stride /= sizeof(*dst);

    for (int i = 0; i < len; i++) {
        double sum = 0.0;
        for (int j = 0; j < len*2; j++) {
            int a = (2*j + 1 + len) * (2*i + 1);
            sum += UNSCALE(src[j]) * cos(a * phase);
        }
        dst[i*stride] = RESCALE(sum*scale);
    }
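/* Added note: with N = len this matches the textbook MDCT definition,
 *   X[i] = sum_{j=0}^{2N-1} x[j] * cos(pi/(4N) * (2j + 1 + N) * (2i + 1)),
 * evaluated directly in O(N^2): a = (2j+1+N)(2i+1) and phase = pi/(4N). */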
    const double theta = (scale < 0 ? len4 : 0) + 1.0/8.0;
    /* ... */
    scale = sqrt(fabs(scale));
    for (int i = 0; i < len4; i++) {
        const double alpha = M_PI_2 * (i + theta) / len4;
        s->exptab[i].re = RESCALE(cos(alpha) * scale);
        s->exptab[i].im = RESCALE(sin(alpha) * scale);
    }
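/* Added note: the exptab entries are scale * e^(I*alpha) twiddles carrying
 * the characteristic 1/8-sample phase offset (theta) of the MDCT basis; a
 * negative caller-supplied scale shifts theta by len4, flipping the sign
 * convention. sqrt() is taken because the table is applied twice (pre- and
 * post-FFT rotation), so the full scale factor is reached after both. */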
                           const void *scale, uint64_t flags)
/* ... */

#define CHECK_FACTOR(DST, FACTOR, SRC)                                        \
    if (DST == 1 && !(SRC % FACTOR)) {                                        \
        /* ... */

    /* Direct power-of-two transform */
    if (!(len & (len - 1)) && len >= 2 && len <= max_ptwo) {
        /* ... */

    if (len > 1 || m == 1) { /* unsupported length */
        /* ... */

    if (is_mdct && (l & 1)) /* odd (i)MDCT sizes are not supported */
        /* ... */

    s->scale = *((SCALE_TYPE *)scale);
    /* ... */

    if (n > 1 && m > 1) { /* compound transform */
        /* ... */
        if (!(s->tmp = av_malloc(n*m*sizeof(*s->tmp))))
            /* ... */
        *tx = n == 3 ? compound_fft_3xM :
              n == 5 ? compound_fft_5xM :
                       compound_fft_15xM;
        /* ... */
        *tx = n == 3 ? inv ? compound_imdct_3xM  : compound_mdct_3xM  :
              n == 5 ? inv ? compound_imdct_5xM  : compound_mdct_5xM  :
                       inv ? compound_imdct_15xM : compound_mdct_15xM;
    /* ... */

    if (flags & AV_TX_INPLACE) {
        /* ... */
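/* Usage sketch (added; caller-side code, not part of this file). The public
 * entry point behind this initializer is av_tx_init() from libavutil/tx.h;
 * the helper name below is hypothetical, and len must be a size this
 * version supports (a power of two, or 3/5/15 times a power of two). */
#include "libavutil/tx.h"

static int run_float_fft(AVComplexFloat *out, AVComplexFloat *in, int len)
{
    AVTXContext *ctx = NULL;
    av_tx_fn fn;
    float scale = 1.0f;
    int err = av_tx_init(&ctx, &fn, AV_TX_FLOAT_FFT, 0 /* forward */,
                         len, &scale, 0 /* flags */);
    if (err < 0)
        return err;
    fn(ctx, out, in, sizeof(AVComplexFloat)); /* stride is in bytes */
    av_tx_uninit(&ctx);
    return 0;
}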