FFmpeg - libavfilter/af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/xga_font_data.h"
28 #include "audio.h"
29 #include "avfilter.h"
30 #include "internal.h"
31 
32 typedef struct ThreadData {
33  AVFrame *in, *out;
34 } ThreadData;
35 
36 typedef struct Pair {
37  int a, b;
38 } Pair;
39 
40 typedef struct BiquadContext {
41  double a[3];
42  double b[3];
43  double i1, i2;
44  double i1, i2;
45  double o1, o2;
46 } BiquadContext;
47 typedef struct IIRChannel {
48  int nb_ab[2];
49  double *ab[2];
50  double g;
51  double *cache[2];
52  BiquadContext *biquads;
53  int clippings;
54 } IIRChannel;
55 
56 typedef struct AudioIIRContext {
57  const AVClass *class;
58  char *a_str, *b_str, *g_str;
59  double dry_gain, wet_gain;
60  double mix;
61  int normalize;
62  int format;
63  int process;
64  int precision;
65  int response;
66  int w, h;
67  int ir_channel;
68  AVRational rate;
69 
70  AVFrame *video;
71 
72  IIRChannel *iir;
73  int channels;
74  enum AVSampleFormat sample_format;
75 
76  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
77 } AudioIIRContext;
78 
79 static int query_formats(AVFilterContext *ctx)
80 {
81  AudioIIRContext *s = ctx->priv;
82  AVFilterFormats *formats;
83  AVFilterChannelLayouts *layouts;
84  enum AVSampleFormat sample_fmts[] = {
85  AV_SAMPLE_FMT_DBLP,
86  AV_SAMPLE_FMT_NONE
87  };
88  static const enum AVPixelFormat pix_fmts[] = {
89  AV_PIX_FMT_RGB0,
90  AV_PIX_FMT_NONE
91  };
92  int ret;
93 
94  if (s->response) {
95  AVFilterLink *videolink = ctx->outputs[1];
96 
97  formats = ff_make_format_list(pix_fmts);
98  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
99  return ret;
100  }
101 
102  layouts = ff_all_channel_counts();
103  if (!layouts)
104  return AVERROR(ENOMEM);
105  ret = ff_set_common_channel_layouts(ctx, layouts);
106  if (ret < 0)
107  return ret;
108 
109  sample_fmts[0] = s->sample_format;
110  formats = ff_make_format_list(sample_fmts);
111  if (!formats)
112  return AVERROR(ENOMEM);
113  ret = ff_set_common_formats(ctx, formats);
114  if (ret < 0)
115  return ret;
116 
117  formats = ff_all_samplerates();
118  if (!formats)
119  return AVERROR(ENOMEM);
120  return ff_set_common_samplerates(ctx, formats);
121 }
122 
123 #define IIR_CH(name, type, min, max, need_clipping) \
124 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
125 { \
126  AudioIIRContext *s = ctx->priv; \
127  const double ig = s->dry_gain; \
128  const double og = s->wet_gain; \
129  const double mix = s->mix; \
130  ThreadData *td = arg; \
131  AVFrame *in = td->in, *out = td->out; \
132  const type *src = (const type *)in->extended_data[ch]; \
133  double *oc = (double *)s->iir[ch].cache[0]; \
134  double *ic = (double *)s->iir[ch].cache[1]; \
135  const int nb_a = s->iir[ch].nb_ab[0]; \
136  const int nb_b = s->iir[ch].nb_ab[1]; \
137  const double *a = s->iir[ch].ab[0]; \
138  const double *b = s->iir[ch].ab[1]; \
139  const double g = s->iir[ch].g; \
140  int *clippings = &s->iir[ch].clippings; \
141  type *dst = (type *)out->extended_data[ch]; \
142  int n; \
143  \
144  for (n = 0; n < in->nb_samples; n++) { \
145  double sample = 0.; \
146  int x; \
147  \
148  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
149  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
150  ic[0] = src[n] * ig; \
151  for (x = 0; x < nb_b; x++) \
152  sample += b[x] * ic[x]; \
153  \
154  for (x = 1; x < nb_a; x++) \
155  sample -= a[x] * oc[x]; \
156  \
157  oc[0] = sample; \
158  sample *= og * g; \
159  sample = sample * mix + ic[0] * (1. - mix); \
160  if (need_clipping && sample < min) { \
161  (*clippings)++; \
162  dst[n] = min; \
163  } else if (need_clipping && sample > max) { \
164  (*clippings)++; \
165  dst[n] = max; \
166  } else { \
167  dst[n] = sample; \
168  } \
169  } \
170  \
171  return 0; \
172 }
173 
174 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
175 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
176 IIR_CH(fltp, float, -1., 1., 0)
177 IIR_CH(dblp, double, -1., 1., 0)
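/*
 * The IIR_CH() variants implement the direct form of the recursion per
 * channel.  With a[0] normalized to 1 (done in config_output()), each
 * output sample is
 *
 *   y[n] = sum_{k=0..nb_b-1} b[k]*x[n-k] - sum_{k=1..nb_a-1} a[k]*y[n-k]
 *
 * where x[] is the input scaled by the dry gain; the result is scaled by
 * the wet gain and the channel gain g, blended with the dry-gained input
 * according to "mix", and clipped (with a counter) for the integer
 * sample formats.
 */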
178 
179 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
180 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
181 { \
182  AudioIIRContext *s = ctx->priv; \
183  const double ig = s->dry_gain; \
184  const double og = s->wet_gain; \
185  const double mix = s->mix; \
186  ThreadData *td = arg; \
187  AVFrame *in = td->in, *out = td->out; \
188  const type *src = (const type *)in->extended_data[ch]; \
189  type *dst = (type *)out->extended_data[ch]; \
190  IIRChannel *iir = &s->iir[ch]; \
191  const double g = iir->g; \
192  int *clippings = &iir->clippings; \
193  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
194  int n, i; \
195  \
196  for (i = 0; i < nb_biquads; i++) { \
197  const double a1 = -iir->biquads[i].a[1]; \
198  const double a2 = -iir->biquads[i].a[2]; \
199  const double b0 = iir->biquads[i].b[0]; \
200  const double b1 = iir->biquads[i].b[1]; \
201  const double b2 = iir->biquads[i].b[2]; \
202  double i1 = iir->biquads[i].i1; \
203  double i2 = iir->biquads[i].i2; \
204  double o1 = iir->biquads[i].o1; \
205  double o2 = iir->biquads[i].o2; \
206  \
207  for (n = 0; n < in->nb_samples; n++) { \
208  double sample = ig * (i ? dst[n] : src[n]); \
209  double o0 = sample * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2; \
210  \
211  i2 = i1; \
212  i1 = src[n]; \
213  o2 = o1; \
214  o1 = o0; \
215  o0 *= og * g; \
216  \
217  o0 = o0 * mix + (1. - mix) * sample; \
218  if (need_clipping && o0 < min) { \
219  (*clippings)++; \
220  dst[n] = min; \
221  } else if (need_clipping && o0 > max) { \
222  (*clippings)++; \
223  dst[n] = max; \
224  } else { \
225  dst[n] = o0; \
226  } \
227  } \
228  iir->biquads[i].i1 = i1; \
229  iir->biquads[i].i2 = i2; \
230  iir->biquads[i].o1 = o1; \
231  iir->biquads[i].o2 = o2; \
232  } \
233  \
234  return 0; \
235 }
236 
237 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
238 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
239 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
240 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
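/*
 * The SERIAL_IIR_CH() variants run the biquads produced by
 * decompose_zp2biquads() as a cascade: stage 0 reads the input, every
 * later stage reads the output of the previous stage from dst[], and each
 * stage keeps its own i1/i2/o1/o2 history in BiquadContext so state
 * persists across frames.  The wet gain, channel gain, dry/wet mix and
 * clipping counter are applied inside each stage.
 */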
241 
242 static void count_coefficients(char *item_str, int *nb_items)
243 {
244  char *p;
245 
246  if (!item_str)
247  return;
248 
249  *nb_items = 1;
250  for (p = item_str; *p && *p != '|'; p++) {
251  if (*p == ' ')
252  (*nb_items)++;
253  }
254 }
255 
256 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
257 {
258  AudioIIRContext *s = ctx->priv;
259  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
260  int i;
261 
262  p = old_str = av_strdup(item_str);
263  if (!p)
264  return AVERROR(ENOMEM);
265  for (i = 0; i < nb_items; i++) {
266  if (!(arg = av_strtok(p, "|", &saveptr)))
267  arg = prev_arg;
268 
269  if (!arg) {
270  av_freep(&old_str);
271  return AVERROR(EINVAL);
272  }
273 
274  p = NULL;
275  if (sscanf(arg, "%lf", &s->iir[i].g) != 1) {
276  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
277  av_freep(&old_str);
278  return AVERROR(EINVAL);
279  }
280 
281  prev_arg = arg;
282  }
283 
284  av_freep(&old_str);
285 
286  return 0;
287 }
288 
289 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
290 {
291  char *p, *arg, *old_str, *saveptr = NULL;
292  int i;
293 
294  p = old_str = av_strdup(item_str);
295  if (!p)
296  return AVERROR(ENOMEM);
297  for (i = 0; i < nb_items; i++) {
298  if (!(arg = av_strtok(p, " ", &saveptr)))
299  break;
300 
301  p = NULL;
302  if (sscanf(arg, "%lf", &dst[i]) != 1) {
303  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
304  av_freep(&old_str);
305  return AVERROR(EINVAL);
306  }
307  }
308 
309  av_freep(&old_str);
310 
311  return 0;
312 }
313 
314 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
315 {
316  char *p, *arg, *old_str, *saveptr = NULL;
317  int i;
318 
319  p = old_str = av_strdup(item_str);
320  if (!p)
321  return AVERROR(ENOMEM);
322  for (i = 0; i < nb_items; i++) {
323  if (!(arg = av_strtok(p, " ", &saveptr)))
324  break;
325 
326  p = NULL;
327  if (sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
328  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
329  av_freep(&old_str);
330  return AVERROR(EINVAL);
331  }
332  }
333 
334  av_freep(&old_str);
335 
336  return 0;
337 }
338 
339 static const char *format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
340 
341 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
342 {
343  AudioIIRContext *s = ctx->priv;
344  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
345  int i, ret;
346 
347  p = old_str = av_strdup(item_str);
348  if (!p)
349  return AVERROR(ENOMEM);
350  for (i = 0; i < channels; i++) {
351  IIRChannel *iir = &s->iir[i];
352 
353  if (!(arg = av_strtok(p, "|", &saveptr)))
354  arg = prev_arg;
355 
356  if (!arg) {
357  av_freep(&old_str);
358  return AVERROR(EINVAL);
359  }
360 
361  count_coefficients(arg, &iir->nb_ab[ab]);
362 
363  p = NULL;
364  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
365  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
366  if (!iir->ab[ab] || !iir->cache[ab]) {
367  av_freep(&old_str);
368  return AVERROR(ENOMEM);
369  }
370 
371  if (s->format) {
372  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
373  } else {
374  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
375  }
376  if (ret < 0) {
377  av_freep(&old_str);
378  return ret;
379  }
380  prev_arg = arg;
381  }
382 
383  av_freep(&old_str);
384 
385  return 0;
386 }
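/*
 * Coefficient strings are parsed per channel: sets for different channels
 * are separated by '|' and, if fewer sets than channels are given, the
 * last set is reused for the remaining channels.  Within a set the items
 * are separated by spaces; for the tf format each item is a single real
 * value, while for the zp/pr/pd/sp formats each item is a complex value
 * matched by the corresponding entry of format[] above (e.g. "1+0i 1-0i").
 */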
387 
388 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
389 {
390  *RE = re * re2 - im * im2;
391  *IM = re * im2 + re2 * im;
392 }
393 
394 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
395 {
396  coefs[2 * n] = 1.0;
397 
398  for (int i = 1; i <= n; i++) {
399  for (int j = n - i; j < n; j++) {
400  double re, im;
401 
402  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
403  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
404 
405  coefs[2 * j] -= re;
406  coefs[2 * j + 1] -= im;
407  }
408  }
409 
410  for (int i = 0; i < n + 1; i++) {
411  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
412  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
413  coefs[2 * i + 1], i);
414  return AVERROR(EINVAL);
415  }
416  }
417 
418  return 0;
419 }
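/*
 * expand() multiplies out a polynomial from its n complex roots: coefs[]
 * holds interleaved re/im pairs in ascending powers with the leading
 * coefficient fixed to 1.  For a conjugate pair r and conj(r) this yields
 *
 *   (z - r)(z - conj(r)) = z^2 - 2*Re(r)*z + |r|^2
 *
 * Any residual imaginary part above FLT_EPSILON means the roots were not
 * supplied as conjugate pairs and is reported as an error.
 */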
420 
421 static void normalize_coeffs(AVFilterContext *ctx, int ch)
422 {
423  AudioIIRContext *s = ctx->priv;
424  IIRChannel *iir = &s->iir[ch];
425  double sum_den = 0.;
426 
427  if (!s->normalize)
428  return;
429 
430  for (int i = 0; i < iir->nb_ab[1]; i++) {
431  sum_den += iir->ab[1][i];
432  }
433 
434  if (sum_den > 1e-6) {
435  double factor, sum_num = 0.;
436 
437  for (int i = 0; i < iir->nb_ab[0]; i++) {
438  sum_num += iir->ab[0][i];
439  }
440 
441  factor = sum_num / sum_den;
442 
443  for (int i = 0; i < iir->nb_ab[1]; i++) {
444  iir->ab[1][i] *= factor;
445  }
446  }
447 }
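/*
 * With the normalize option enabled, the ab[1] coefficients are rescaled
 * so that their sum equals the sum of the ab[0] coefficients, which makes
 * the overall response unity at DC (z = 1).  Nothing is done when the
 * ab[1] sum is tiny, to avoid dividing by (near) zero.
 */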
448 
449 static int convert_zp2tf(AVFilterContext *ctx, int channels)
450 {
451  AudioIIRContext *s = ctx->priv;
452  int ch, i, j, ret = 0;
453 
454  for (ch = 0; ch < channels; ch++) {
455  IIRChannel *iir = &s->iir[ch];
456  double *topc, *botc;
457 
458  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
459  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
460  if (!topc || !botc) {
461  ret = AVERROR(ENOMEM);
462  goto fail;
463  }
464 
465  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
466  if (ret < 0) {
467  goto fail;
468  }
469 
470  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
471  if (ret < 0) {
472  goto fail;
473  }
474 
475  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
476  iir->ab[1][j] = topc[2 * i];
477  }
478  iir->nb_ab[1]++;
479 
480  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
481  iir->ab[0][j] = botc[2 * i];
482  }
483  iir->nb_ab[0]++;
484 
485  normalize_coeffs(ctx, ch);
486 
487 fail:
488  av_free(topc);
489  av_free(botc);
490  if (ret < 0)
491  break;
492  }
493 
494  return ret;
495 }
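/*
 * convert_zp2tf() turns the zero/pole description into transfer-function
 * coefficients: both root lists are expanded with expand(), the real parts
 * of the resulting polynomials replace the root arrays, and nb_ab[] grows
 * by one (n roots produce n + 1 coefficients).  The result is then
 * optionally normalized for unity DC gain.
 */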
496 
497 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
498 {
499  AudioIIRContext *s = ctx->priv;
500  int ch, ret;
501 
502  for (ch = 0; ch < channels; ch++) {
503  IIRChannel *iir = &s->iir[ch];
504  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
505  int current_biquad = 0;
506 
507  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
508  if (!iir->biquads)
509  return AVERROR(ENOMEM);
510 
511  while (nb_biquads--) {
512  Pair outmost_pole = { -1, -1 };
513  Pair nearest_zero = { -1, -1 };
514  double zeros[4] = { 0 };
515  double poles[4] = { 0 };
516  double b[6] = { 0 };
517  double a[6] = { 0 };
518  double min_distance = DBL_MAX;
519  double max_mag = 0;
520  double factor;
521  int i;
522 
523  for (i = 0; i < iir->nb_ab[0]; i++) {
524  double mag;
525 
526  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
527  continue;
528  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
529 
530  if (mag > max_mag) {
531  max_mag = mag;
532  outmost_pole.a = i;
533  }
534  }
535 
536  for (i = 0; i < iir->nb_ab[0]; i++) {
537  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
538  continue;
539 
540  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
541  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
542  outmost_pole.b = i;
543  break;
544  }
545  }
546 
547  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
548 
549  if (outmost_pole.a < 0 || outmost_pole.b < 0)
550  return AVERROR(EINVAL);
551 
552  for (i = 0; i < iir->nb_ab[1]; i++) {
553  double distance;
554 
555  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
556  continue;
557  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
558  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
559 
560  if (distance < min_distance) {
561  min_distance = distance;
562  nearest_zero.a = i;
563  }
564  }
565 
566  for (i = 0; i < iir->nb_ab[1]; i++) {
567  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
568  continue;
569 
570  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
571  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
572  nearest_zero.b = i;
573  break;
574  }
575  }
576 
577  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
578 
579  if (nearest_zero.a < 0 || nearest_zero.b < 0)
580  return AVERROR(EINVAL);
581 
582  poles[0] = iir->ab[0][2 * outmost_pole.a ];
583  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
584 
585  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
586  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
587 
588  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
589  zeros[2] = 0;
590  zeros[3] = 0;
591 
592  poles[2] = 0;
593  poles[3] = 0;
594  } else {
595  poles[2] = iir->ab[0][2 * outmost_pole.b ];
596  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
597 
598  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
599  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
600  }
601 
602  ret = expand(ctx, zeros, 2, b);
603  if (ret < 0)
604  return ret;
605 
606  ret = expand(ctx, poles, 2, a);
607  if (ret < 0)
608  return ret;
609 
610  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
611  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
612  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
613  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
614 
615  iir->biquads[current_biquad].a[0] = 1.;
616  iir->biquads[current_biquad].a[1] = a[2] / a[4];
617  iir->biquads[current_biquad].a[2] = a[0] / a[4];
618  iir->biquads[current_biquad].b[0] = b[4] / a[4];
619  iir->biquads[current_biquad].b[1] = b[2] / a[4];
620  iir->biquads[current_biquad].b[2] = b[0] / a[4];
621 
622  if (s->normalize &&
623  fabs(iir->biquads[current_biquad].b[0] +
624  iir->biquads[current_biquad].b[1] +
625  iir->biquads[current_biquad].b[2]) > 1e-6) {
626  factor = (iir->biquads[current_biquad].a[0] +
627  iir->biquads[current_biquad].a[1] +
628  iir->biquads[current_biquad].a[2]) /
629  (iir->biquads[current_biquad].b[0] +
630  iir->biquads[current_biquad].b[1] +
631  iir->biquads[current_biquad].b[2]);
632 
633  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
634 
635  iir->biquads[current_biquad].b[0] *= factor;
636  iir->biquads[current_biquad].b[1] *= factor;
637  iir->biquads[current_biquad].b[2] *= factor;
638  }
639 
640  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
641  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
642  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
643 
644  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
645  iir->biquads[current_biquad].a[0],
646  iir->biquads[current_biquad].a[1],
647  iir->biquads[current_biquad].a[2],
648  iir->biquads[current_biquad].b[0],
649  iir->biquads[current_biquad].b[1],
650  iir->biquads[current_biquad].b[2]);
651 
652  current_biquad++;
653  }
654  }
655 
656  return 0;
657 }
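/*
 * decompose_zp2biquads() builds the serial form: it repeatedly takes the
 * remaining pole with the largest magnitude together with its complex
 * conjugate, pairs it with the nearest remaining zero (and its conjugate),
 * expands each pair into a quadratic with expand(), and normalizes the
 * section by its leading denominator coefficient.  Consumed poles/zeros
 * are marked NAN so they are not selected again; with the normalize option
 * each section is also rescaled towards unity DC gain, and the channel
 * gain g is folded into the b coefficients of the first section only.
 */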
658 
659 static void convert_pr2zp(AVFilterContext *ctx, int channels)
660 {
661  AudioIIRContext *s = ctx->priv;
662  int ch;
663 
664  for (ch = 0; ch < channels; ch++) {
665  IIRChannel *iir = &s->iir[ch];
666  int n;
667 
668  for (n = 0; n < iir->nb_ab[0]; n++) {
669  double r = iir->ab[0][2*n];
670  double angle = iir->ab[0][2*n+1];
671 
672  iir->ab[0][2*n] = r * cos(angle);
673  iir->ab[0][2*n+1] = r * sin(angle);
674  }
675 
676  for (n = 0; n < iir->nb_ab[1]; n++) {
677  double r = iir->ab[1][2*n];
678  double angle = iir->ab[1][2*n+1];
679 
680  iir->ab[1][2*n] = r * cos(angle);
681  iir->ab[1][2*n+1] = r * sin(angle);
682  }
683  }
684 }
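/*
 * convert_pr2zp() reads each value pair as polar coordinates, radius and
 * angle in radians, and converts it to Cartesian real/imaginary parts;
 * convert_pd2zp() further below does the same with the angle in degrees.
 */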
685 
686 static void convert_sp2zp(AVFilterContext *ctx, int channels)
687 {
688  AudioIIRContext *s = ctx->priv;
689  int ch;
690 
691  for (ch = 0; ch < channels; ch++) {
692  IIRChannel *iir = &s->iir[ch];
693  int n;
694 
695  for (n = 0; n < iir->nb_ab[0]; n++) {
696  double sr = iir->ab[0][2*n];
697  double si = iir->ab[0][2*n+1];
698  double snr = 1. + sr;
699  double sdr = 1. - sr;
700  double div = sdr * sdr + si * si;
701 
702  iir->ab[0][2*n] = (snr * sdr - si * si) / div;
703  iir->ab[0][2*n+1] = (sdr * si + snr * si) / div;
704  }
705 
706  for (n = 0; n < iir->nb_ab[1]; n++) {
707  double sr = iir->ab[1][2*n];
708  double si = iir->ab[1][2*n+1];
709  double snr = 1. + sr;
710  double sdr = 1. - sr;
711  double div = sdr * sdr + si * si;
712 
713  iir->ab[1][2*n] = (snr * sdr - si * si) / div;
714  iir->ab[1][2*n+1] = (sdr * si + snr * si) / div;
715  }
716  }
717 }
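/*
 * convert_sp2zp() maps each s-plane root to the z-plane with
 *
 *   z = (1 + s) / (1 - s)
 *
 * carried out as an explicit complex division (snr = 1 + Re(s),
 * sdr = 1 - Re(s), div = |1 - s|^2).  No sample-rate scaling or frequency
 * prewarping is applied here.
 */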
718 
719 static void convert_pd2zp(AVFilterContext *ctx, int channels)
720 {
721  AudioIIRContext *s = ctx->priv;
722  int ch;
723 
724  for (ch = 0; ch < channels; ch++) {
725  IIRChannel *iir = &s->iir[ch];
726  int n;
727 
728  for (n = 0; n < iir->nb_ab[0]; n++) {
729  double r = iir->ab[0][2*n];
730  double angle = M_PI*iir->ab[0][2*n+1]/180.;
731 
732  iir->ab[0][2*n] = r * cos(angle);
733  iir->ab[0][2*n+1] = r * sin(angle);
734  }
735 
736  for (n = 0; n < iir->nb_ab[1]; n++) {
737  double r = iir->ab[1][2*n];
738  double angle = M_PI*iir->ab[1][2*n+1]/180.;
739 
740  iir->ab[1][2*n] = r * cos(angle);
741  iir->ab[1][2*n+1] = r * sin(angle);
742  }
743  }
744 }
745 
746 static void check_stability(AVFilterContext *ctx, int channels)
747 {
748  AudioIIRContext *s = ctx->priv;
749  int ch;
750 
751  for (ch = 0; ch < channels; ch++) {
752  IIRChannel *iir = &s->iir[ch];
753 
754  for (int n = 0; n < iir->nb_ab[0]; n++) {
755  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
756 
757  if (pr >= 1.) {
758  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
759  break;
760  }
761  }
762  }
763 }
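/*
 * check_stability() only diagnoses: a pole with magnitude >= 1 lies on or
 * outside the unit circle, so the recursion for that channel will not
 * decay; a warning is logged but the coefficients are left untouched.
 */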
764 
765 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
766 {
767  const uint8_t *font;
768  int font_height;
769  int i;
770 
771  font = avpriv_cga_font, font_height = 8;
772 
773  for (i = 0; txt[i]; i++) {
774  int char_y, mask;
775 
776  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
777  for (char_y = 0; char_y < font_height; char_y++) {
778  for (mask = 0x80; mask; mask >>= 1) {
779  if (font[txt[i] * font_height + char_y] & mask)
780  AV_WL32(p, color);
781  p += 4;
782  }
783  p += pic->linesize[0] - 8 * 4;
784  }
785  }
786 }
787 
788 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
789 {
790  int dx = FFABS(x1-x0);
791  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
792  int err = (dx>dy ? dx : -dy) / 2, e2;
793 
794  for (;;) {
795  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
796 
797  if (x0 == x1 && y0 == y1)
798  break;
799 
800  e2 = err;
801 
802  if (e2 >-dx) {
803  err -= dy;
804  x0--;
805  }
806 
807  if (e2 < dy) {
808  err += dx;
809  y0 += sy;
810  }
811  }
812 }
813 
814 static double distance(double x0, double x1, double y0, double y1)
815 {
816  return hypot(x0 - x1, y0 - y1);
817 }
818 
819 static void get_response(int channel, int format, double w,
820  const double *b, const double *a,
821  int nb_b, int nb_a, double *magnitude, double *phase)
822 {
823  double realz, realp;
824  double imagz, imagp;
825  double real, imag;
826  double div;
827 
828  if (format == 0) {
829  realz = 0., realp = 0.;
830  imagz = 0., imagp = 0.;
831  for (int x = 0; x < nb_a; x++) {
832  realz += cos(-x * w) * a[x];
833  imagz += sin(-x * w) * a[x];
834  }
835 
836  for (int x = 0; x < nb_b; x++) {
837  realp += cos(-x * w) * b[x];
838  imagp += sin(-x * w) * b[x];
839  }
840 
841  div = realp * realp + imagp * imagp;
842  real = (realz * realp + imagz * imagp) / div;
843  imag = (imagz * realp - imagp * realz) / div;
844 
845  *magnitude = hypot(real, imag);
846  *phase = atan2(imag, real);
847  } else {
848  double p = 1., z = 1.;
849  double acc = 0.;
850 
851  for (int x = 0; x < nb_a; x++) {
852  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
853  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
854  }
855 
856  for (int x = 0; x < nb_b; x++) {
857  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
858  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
859  }
860 
861  *magnitude = z / p;
862  *phase = acc;
863  }
864 }
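/*
 * get_response() evaluates the frequency response at angular frequency w.
 * For expanded coefficients (format 0) it sums both polynomials at e^{jw}
 * and divides; for the root-based formats the magnitude is the product of
 * distances from e^{jw} to the zeros divided by the product of distances
 * to the poles, and the phase is the matching sum/difference of angles.
 */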
865 
866 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
867 {
868  AudioIIRContext *s = ctx->priv;
869  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
870  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
871  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
872  char text[32];
873  int ch, i;
874 
875  memset(out->data[0], 0, s->h * out->linesize[0]);
876 
877  phase = av_malloc_array(s->w, sizeof(*phase));
878  temp = av_malloc_array(s->w, sizeof(*temp));
879  mag = av_malloc_array(s->w, sizeof(*mag));
880  delay = av_malloc_array(s->w, sizeof(*delay));
881  if (!mag || !phase || !delay || !temp)
882  goto end;
883 
884  ch = av_clip(s->ir_channel, 0, s->channels - 1);
885  for (i = 0; i < s->w; i++) {
886  const double *b = s->iir[ch].ab[0];
887  const double *a = s->iir[ch].ab[1];
888  const int nb_b = s->iir[ch].nb_ab[0];
889  const int nb_a = s->iir[ch].nb_ab[1];
890  double w = i * M_PI / (s->w - 1);
891  double m, p;
892 
893  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
894 
895  mag[i] = s->iir[ch].g * m;
896  phase[i] = p;
897  min = fmin(min, mag[i]);
898  max = fmax(max, mag[i]);
899  }
900 
901  temp[0] = 0.;
902  for (i = 0; i < s->w - 1; i++) {
903  double d = phase[i] - phase[i + 1];
904  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
905  }
906 
907  min_phase = phase[0];
908  max_phase = phase[0];
909  for (i = 1; i < s->w; i++) {
910  temp[i] += temp[i - 1];
911  phase[i] += temp[i];
912  min_phase = fmin(min_phase, phase[i]);
913  max_phase = fmax(max_phase, phase[i]);
914  }
915 
916  for (i = 0; i < s->w - 1; i++) {
917  double div = s->w / (double)sample_rate;
918 
919  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
920  min_delay = fmin(min_delay, delay[i + 1]);
921  max_delay = fmax(max_delay, delay[i + 1]);
922  }
923  delay[0] = delay[1];
924 
925  for (i = 0; i < s->w; i++) {
926  int ymag = mag[i] / max * (s->h - 1);
927  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
928  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
929 
930  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
931  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
932  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
933 
934  if (prev_ymag < 0)
935  prev_ymag = ymag;
936  if (prev_yphase < 0)
937  prev_yphase = yphase;
938  if (prev_ydelay < 0)
939  prev_ydelay = ydelay;
940 
941  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
942  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
943  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
944 
945  prev_ymag = ymag;
946  prev_yphase = yphase;
947  prev_ydelay = ydelay;
948  }
949 
950  if (s->w > 400 && s->h > 100) {
951  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
952  snprintf(text, sizeof(text), "%.2f", max);
953  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
954 
955  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
956  snprintf(text, sizeof(text), "%.2f", min);
957  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
958 
959  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
960  snprintf(text, sizeof(text), "%.2f", max_phase);
961  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
962 
963  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
964  snprintf(text, sizeof(text), "%.2f", min_phase);
965  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
966 
967  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
968  snprintf(text, sizeof(text), "%.2f", max_delay);
969  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
970 
971  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
972  snprintf(text, sizeof(text), "%.2f", min_delay);
973  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
974  }
975 
976 end:
977  av_free(delay);
978  av_free(temp);
979  av_free(phase);
980  av_free(mag);
981 }
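/*
 * draw_response() renders the magnitude, unwrapped phase and group delay
 * of the selected channel across 0..pi into the response video frame,
 * scaling each curve to the frame height; when the frame is larger than
 * 400x100 the min/max values are also printed with the built-in CGA font.
 */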
982 
983 static int config_output(AVFilterLink *outlink)
984 {
985  AVFilterContext *ctx = outlink->src;
986  AudioIIRContext *s = ctx->priv;
987  AVFilterLink *inlink = ctx->inputs[0];
988  int ch, ret, i;
989 
990  s->channels = inlink->channels;
991  s->iir = av_calloc(s->channels, sizeof(*s->iir));
992  if (!s->iir)
993  return AVERROR(ENOMEM);
994 
995  ret = read_gains(ctx, s->g_str, inlink->channels);
996  if (ret < 0)
997  return ret;
998 
999  ret = read_channels(ctx, inlink->channels, s->a_str, 0);
1000  if (ret < 0)
1001  return ret;
1002 
1003  ret = read_channels(ctx, inlink->channels, s->b_str, 1);
1004  if (ret < 0)
1005  return ret;
1006 
1007  if (s->format == 2) {
1008  convert_pr2zp(ctx, inlink->channels);
1009  } else if (s->format == 3) {
1010  convert_pd2zp(ctx, inlink->channels);
1011  } else if (s->format == 4) {
1012  convert_sp2zp(ctx, inlink->channels);
1013  }
1014  if (s->format > 0) {
1015  check_stability(ctx, inlink->channels);
1016  }
1017 
1018  av_frame_free(&s->video);
1019  if (s->response) {
1020  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1021  if (!s->video)
1022  return AVERROR(ENOMEM);
1023 
1024  draw_response(ctx, s->video, inlink->sample_rate);
1025  }
1026 
1027  if (s->format == 0)
1028  av_log(ctx, AV_LOG_WARNING, "tf coefficients format is not recommended for too high number of zeros/poles.\n");
1029 
1030  if (s->format > 0 && s->process == 0) {
1031  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1032 
1033  ret = convert_zp2tf(ctx, inlink->channels);
1034  if (ret < 0)
1035  return ret;
1036  } else if (s->format == 0 && s->process == 1) {
1037  av_log(ctx, AV_LOG_ERROR, "Serial cascading is not implemented for transfer function.\n");
1038  return AVERROR_PATCHWELCOME;
1039  } else if (s->format > 0 && s->process == 1) {
1040  if (inlink->format == AV_SAMPLE_FMT_S16P)
1041  av_log(ctx, AV_LOG_WARNING, "Serial cascading is not recommended for i16 precision.\n");
1042 
1043  ret = decompose_zp2biquads(ctx, inlink->channels);
1044  if (ret < 0)
1045  return ret;
1046  }
1047 
1048  for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
1049  IIRChannel *iir = &s->iir[ch];
1050 
1051  for (i = 1; i < iir->nb_ab[0]; i++) {
1052  iir->ab[0][i] /= iir->ab[0][0];
1053  }
1054 
1055  iir->ab[0][0] = 1.0;
1056  for (i = 0; i < iir->nb_ab[1]; i++) {
1057  iir->ab[1][i] *= iir->g;
1058  }
1059 
1060  normalize_coeffs(ctx, ch);
1061  }
1062 
1063  switch (inlink->format) {
1064  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1065  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1066  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1067  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1068  }
1069 
1070  return 0;
1071 }
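/*
 * config_output() wires everything together: gains and both coefficient
 * sets are parsed, polar and s-plane descriptions are converted to z-plane
 * form, stability is checked, and the optional response video is drawn.
 * Zero/pole input is then either expanded to transfer-function
 * coefficients (direct processing) or decomposed into biquads (serial
 * processing); tf input gets a[0] normalized to 1 with the gain folded
 * into b[], and the per-channel worker matching the sample format is
 * selected.
 */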
1072 
1073 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1074 {
1075  AVFilterContext *ctx = inlink->dst;
1076  AudioIIRContext *s = ctx->priv;
1077  AVFilterLink *outlink = ctx->outputs[0];
1078  ThreadData td;
1079  AVFrame *out;
1080  int ch, ret;
1081 
1082  if (av_frame_is_writable(in)) {
1083  out = in;
1084  } else {
1085  out = ff_get_audio_buffer(outlink, in->nb_samples);
1086  if (!out) {
1087  av_frame_free(&in);
1088  return AVERROR(ENOMEM);
1089  }
1090  av_frame_copy_props(out, in);
1091  }
1092 
1093  td.in = in;
1094  td.out = out;
1095  ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);
1096 
1097  for (ch = 0; ch < outlink->channels; ch++) {
1098  if (s->iir[ch].clippings > 0)
1099  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1100  ch, s->iir[ch].clippings);
1101  s->iir[ch].clippings = 0;
1102  }
1103 
1104  if (in != out)
1105  av_frame_free(&in);
1106 
1107  if (s->response) {
1108  AVFilterLink *outlink = ctx->outputs[1];
1109  int64_t old_pts = s->video->pts;
1110  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1111 
1112  if (new_pts > old_pts) {
1113  AVFrame *clone;
1114 
1115  s->video->pts = new_pts;
1116  clone = av_frame_clone(s->video);
1117  if (!clone)
1118  return AVERROR(ENOMEM);
1119  ret = ff_filter_frame(outlink, clone);
1120  if (ret < 0)
1121  return ret;
1122  }
1123  }
1124 
1125  return ff_filter_frame(outlink, out);
1126 }
1127 
1128 static int config_video(AVFilterLink *outlink)
1129 {
1130  AVFilterContext *ctx = outlink->src;
1131  AudioIIRContext *s = ctx->priv;
1132 
1133  outlink->sample_aspect_ratio = (AVRational){1,1};
1134  outlink->w = s->w;
1135  outlink->h = s->h;
1136  outlink->frame_rate = s->rate;
1137  outlink->time_base = av_inv_q(outlink->frame_rate);
1138 
1139  return 0;
1140 }
1141 
1142 static av_cold int init(AVFilterContext *ctx)
1143 {
1144  AudioIIRContext *s = ctx->priv;
1145  AVFilterPad pad, vpad;
1146  int ret;
1147 
1148  if (!s->a_str || !s->b_str || !s->g_str) {
1149  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1150  return AVERROR(EINVAL);
1151  }
1152 
1153  switch (s->precision) {
1154  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1155  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1156  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1157  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1158  default: return AVERROR_BUG;
1159  }
1160 
1161  pad = (AVFilterPad){
1162  .name = "default",
1163  .type = AVMEDIA_TYPE_AUDIO,
1164  .config_props = config_output,
1165  };
1166 
1167  ret = ff_insert_outpad(ctx, 0, &pad);
1168  if (ret < 0)
1169  return ret;
1170 
1171  if (s->response) {
1172  vpad = (AVFilterPad){
1173  .name = "filter_response",
1174  .type = AVMEDIA_TYPE_VIDEO,
1175  .config_props = config_video,
1176  };
1177 
1178  ret = ff_insert_outpad(ctx, 1, &vpad);
1179  if (ret < 0)
1180  return ret;
1181  }
1182 
1183  return 0;
1184 }
1185 
1186 static av_cold void uninit(AVFilterContext *ctx)
1187 {
1188  AudioIIRContext *s = ctx->priv;
1189  int ch;
1190 
1191  if (s->iir) {
1192  for (ch = 0; ch < s->channels; ch++) {
1193  IIRChannel *iir = &s->iir[ch];
1194  av_freep(&iir->ab[0]);
1195  av_freep(&iir->ab[1]);
1196  av_freep(&iir->cache[0]);
1197  av_freep(&iir->cache[1]);
1198  av_freep(&iir->biquads);
1199  }
1200  }
1201  av_freep(&s->iir);
1202 
1203  av_frame_free(&s->video);
1204 }
1205 
1206 static const AVFilterPad inputs[] = {
1207  {
1208  .name = "default",
1209  .type = AVMEDIA_TYPE_AUDIO,
1210  .filter_frame = filter_frame,
1211  },
1212  { NULL }
1213 };
1214 
1215 #define OFFSET(x) offsetof(AudioIIRContext, x)
1216 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1217 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1218 
1219 static const AVOption aiir_options[] = {
1220  { "zeros", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1221  { "z", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1222  { "poles", "set A/denominator/poles coefficients", OFFSET(a_str),AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1223  { "p", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1224  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1225  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1226  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1227  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1228  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, AF, "format" },
1229  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, AF, "format" },
1230  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
1231  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
1232  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
1233  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
1234  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
1235  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "process" },
1236  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "process" },
1237  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
1238  { "s", "serial cascading", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
1239  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1240  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1241  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
1242  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
1243  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
1244  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
1245  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1246  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1247  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1248  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1249  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1250  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1251  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1252  { NULL },
1253 };
1254 
1255 AVFILTER_DEFINE_CLASS(aiir);
1256 
1257 AVFilter ff_af_aiir = {
1258  .name = "aiir",
1259  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1260  .priv_size = sizeof(AudioIIRContext),
1261  .priv_class = &aiir_class,
1262  .init = init,
1263  .uninit = uninit,
1264  .query_formats = query_formats,
1265  .inputs = inputs,
1266  .outputs = NULL,
1267  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_SLICE_THREADS,
1268 };
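/*
 * Illustrative usage sketch, using only the option names defined in
 * aiir_options above (file names and coefficients are assumptions): with
 * the transfer-function format and direct processing, the one-pole
 * low-pass y[n] = 0.1*x[n] + 0.9*y[n-1] can be requested as
 *
 *   ffmpeg -i in.wav -af "aiir=z=0.1:p=1 -0.9:f=tf:r=d" out.wav
 *
 * where "z" carries the numerator (zeros) coefficients, "p" the
 * denominator (poles) coefficients, "f=tf" selects the transfer-function
 * format and "r=d" selects direct processing.
 */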