FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/intreadwrite.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/xga_font_data.h"
27 #include "audio.h"
28 #include "avfilter.h"
29 #include "internal.h"
30 
31 typedef struct ThreadData {
32  AVFrame *in, *out;
33 } ThreadData;
34 
35 typedef struct Pair {
36  int a, b;
37 } Pair;
38 
39 typedef struct BiquadContext {
40  double a[3];
41  double b[3];
42  double w1, w2;
43 } BiquadContext;
44 
45 typedef struct IIRChannel {
46  int nb_ab[2];
47  double *ab[2];
48  double g;
49  double *cache[2];
50  double fir;
51  BiquadContext *biquads;
52  int clippings;
53 } IIRChannel;
54 
55 typedef struct AudioIIRContext {
56  const AVClass *class;
57  char *a_str, *b_str, *g_str;
58  double dry_gain, wet_gain;
59  double mix;
60  int normalize;
61  int format;
62  int process;
63  int precision;
64  int response;
65  int w, h;
66  int ir_channel;
67  AVRational rate;
68 
69  AVFrame *video;
70 
71  IIRChannel *iir;
72  int channels;
73  enum AVSampleFormat sample_format;
74 
75  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
76 } AudioIIRContext;
77 
78 static int query_formats(AVFilterContext *ctx)
79 {
80  AudioIIRContext *s = ctx->priv;
81  AVFilterFormats *formats;
82  enum AVSampleFormat sample_fmts[] = {
83  AV_SAMPLE_FMT_DBLP,
84  AV_SAMPLE_FMT_NONE
85  };
86  static const enum AVPixelFormat pix_fmts[] = {
87  AV_PIX_FMT_RGB0,
88  AV_PIX_FMT_NONE
89  };
90  int ret;
91 
92  if (s->response) {
93  AVFilterLink *videolink = ctx->outputs[1];
94 
95  formats = ff_make_format_list(pix_fmts);
96  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
97  return ret;
98  }
99 
100  ret = ff_set_common_all_channel_counts(ctx);
101  if (ret < 0)
102  return ret;
103 
104  sample_fmts[0] = s->sample_format;
105  ret = ff_set_common_formats_from_list(ctx, sample_fmts);
106  if (ret < 0)
107  return ret;
108 
109  return ff_set_common_all_samplerates(ctx);
110 }
111 
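/*
 * Direct processing with transfer-function coefficients: ic[]/oc[] are the
 * per-channel input/output histories and each output sample is
 *   y[n] = b[0]*in[n] + ... + b[nb_b-1]*in[n-nb_b+1]
 *        - a[1]*y[n-1] - ... - a[nb_a-1]*y[n-nb_a+1]
 * (a[0] is normalized to 1 in config_output()), followed by gain and
 * dry/wet mixing and, for integer formats, clipping.
 */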
112 #define IIR_CH(name, type, min, max, need_clipping) \
113 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
114 { \
115  AudioIIRContext *s = ctx->priv; \
116  const double ig = s->dry_gain; \
117  const double og = s->wet_gain; \
118  const double mix = s->mix; \
119  ThreadData *td = arg; \
120  AVFrame *in = td->in, *out = td->out; \
121  const type *src = (const type *)in->extended_data[ch]; \
122  double *oc = (double *)s->iir[ch].cache[0]; \
123  double *ic = (double *)s->iir[ch].cache[1]; \
124  const int nb_a = s->iir[ch].nb_ab[0]; \
125  const int nb_b = s->iir[ch].nb_ab[1]; \
126  const double *a = s->iir[ch].ab[0]; \
127  const double *b = s->iir[ch].ab[1]; \
128  const double g = s->iir[ch].g; \
129  int *clippings = &s->iir[ch].clippings; \
130  type *dst = (type *)out->extended_data[ch]; \
131  int n; \
132  \
133  for (n = 0; n < in->nb_samples; n++) { \
134  double sample = 0.; \
135  int x; \
136  \
137  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
138  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
139  ic[0] = src[n] * ig; \
140  for (x = 0; x < nb_b; x++) \
141  sample += b[x] * ic[x]; \
142  \
143  for (x = 1; x < nb_a; x++) \
144  sample -= a[x] * oc[x]; \
145  \
146  oc[0] = sample; \
147  sample *= og * g; \
148  sample = sample * mix + ic[0] * (1. - mix); \
149  if (need_clipping && sample < min) { \
150  (*clippings)++; \
151  dst[n] = min; \
152  } else if (need_clipping && sample > max) { \
153  (*clippings)++; \
154  dst[n] = max; \
155  } else { \
156  dst[n] = sample; \
157  } \
158  } \
159  \
160  return 0; \
161 }
162 
163 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
164 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
165 IIR_CH(fltp, float, -1., 1., 0)
166 IIR_CH(dblp, double, -1., 1., 0)
167 
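/*
 * Serial (cascade) processing: the biquads run one after another in
 * transposed direct form II. With a1/a2 holding the negated denominator
 * coefficients, each section computes
 *   y  = b0*x + w1
 *   w1 = b1*x + w2 + a1*y
 *   w2 = b2*x + a2*y
 * and every section after the first reads its input from dst[], i.e. from
 * the previous section's output.
 */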
168 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
169 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
170  int ch, int nb_jobs) \
171 { \
172  AudioIIRContext *s = ctx->priv; \
173  const double ig = s->dry_gain; \
174  const double og = s->wet_gain; \
175  const double mix = s->mix; \
176  const double imix = 1. - mix; \
177  ThreadData *td = arg; \
178  AVFrame *in = td->in, *out = td->out; \
179  const type *src = (const type *)in->extended_data[ch]; \
180  type *dst = (type *)out->extended_data[ch]; \
181  IIRChannel *iir = &s->iir[ch]; \
182  const double g = iir->g; \
183  int *clippings = &iir->clippings; \
184  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
185  int n, i; \
186  \
187  for (i = nb_biquads - 1; i >= 0; i--) { \
188  const double a1 = -iir->biquads[i].a[1]; \
189  const double a2 = -iir->biquads[i].a[2]; \
190  const double b0 = iir->biquads[i].b[0]; \
191  const double b1 = iir->biquads[i].b[1]; \
192  const double b2 = iir->biquads[i].b[2]; \
193  double w1 = iir->biquads[i].w1; \
194  double w2 = iir->biquads[i].w2; \
195  \
196  for (n = 0; n < in->nb_samples; n++) { \
197  double i0 = ig * (i ? dst[n] : src[n]); \
198  double o0 = i0 * b0 + w1; \
199  \
200  w1 = b1 * i0 + w2 + a1 * o0; \
201  w2 = b2 * i0 + a2 * o0; \
202  o0 *= og * g; \
203  \
204  o0 = o0 * mix + imix * i0; \
205  if (need_clipping && o0 < min) { \
206  (*clippings)++; \
207  dst[n] = min; \
208  } else if (need_clipping && o0 > max) { \
209  (*clippings)++; \
210  dst[n] = max; \
211  } else { \
212  dst[n] = o0; \
213  } \
214  } \
215  iir->biquads[i].w1 = w1; \
216  iir->biquads[i].w2 = w2; \
217  } \
218  \
219  return 0; \
220 }
221 
222 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
223 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
224 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
225 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
226 
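/*
 * Parallel processing: every biquad filters the original input and its
 * output is accumulated into dst[]; the direct path fir * x[n] is added
 * afterwards, followed by the dry/wet mix.
 */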
227 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
228 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
229  int ch, int nb_jobs) \
230 { \
231  AudioIIRContext *s = ctx->priv; \
232  const double ig = s->dry_gain; \
233  const double og = s->wet_gain; \
234  const double mix = s->mix; \
235  const double imix = 1. - mix; \
236  ThreadData *td = arg; \
237  AVFrame *in = td->in, *out = td->out; \
238  const type *src = (const type *)in->extended_data[ch]; \
239  type *dst = (type *)out->extended_data[ch]; \
240  IIRChannel *iir = &s->iir[ch]; \
241  const double g = iir->g; \
242  const double fir = iir->fir; \
243  int *clippings = &iir->clippings; \
244  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
245  int n, i; \
246  \
247  for (i = 0; i < nb_biquads; i++) { \
248  const double a1 = -iir->biquads[i].a[1]; \
249  const double a2 = -iir->biquads[i].a[2]; \
250  const double b1 = iir->biquads[i].b[1]; \
251  const double b2 = iir->biquads[i].b[2]; \
252  double w1 = iir->biquads[i].w1; \
253  double w2 = iir->biquads[i].w2; \
254  \
255  for (n = 0; n < in->nb_samples; n++) { \
256  double i0 = ig * src[n]; \
257  double o0 = w1; \
258  \
259  w1 = b1 * i0 + w2 + a1 * o0; \
260  w2 = b2 * i0 + a2 * o0; \
261  o0 *= og * g; \
262  o0 += dst[n]; \
263  \
264  if (need_clipping && o0 < min) { \
265  (*clippings)++; \
266  dst[n] = min; \
267  } else if (need_clipping && o0 > max) { \
268  (*clippings)++; \
269  dst[n] = max; \
270  } else { \
271  dst[n] = o0; \
272  } \
273  } \
274  iir->biquads[i].w1 = w1; \
275  iir->biquads[i].w2 = w2; \
276  } \
277  \
278  for (n = 0; n < in->nb_samples; n++) { \
279  dst[n] += fir * src[n]; \
280  dst[n] = dst[n] * mix + imix * src[n]; \
281  } \
282  \
283  return 0; \
284 }
285 
286 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
287 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
288 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
289 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
290 
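/*
 * Lattice-ladder processing: k[] are the reflection coefficients and v[]
 * the ladder (tap) weights; x[] holds the per-stage delay states. Each
 * sample is propagated backwards through the lattice stages and the output
 * is the v-weighted sum of the tap signals.
 */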
291 #define LATTICE_IIR_CH(name, type, min, max, need_clipping) \
292 static int iir_ch_lattice_## name(AVFilterContext *ctx, void *arg, \
293  int ch, int nb_jobs) \
294 { \
295  AudioIIRContext *s = ctx->priv; \
296  const double ig = s->dry_gain; \
297  const double og = s->wet_gain; \
298  const double mix = s->mix; \
299  ThreadData *td = arg; \
300  AVFrame *in = td->in, *out = td->out; \
301  const type *src = (const type *)in->extended_data[ch]; \
302  double n0, n1, p0, *x = (double *)s->iir[ch].cache[0]; \
303  const int nb_stages = s->iir[ch].nb_ab[1]; \
304  const double *v = s->iir[ch].ab[0]; \
305  const double *k = s->iir[ch].ab[1]; \
306  const double g = s->iir[ch].g; \
307  int *clippings = &s->iir[ch].clippings; \
308  type *dst = (type *)out->extended_data[ch]; \
309  int n; \
310  \
311  for (n = 0; n < in->nb_samples; n++) { \
312  const double in = src[n] * ig; \
313  double out = 0.; \
314  \
315  n1 = in; \
316  for (int i = nb_stages - 1; i >= 0; i--) { \
317  n0 = n1 - k[i] * x[i]; \
318  p0 = n0 * k[i] + x[i]; \
319  out += p0 * v[i+1]; \
320  x[i] = p0; \
321  n1 = n0; \
322  } \
323  \
324  out += n1 * v[0]; \
325  memmove(&x[1], &x[0], nb_stages * sizeof(*x)); \
326  x[0] = n1; \
327  out *= og * g; \
328  out = out * mix + in * (1. - mix); \
329  if (need_clipping && out < min) { \
330  (*clippings)++; \
331  dst[n] = min; \
332  } else if (need_clipping && out > max) { \
333  (*clippings)++; \
334  dst[n] = max; \
335  } else { \
336  dst[n] = out; \
337  } \
338  } \
339  \
340  return 0; \
341 }
342 
343 LATTICE_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
344 LATTICE_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
345 LATTICE_IIR_CH(fltp, float, -1., 1., 0)
346 LATTICE_IIR_CH(dblp, double, -1., 1., 0)
347 
348 static void count_coefficients(char *item_str, int *nb_items)
349 {
350  char *p;
351 
352  if (!item_str)
353  return;
354 
355  *nb_items = 1;
356  for (p = item_str; *p && *p != '|'; p++) {
357  if (*p == ' ')
358  (*nb_items)++;
359  }
360 }
361 
362 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
363 {
364  AudioIIRContext *s = ctx->priv;
365  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
366  int i;
367 
368  p = old_str = av_strdup(item_str);
369  if (!p)
370  return AVERROR(ENOMEM);
371  for (i = 0; i < nb_items; i++) {
372  if (!(arg = av_strtok(p, "|", &saveptr)))
373  arg = prev_arg;
374 
375  if (!arg) {
376  av_freep(&old_str);
377  return AVERROR(EINVAL);
378  }
379 
380  p = NULL;
381  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
382  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
383  av_freep(&old_str);
384  return AVERROR(EINVAL);
385  }
386 
387  prev_arg = arg;
388  }
389 
390  av_freep(&old_str);
391 
392  return 0;
393 }
394 
395 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
396 {
397  char *p, *arg, *old_str, *saveptr = NULL;
398  int i;
399 
400  p = old_str = av_strdup(item_str);
401  if (!p)
402  return AVERROR(ENOMEM);
403  for (i = 0; i < nb_items; i++) {
404  if (!(arg = av_strtok(p, " ", &saveptr)))
405  break;
406 
407  p = NULL;
408  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
409  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
410  av_freep(&old_str);
411  return AVERROR(EINVAL);
412  }
413  }
414 
415  av_freep(&old_str);
416 
417  return 0;
418 }
419 
420 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
421 {
422  char *p, *arg, *old_str, *saveptr = NULL;
423  int i;
424 
425  p = old_str = av_strdup(item_str);
426  if (!p)
427  return AVERROR(ENOMEM);
428  for (i = 0; i < nb_items; i++) {
429  if (!(arg = av_strtok(p, " ", &saveptr)))
430  break;
431 
432  p = NULL;
433  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
434  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
435  av_freep(&old_str);
436  return AVERROR(EINVAL);
437  }
438  }
439 
440  av_freep(&old_str);
441 
442  return 0;
443 }
444 
445 static const char *const format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
446 
447 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
448 {
449  AudioIIRContext *s = ctx->priv;
450  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
451  int i, ret;
452 
453  p = old_str = av_strdup(item_str);
454  if (!p)
455  return AVERROR(ENOMEM);
456  for (i = 0; i < channels; i++) {
457  IIRChannel *iir = &s->iir[i];
458 
459  if (!(arg = av_strtok(p, "|", &saveptr)))
460  arg = prev_arg;
461 
462  if (!arg) {
463  av_freep(&old_str);
464  return AVERROR(EINVAL);
465  }
466 
467  count_coefficients(arg, &iir->nb_ab[ab]);
468 
469  p = NULL;
470  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
471  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
472  if (!iir->ab[ab] || !iir->cache[ab]) {
473  av_freep(&old_str);
474  return AVERROR(ENOMEM);
475  }
476 
477  if (s->format > 0) {
478  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
479  } else {
480  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
481  }
482  if (ret < 0) {
483  av_freep(&old_str);
484  return ret;
485  }
486  prev_arg = arg;
487  }
488 
489  av_freep(&old_str);
490 
491  return 0;
492 }
493 
494 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
495 {
496  *RE = re * re2 - im * im2;
497  *IM = re * im2 + re2 * im;
498 }
499 
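/*
 * Expand n complex roots (interleaved re/im pairs in pz[]) into the
 * coefficients of the monic polynomial prod_i (z - pz[i]); coefs[] is laid
 * out as re/im pairs per power of z. The imaginary parts must vanish,
 * otherwise the roots were not supplied in conjugate pairs.
 */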
500 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
501 {
502  coefs[2 * n] = 1.0;
503 
504  for (int i = 1; i <= n; i++) {
505  for (int j = n - i; j < n; j++) {
506  double re, im;
507 
508  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
509  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
510 
511  coefs[2 * j] -= re;
512  coefs[2 * j + 1] -= im;
513  }
514  }
515 
516  for (int i = 0; i < n + 1; i++) {
517  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
518  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
519  coefs[2 * i + 1], i);
520  return AVERROR(EINVAL);
521  }
522  }
523 
524  return 0;
525 }
526 
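/*
 * With the normalize option enabled, scale the numerator so that the
 * response at DC (z = 1) has unity gain, i.e. so that the coefficient sums
 * of numerator and denominator match.
 */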
527 static void normalize_coeffs(AVFilterContext *ctx, int ch)
528 {
529  AudioIIRContext *s = ctx->priv;
530  IIRChannel *iir = &s->iir[ch];
531  double sum_den = 0.;
532 
533  if (!s->normalize)
534  return;
535 
536  for (int i = 0; i < iir->nb_ab[1]; i++) {
537  sum_den += iir->ab[1][i];
538  }
539 
540  if (sum_den > 1e-6) {
541  double factor, sum_num = 0.;
542 
543  for (int i = 0; i < iir->nb_ab[0]; i++) {
544  sum_num += iir->ab[0][i];
545  }
546 
547  factor = sum_num / sum_den;
548 
549  for (int i = 0; i < iir->nb_ab[1]; i++) {
550  iir->ab[1][i] *= factor;
551  }
552  }
553 }
554 
555 static int convert_zp2tf(AVFilterContext *ctx, int channels)
556 {
557  AudioIIRContext *s = ctx->priv;
558  int ch, i, j, ret = 0;
559 
560  for (ch = 0; ch < channels; ch++) {
561  IIRChannel *iir = &s->iir[ch];
562  double *topc, *botc;
563 
564  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
565  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
566  if (!topc || !botc) {
567  ret = AVERROR(ENOMEM);
568  goto fail;
569  }
570 
571  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
572  if (ret < 0) {
573  goto fail;
574  }
575 
576  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
577  if (ret < 0) {
578  goto fail;
579  }
580 
581  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
582  iir->ab[1][j] = topc[2 * i];
583  }
584  iir->nb_ab[1]++;
585 
586  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
587  iir->ab[0][j] = botc[2 * i];
588  }
589  iir->nb_ab[0]++;
590 
591  normalize_coeffs(ctx, ch);
592 
593 fail:
594  av_free(topc);
595  av_free(botc);
596  if (ret < 0)
597  break;
598  }
599 
600  return ret;
601 }
602 
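/*
 * Split the zeros/poles into second-order sections: repeatedly take the
 * remaining pole with the largest magnitude together with its complex
 * conjugate, pair it with the nearest zero (and its conjugate), expand
 * both pairs into quadratics and normalize so that a[0] == 1. Consumed
 * poles/zeros are marked with NaN; the overall gain is folded into the
 * first section.
 */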
603 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
604 {
605  AudioIIRContext *s = ctx->priv;
606  int ch, ret;
607 
608  for (ch = 0; ch < channels; ch++) {
609  IIRChannel *iir = &s->iir[ch];
610  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
611  int current_biquad = 0;
612 
613  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
614  if (!iir->biquads)
615  return AVERROR(ENOMEM);
616 
617  while (nb_biquads--) {
618  Pair outmost_pole = { -1, -1 };
619  Pair nearest_zero = { -1, -1 };
620  double zeros[4] = { 0 };
621  double poles[4] = { 0 };
622  double b[6] = { 0 };
623  double a[6] = { 0 };
624  double min_distance = DBL_MAX;
625  double max_mag = 0;
626  double factor;
627  int i;
628 
629  for (i = 0; i < iir->nb_ab[0]; i++) {
630  double mag;
631 
632  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
633  continue;
634  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
635 
636  if (mag > max_mag) {
637  max_mag = mag;
638  outmost_pole.a = i;
639  }
640  }
641 
642  for (i = 0; i < iir->nb_ab[0]; i++) {
643  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
644  continue;
645 
646  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
647  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
648  outmost_pole.b = i;
649  break;
650  }
651  }
652 
653  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
654 
655  if (outmost_pole.a < 0 || outmost_pole.b < 0)
656  return AVERROR(EINVAL);
657 
658  for (i = 0; i < iir->nb_ab[1]; i++) {
659  double distance;
660 
661  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
662  continue;
663  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
664  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
665 
666  if (distance < min_distance) {
667  min_distance = distance;
668  nearest_zero.a = i;
669  }
670  }
671 
672  for (i = 0; i < iir->nb_ab[1]; i++) {
673  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
674  continue;
675 
676  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
677  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
678  nearest_zero.b = i;
679  break;
680  }
681  }
682 
683  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
684 
685  if (nearest_zero.a < 0 || nearest_zero.b < 0)
686  return AVERROR(EINVAL);
687 
688  poles[0] = iir->ab[0][2 * outmost_pole.a ];
689  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
690 
691  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
692  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
693 
694  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
695  zeros[2] = 0;
696  zeros[3] = 0;
697 
698  poles[2] = 0;
699  poles[3] = 0;
700  } else {
701  poles[2] = iir->ab[0][2 * outmost_pole.b ];
702  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
703 
704  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
705  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
706  }
707 
708  ret = expand(ctx, zeros, 2, b);
709  if (ret < 0)
710  return ret;
711 
712  ret = expand(ctx, poles, 2, a);
713  if (ret < 0)
714  return ret;
715 
716  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
717  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
718  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
719  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
720 
721  iir->biquads[current_biquad].a[0] = 1.;
722  iir->biquads[current_biquad].a[1] = a[2] / a[4];
723  iir->biquads[current_biquad].a[2] = a[0] / a[4];
724  iir->biquads[current_biquad].b[0] = b[4] / a[4];
725  iir->biquads[current_biquad].b[1] = b[2] / a[4];
726  iir->biquads[current_biquad].b[2] = b[0] / a[4];
727 
728  if (s->normalize &&
729  fabs(iir->biquads[current_biquad].b[0] +
730  iir->biquads[current_biquad].b[1] +
731  iir->biquads[current_biquad].b[2]) > 1e-6) {
732  factor = (iir->biquads[current_biquad].a[0] +
733  iir->biquads[current_biquad].a[1] +
734  iir->biquads[current_biquad].a[2]) /
735  (iir->biquads[current_biquad].b[0] +
736  iir->biquads[current_biquad].b[1] +
737  iir->biquads[current_biquad].b[2]);
738 
739  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
740 
741  iir->biquads[current_biquad].b[0] *= factor;
742  iir->biquads[current_biquad].b[1] *= factor;
743  iir->biquads[current_biquad].b[2] *= factor;
744  }
745 
746  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
747  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
748  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
749 
750  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
751  iir->biquads[current_biquad].a[0],
752  iir->biquads[current_biquad].a[1],
753  iir->biquads[current_biquad].a[2],
754  iir->biquads[current_biquad].b[0],
755  iir->biquads[current_biquad].b[1],
756  iir->biquads[current_biquad].b[2]);
757 
758  current_biquad++;
759  }
760  }
761 
762  return 0;
763 }
764 
765 static void biquad_process(double *x, double *y, int length,
766  double b0, double b1, double b2,
767  double a1, double a2)
768 {
769  double w1 = 0., w2 = 0.;
770 
771  a1 = -a1;
772  a2 = -a2;
773 
774  for (int n = 0; n < length; n++) {
775  double out, in = x[n];
776 
777  y[n] = out = in * b0 + w1;
778  w1 = b1 * in + w2 + a1 * out;
779  w2 = b2 * in + a2 * out;
780  }
781 }
782 
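/*
 * Solve a small dense linear system via LU decomposition: factor into lu[]
 * (note the matrix is read transposed), forward-substitute into y[] and
 * back-substitute into x[].
 */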
783 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
784 {
785  double sum = 0.;
786 
787  for (int i = 0; i < n; i++) {
788  for (int j = i; j < n; j++) {
789  sum = 0.;
790  for (int k = 0; k < i; k++)
791  sum += lu[i * n + k] * lu[k * n + j];
792  lu[i * n + j] = matrix[j * n + i] - sum;
793  }
794  for (int j = i + 1; j < n; j++) {
795  sum = 0.;
796  for (int k = 0; k < i; k++)
797  sum += lu[j * n + k] * lu[k * n + i];
798  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
799  }
800  }
801 
802  for (int i = 0; i < n; i++) {
803  sum = 0.;
804  for (int k = 0; k < i; k++)
805  sum += lu[i * n + k] * y[k];
806  y[i] = vector[i] - sum;
807  }
808 
809  for (int i = n - 1; i >= 0; i--) {
810  sum = 0.;
811  for (int k = i + 1; k < n; k++)
812  sum += lu[i * n + k] * x[k];
813  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
814  }
815 }
816 
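/*
 * Convert the cascade of biquads into a parallel bank: compute the impulse
 * response of the full cascade, then the impulse responses of the
 * individual all-pole sections, and solve a linear system so that new
 * two-tap numerators (plus the direct term stored in iir->fir) reproduce
 * the same impulse response when the sections are summed in parallel.
 */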
817 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
818 {
819  AudioIIRContext *s = ctx->priv;
820  int ret = 0;
821 
822  for (int ch = 0; ch < channels; ch++) {
823  IIRChannel *iir = &s->iir[ch];
824  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
825  int length = nb_biquads * 2 + 1;
826  double *impulse = av_calloc(length, sizeof(*impulse));
827  double *y = av_calloc(length, sizeof(*y));
828  double *resp = av_calloc(length, sizeof(*resp));
829  double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
830  double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
831 
832  if (!impulse || !y || !resp || !M || !W) {
833  av_free(impulse);
834  av_free(y);
835  av_free(resp);
836  av_free(M);
837  av_free(W);
838  return AVERROR(ENOMEM);
839  }
840 
841  impulse[0] = 1.;
842 
843  for (int n = 0; n < nb_biquads; n++) {
844  BiquadContext *biquad = &iir->biquads[n];
845 
846  biquad_process(n ? y : impulse, y, length,
847  biquad->b[0], biquad->b[1], biquad->b[2],
848  biquad->a[1], biquad->a[2]);
849  }
850 
851  for (int n = 0; n < nb_biquads; n++) {
852  BiquadContext *biquad = &iir->biquads[n];
853 
854  biquad_process(impulse, resp, length - 1,
855  1., 0., 0., biquad->a[1], biquad->a[2]);
856 
857  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
858  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
859  memset(resp, 0, length * sizeof(*resp));
860  }
861 
862  solve(M, &y[1], length - 1, &impulse[1], resp, W);
863 
864  iir->fir = y[0];
865 
866  for (int n = 0; n < nb_biquads; n++) {
867  BiquadContext *biquad = &iir->biquads[n];
868 
869  biquad->b[0] = 0.;
870  biquad->b[1] = resp[n * 2 + 0];
871  biquad->b[2] = resp[n * 2 + 1];
872  }
873 
874  av_free(impulse);
875  av_free(y);
876  av_free(resp);
877  av_free(M);
878  av_free(W);
879 
880  if (ret < 0)
881  return ret;
882  }
883 
884  return 0;
885 }
886 
887 static void convert_pr2zp(AVFilterContext *ctx, int channels)
888 {
889  AudioIIRContext *s = ctx->priv;
890  int ch;
891 
892  for (ch = 0; ch < channels; ch++) {
893  IIRChannel *iir = &s->iir[ch];
894  int n;
895 
896  for (n = 0; n < iir->nb_ab[0]; n++) {
897  double r = iir->ab[0][2*n];
898  double angle = iir->ab[0][2*n+1];
899 
900  iir->ab[0][2*n] = r * cos(angle);
901  iir->ab[0][2*n+1] = r * sin(angle);
902  }
903 
904  for (n = 0; n < iir->nb_ab[1]; n++) {
905  double r = iir->ab[1][2*n];
906  double angle = iir->ab[1][2*n+1];
907 
908  iir->ab[1][2*n] = r * cos(angle);
909  iir->ab[1][2*n+1] = r * sin(angle);
910  }
911  }
912 }
913 
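/*
 * Map S-plane zeros/poles (given as re/im of s) to the Z-plane with
 * z = e^s, i.e. radius exp(Re(s)) and angle Im(s) (matched-Z transform).
 */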
914 static void convert_sp2zp(AVFilterContext *ctx, int channels)
915 {
916  AudioIIRContext *s = ctx->priv;
917  int ch;
918 
919  for (ch = 0; ch < channels; ch++) {
920  IIRChannel *iir = &s->iir[ch];
921  int n;
922 
923  for (n = 0; n < iir->nb_ab[0]; n++) {
924  double sr = iir->ab[0][2*n];
925  double si = iir->ab[0][2*n+1];
926 
927  iir->ab[0][2*n] = exp(sr) * cos(si);
928  iir->ab[0][2*n+1] = exp(sr) * sin(si);
929  }
930 
931  for (n = 0; n < iir->nb_ab[1]; n++) {
932  double sr = iir->ab[1][2*n];
933  double si = iir->ab[1][2*n+1];
934 
935  iir->ab[1][2*n] = exp(sr) * cos(si);
936  iir->ab[1][2*n+1] = exp(sr) * sin(si);
937  }
938  }
939 }
940 
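/*
 * Helpers for converting an analog (S-plane) transfer function to a
 * digital one: coef_sf2zf() computes the z-domain coefficient of order n
 * from the s-domain polynomial a[] of order N via a bilinear-transform
 * style substitution, expanding the binomial products with factorials.
 */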
941 static double fact(double i)
942 {
943  if (i <= 0.)
944  return 1.;
945  return i * fact(i - 1.);
946 }
947 
948 static double coef_sf2zf(double *a, int N, int n)
949 {
950  double z = 0.;
951 
952  for (int i = 0; i <= N; i++) {
953  double acc = 0.;
954 
955  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
956  acc += ((fact(i) * fact(N - i)) /
957  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
958  ((k & 1) ? -1. : 1.);
959  }
960 
961  z += a[i] * pow(2., i) * acc;
962  }
963 
964  return z;
965 }
966 
967 static void convert_sf2tf(AVFilterContext *ctx, int channels)
968 {
969  AudioIIRContext *s = ctx->priv;
970  int ch;
971 
972  for (ch = 0; ch < channels; ch++) {
973  IIRChannel *iir = &s->iir[ch];
974  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
975  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
976 
977  if (!temp0 || !temp1)
978  goto next;
979 
980  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
981  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
982 
983  for (int n = 0; n < iir->nb_ab[0]; n++)
984  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
985 
986  for (int n = 0; n < iir->nb_ab[1]; n++)
987  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
988 
989 next:
990  av_free(temp0);
991  av_free(temp1);
992  }
993 }
994 
995 static void convert_pd2zp(AVFilterContext *ctx, int channels)
996 {
997  AudioIIRContext *s = ctx->priv;
998  int ch;
999 
1000  for (ch = 0; ch < channels; ch++) {
1001  IIRChannel *iir = &s->iir[ch];
1002  int n;
1003 
1004  for (n = 0; n < iir->nb_ab[0]; n++) {
1005  double r = iir->ab[0][2*n];
1006  double angle = M_PI*iir->ab[0][2*n+1]/180.;
1007 
1008  iir->ab[0][2*n] = r * cos(angle);
1009  iir->ab[0][2*n+1] = r * sin(angle);
1010  }
1011 
1012  for (n = 0; n < iir->nb_ab[1]; n++) {
1013  double r = iir->ab[1][2*n];
1014  double angle = M_PI*iir->ab[1][2*n+1]/180.;
1015 
1016  iir->ab[1][2*n] = r * cos(angle);
1017  iir->ab[1][2*n+1] = r * sin(angle);
1018  }
1019  }
1020 }
1021 
1022 static void check_stability(AVFilterContext *ctx, int channels)
1023 {
1024  AudioIIRContext *s = ctx->priv;
1025  int ch;
1026 
1027  for (ch = 0; ch < channels; ch++) {
1028  IIRChannel *iir = &s->iir[ch];
1029 
1030  for (int n = 0; n < iir->nb_ab[0]; n++) {
1031  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
1032 
1033  if (pr >= 1.) {
1034  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
1035  break;
1036  }
1037  }
1038  }
1039 }
1040 
1041 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
1042 {
1043  const uint8_t *font;
1044  int font_height;
1045  int i;
1046 
1047  font = avpriv_cga_font, font_height = 8;
1048 
1049  for (i = 0; txt[i]; i++) {
1050  int char_y, mask;
1051 
1052  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1053  for (char_y = 0; char_y < font_height; char_y++) {
1054  for (mask = 0x80; mask; mask >>= 1) {
1055  if (font[txt[i] * font_height + char_y] & mask)
1056  AV_WL32(p, color);
1057  p += 4;
1058  }
1059  p += pic->linesize[0] - 8 * 4;
1060  }
1061  }
1062 }
1063 
1064 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1065 {
1066  int dx = FFABS(x1-x0);
1067  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1068  int err = (dx>dy ? dx : -dy) / 2, e2;
1069 
1070  for (;;) {
1071  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1072 
1073  if (x0 == x1 && y0 == y1)
1074  break;
1075 
1076  e2 = err;
1077 
1078  if (e2 >-dx) {
1079  err -= dy;
1080  x0--;
1081  }
1082 
1083  if (e2 < dy) {
1084  err += dx;
1085  y0 += sy;
1086  }
1087  }
1088 }
1089 
1090 static double distance(double x0, double x1, double y0, double y1)
1091 {
1092  return hypot(x0 - x1, y0 - y1);
1093 }
1094 
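/*
 * Evaluate the frequency response at angular frequency w. For transfer-
 * function coefficients the numerator and denominator polynomials are
 * evaluated on the unit circle; for zero/pole formats the magnitude is the
 * product of distances from e^(jw) to the zeros divided by the product of
 * distances to the poles, and the phase is the corresponding angle sum.
 */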
1095 static void get_response(int channel, int format, double w,
1096  const double *b, const double *a,
1097  int nb_b, int nb_a, double *magnitude, double *phase)
1098 {
1099  double realz, realp;
1100  double imagz, imagp;
1101  double real, imag;
1102  double div;
1103 
1104  if (format == 0) {
1105  realz = 0., realp = 0.;
1106  imagz = 0., imagp = 0.;
1107  for (int x = 0; x < nb_a; x++) {
1108  realz += cos(-x * w) * a[x];
1109  imagz += sin(-x * w) * a[x];
1110  }
1111 
1112  for (int x = 0; x < nb_b; x++) {
1113  realp += cos(-x * w) * b[x];
1114  imagp += sin(-x * w) * b[x];
1115  }
1116 
1117  div = realp * realp + imagp * imagp;
1118  real = (realz * realp + imagz * imagp) / div;
1119  imag = (imagz * realp - imagp * realz) / div;
1120 
1121  *magnitude = hypot(real, imag);
1122  *phase = atan2(imag, real);
1123  } else {
1124  double p = 1., z = 1.;
1125  double acc = 0.;
1126 
1127  for (int x = 0; x < nb_a; x++) {
1128  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1129  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1130  }
1131 
1132  for (int x = 0; x < nb_b; x++) {
1133  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1134  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1135  }
1136 
1137  *magnitude = z / p;
1138  *phase = acc;
1139  }
1140 }
1141 
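/*
 * Render the magnitude, phase and group-delay curves of the selected
 * channel into the video frame; the phase is unwrapped first and the
 * group delay is derived from its finite differences.
 */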
1142 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1143 {
1144  AudioIIRContext *s = ctx->priv;
1145  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1146  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1147  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1148  char text[32];
1149  int ch, i;
1150 
1151  memset(out->data[0], 0, s->h * out->linesize[0]);
1152 
1153  phase = av_malloc_array(s->w, sizeof(*phase));
1154  temp = av_malloc_array(s->w, sizeof(*temp));
1155  mag = av_malloc_array(s->w, sizeof(*mag));
1156  delay = av_malloc_array(s->w, sizeof(*delay));
1157  if (!mag || !phase || !delay || !temp)
1158  goto end;
1159 
1160  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1161  for (i = 0; i < s->w; i++) {
1162  const double *b = s->iir[ch].ab[0];
1163  const double *a = s->iir[ch].ab[1];
1164  const int nb_b = s->iir[ch].nb_ab[0];
1165  const int nb_a = s->iir[ch].nb_ab[1];
1166  double w = i * M_PI / (s->w - 1);
1167  double m, p;
1168 
1169  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1170 
1171  mag[i] = s->iir[ch].g * m;
1172  phase[i] = p;
1173  min = fmin(min, mag[i]);
1174  max = fmax(max, mag[i]);
1175  }
1176 
1177  temp[0] = 0.;
1178  for (i = 0; i < s->w - 1; i++) {
1179  double d = phase[i] - phase[i + 1];
1180  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1181  }
1182 
1183  min_phase = phase[0];
1184  max_phase = phase[0];
1185  for (i = 1; i < s->w; i++) {
1186  temp[i] += temp[i - 1];
1187  phase[i] += temp[i];
1188  min_phase = fmin(min_phase, phase[i]);
1189  max_phase = fmax(max_phase, phase[i]);
1190  }
1191 
1192  for (i = 0; i < s->w - 1; i++) {
1193  double div = s->w / (double)sample_rate;
1194 
1195  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1196  min_delay = fmin(min_delay, delay[i + 1]);
1197  max_delay = fmax(max_delay, delay[i + 1]);
1198  }
1199  delay[0] = delay[1];
1200 
1201  for (i = 0; i < s->w; i++) {
1202  int ymag = mag[i] / max * (s->h - 1);
1203  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1204  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1205 
1206  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1207  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1208  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1209 
1210  if (prev_ymag < 0)
1211  prev_ymag = ymag;
1212  if (prev_yphase < 0)
1213  prev_yphase = yphase;
1214  if (prev_ydelay < 0)
1215  prev_ydelay = ydelay;
1216 
1217  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1218  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1219  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1220 
1221  prev_ymag = ymag;
1222  prev_yphase = yphase;
1223  prev_ydelay = ydelay;
1224  }
1225 
1226  if (s->w > 400 && s->h > 100) {
1227  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1228  snprintf(text, sizeof(text), "%.2f", max);
1229  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1230 
1231  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1232  snprintf(text, sizeof(text), "%.2f", min);
1233  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1234 
1235  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1236  snprintf(text, sizeof(text), "%.2f", max_phase);
1237  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1238 
1239  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1240  snprintf(text, sizeof(text), "%.2f", min_phase);
1241  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1242 
1243  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1244  snprintf(text, sizeof(text), "%.2f", max_delay);
1245  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1246 
1247  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1248  snprintf(text, sizeof(text), "%.2f", min_delay);
1249  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1250  }
1251 
1252 end:
1253  av_free(delay);
1254  av_free(temp);
1255  av_free(phase);
1256  av_free(mag);
1257 }
1258 
1259 static int config_output(AVFilterLink *outlink)
1260 {
1261  AVFilterContext *ctx = outlink->src;
1262  AudioIIRContext *s = ctx->priv;
1263  AVFilterLink *inlink = ctx->inputs[0];
1264  int ch, ret, i;
1265 
1266  s->channels = inlink->channels;
1267  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1268  if (!s->iir)
1269  return AVERROR(ENOMEM);
1270 
1271  ret = read_gains(ctx, s->g_str, inlink->channels);
1272  if (ret < 0)
1273  return ret;
1274 
1275  ret = read_channels(ctx, inlink->channels, s->a_str, 0);
1276  if (ret < 0)
1277  return ret;
1278 
1279  ret = read_channels(ctx, inlink->channels, s->b_str, 1);
1280  if (ret < 0)
1281  return ret;
1282 
1283  if (s->format == -1) {
1284  convert_sf2tf(ctx, inlink->channels);
1285  s->format = 0;
1286  } else if (s->format == 2) {
1287  convert_pr2zp(ctx, inlink->channels);
1288  } else if (s->format == 3) {
1289  convert_pd2zp(ctx, inlink->channels);
1290  } else if (s->format == 4) {
1291  convert_sp2zp(ctx, inlink->channels);
1292  }
1293  if (s->format > 0) {
1294  check_stability(ctx, inlink->channels);
1295  }
1296 
1297  av_frame_free(&s->video);
1298  if (s->response) {
1299  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1300  if (!s->video)
1301  return AVERROR(ENOMEM);
1302 
1303  draw_response(ctx, s->video, inlink->sample_rate);
1304  }
1305 
1306  if (s->format == 0)
1307  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for a high number of zeros/poles.\n");
1308 
1309  if (s->format > 0 && s->process == 0) {
1310  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1311 
1312  ret = convert_zp2tf(ctx, inlink->channels);
1313  if (ret < 0)
1314  return ret;
1315  } else if (s->format == -2 && s->process > 0) {
1316  av_log(ctx, AV_LOG_ERROR, "Only direct processing is implemented for lattice-ladder function.\n");
1317  return AVERROR_PATCHWELCOME;
1318  } else if (s->format <= 0 && s->process == 1) {
1319  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1320  return AVERROR_PATCHWELCOME;
1321  } else if (s->format <= 0 && s->process == 2) {
1322  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1323  return AVERROR_PATCHWELCOME;
1324  } else if (s->format > 0 && s->process == 1) {
1325  ret = decompose_zp2biquads(ctx, inlink->channels);
1326  if (ret < 0)
1327  return ret;
1328  } else if (s->format > 0 && s->process == 2) {
1329  if (s->precision > 1)
1330  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1331  ret = decompose_zp2biquads(ctx, inlink->channels);
1332  if (ret < 0)
1333  return ret;
1334  ret = convert_serial2parallel(ctx, inlink->channels);
1335  if (ret < 0)
1336  return ret;
1337  }
1338 
1339  for (ch = 0; s->format == -2 && ch < inlink->channels; ch++) {
1340  IIRChannel *iir = &s->iir[ch];
1341 
1342  if (iir->nb_ab[0] != iir->nb_ab[1] + 1) {
1343  av_log(ctx, AV_LOG_ERROR, "Number of ladder coefficients must be one more than number of reflection coefficients.\n");
1344  return AVERROR(EINVAL);
1345  }
1346  }
1347 
1348  for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
1349  IIRChannel *iir = &s->iir[ch];
1350 
1351  for (i = 1; i < iir->nb_ab[0]; i++) {
1352  iir->ab[0][i] /= iir->ab[0][0];
1353  }
1354 
1355  iir->ab[0][0] = 1.0;
1356  for (i = 0; i < iir->nb_ab[1]; i++) {
1357  iir->ab[1][i] *= iir->g;
1358  }
1359 
1360  normalize_coeffs(ctx, ch);
1361  }
1362 
1363  switch (inlink->format) {
1364  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1365  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1366  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1367  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1368  }
1369 
1370  if (s->format == -2) {
1371  switch (inlink->format) {
1372  case AV_SAMPLE_FMT_DBLP: s->iir_channel = iir_ch_lattice_dblp; break;
1373  case AV_SAMPLE_FMT_FLTP: s->iir_channel = iir_ch_lattice_fltp; break;
1374  case AV_SAMPLE_FMT_S32P: s->iir_channel = iir_ch_lattice_s32p; break;
1375  case AV_SAMPLE_FMT_S16P: s->iir_channel = iir_ch_lattice_s16p; break;
1376  }
1377  }
1378 
1379  return 0;
1380 }
1381 
1382 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1383 {
1384  AVFilterContext *ctx = inlink->dst;
1385  AudioIIRContext *s = ctx->priv;
1386  AVFilterLink *outlink = ctx->outputs[0];
1387  ThreadData td;
1388  AVFrame *out;
1389  int ch, ret;
1390 
1391  if (av_frame_is_writable(in) && s->process != 2) {
1392  out = in;
1393  } else {
1394  out = ff_get_audio_buffer(outlink, in->nb_samples);
1395  if (!out) {
1396  av_frame_free(&in);
1397  return AVERROR(ENOMEM);
1398  }
1399  av_frame_copy_props(out, in);
1400  }
1401 
1402  td.in = in;
1403  td.out = out;
1404  ff_filter_execute(ctx, s->iir_channel, &td, NULL, outlink->channels);
1405 
1406  for (ch = 0; ch < outlink->channels; ch++) {
1407  if (s->iir[ch].clippings > 0)
1408  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1409  ch, s->iir[ch].clippings);
1410  s->iir[ch].clippings = 0;
1411  }
1412 
1413  if (in != out)
1414  av_frame_free(&in);
1415 
1416  if (s->response) {
1417  AVFilterLink *outlink = ctx->outputs[1];
1418  int64_t old_pts = s->video->pts;
1419  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1420 
1421  if (new_pts > old_pts) {
1422  AVFrame *clone;
1423 
1424  s->video->pts = new_pts;
1425  clone = av_frame_clone(s->video);
1426  if (!clone)
1427  return AVERROR(ENOMEM);
1428  ret = ff_filter_frame(outlink, clone);
1429  if (ret < 0)
1430  return ret;
1431  }
1432  }
1433 
1434  return ff_filter_frame(outlink, out);
1435 }
1436 
1437 static int config_video(AVFilterLink *outlink)
1438 {
1439  AVFilterContext *ctx = outlink->src;
1440  AudioIIRContext *s = ctx->priv;
1441 
1442  outlink->sample_aspect_ratio = (AVRational){1,1};
1443  outlink->w = s->w;
1444  outlink->h = s->h;
1445  outlink->frame_rate = s->rate;
1446  outlink->time_base = av_inv_q(outlink->frame_rate);
1447 
1448  return 0;
1449 }
1450 
1451 static av_cold int init(AVFilterContext *ctx)
1452 {
1453  AudioIIRContext *s = ctx->priv;
1454  AVFilterPad pad, vpad;
1455  int ret;
1456 
1457  if (!s->a_str || !s->b_str || !s->g_str) {
1458  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1459  return AVERROR(EINVAL);
1460  }
1461 
1462  switch (s->precision) {
1463  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1464  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1465  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1466  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1467  default: return AVERROR_BUG;
1468  }
1469 
1470  pad = (AVFilterPad){
1471  .name = "default",
1472  .type = AVMEDIA_TYPE_AUDIO,
1473  .config_props = config_output,
1474  };
1475 
1476  ret = ff_append_outpad(ctx, &pad);
1477  if (ret < 0)
1478  return ret;
1479 
1480  if (s->response) {
1481  vpad = (AVFilterPad){
1482  .name = "filter_response",
1483  .type = AVMEDIA_TYPE_VIDEO,
1484  .config_props = config_video,
1485  };
1486 
1487  ret = ff_append_outpad(ctx, &vpad);
1488  if (ret < 0)
1489  return ret;
1490  }
1491 
1492  return 0;
1493 }
1494 
1495 static av_cold void uninit(AVFilterContext *ctx)
1496 {
1497  AudioIIRContext *s = ctx->priv;
1498  int ch;
1499 
1500  if (s->iir) {
1501  for (ch = 0; ch < s->channels; ch++) {
1502  IIRChannel *iir = &s->iir[ch];
1503  av_freep(&iir->ab[0]);
1504  av_freep(&iir->ab[1]);
1505  av_freep(&iir->cache[0]);
1506  av_freep(&iir->cache[1]);
1507  av_freep(&iir->biquads);
1508  }
1509  }
1510  av_freep(&s->iir);
1511 
1512  av_frame_free(&s->video);
1513 }
1514 
1515 static const AVFilterPad inputs[] = {
1516  {
1517  .name = "default",
1518  .type = AVMEDIA_TYPE_AUDIO,
1519  .filter_frame = filter_frame,
1520  },
1521 };
1522 
1523 #define OFFSET(x) offsetof(AudioIIRContext, x)
1524 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1525 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1526 
1527 static const AVOption aiir_options[] = {
1528  { "zeros", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1529  { "z", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1530  { "poles", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1531  { "p", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1532  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1533  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1534  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1535  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1536  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, "format" },
1537  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, "format" },
1538  { "ll", "lattice-ladder function", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, AF, "format" },
1539  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "format" },
1540  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
1541  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
1542  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
1543  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
1544  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
1545  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1546  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1547  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
1548  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
1549  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "process" },
1550  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1551  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1552  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
1553  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
1554  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
1555  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
1556  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1557  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1558  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1559  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1560  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1561  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1562  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1563  { NULL },
1564 };
1565 
1566 AVFILTER_DEFINE_CLASS(aiir);
1567 
1568 const AVFilter ff_af_aiir = {
1569  .name = "aiir",
1570  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1571  .priv_size = sizeof(AudioIIRContext),
1572  .priv_class = &aiir_class,
1573  .init = init,
1574  .uninit = uninit,
1575  FILTER_INPUTS(inputs),
1576  FILTER_QUERY_FUNC(query_formats),
1577  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
1578  AVFILTER_FLAG_SLICE_THREADS,
1579 };
coef_sf2zf
static double coef_sf2zf(double *a, int N, int n)
Definition: af_aiir.c:948
Pair
Definition: af_aiir.c:35
M
#define M(a, b)
Definition: vp3dsp.c:48
AudioIIRContext::format
int format
Definition: af_aiir.c:61
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:98
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:88
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:69
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:156
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
W
@ W
Definition: vf_addroi.c:26
av_clip
#define av_clip
Definition: common.h:96
mix
static int mix(int c0, int c1)
Definition: 4xm.c:716
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:381
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aiir)
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:599
IIRChannel::clippings
int clippings
Definition: af_aiir.c:52
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:237
AF
#define AF
Definition: af_aiir.c:1524
inputs
static const AVFilterPad inputs[]
Definition: af_aiir.c:1515
IIRChannel::nb_ab
int nb_ab[2]
Definition: af_aiir.c:46
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
BiquadContext::a
double a[3]
Definition: af_aiir.c:40
aiir_options
static const AVOption aiir_options[]
Definition: af_aiir.c:1527
convert_serial2parallel
static int convert_serial2parallel(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:817
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
RE
#define RE(x, ch)
read_channels
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
Definition: af_aiir.c:447
im
float im
Definition: fft.c:78
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
AudioIIRContext::ir_channel
int ir_channel
Definition: af_aiir.c:66
IIRChannel::biquads
BiquadContext * biquads
Definition: af_aiir.c:51
w
uint8_t w
Definition: llviddspenc.c:38
AVOption
AVOption.
Definition: opt.h:247
b
#define b
Definition: input.c:40
IIR_CH
#define IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:112
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:168
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:68
read_tf_coefficients
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
Definition: af_aiir.c:395
AudioIIRContext::iir_channel
int(* iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
Definition: af_aiir.c:75
check_stability
static void check_stability(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1022
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:689
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_aiir.c:1259
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AudioIIRContext::b_str
char * b_str
Definition: af_aiir.c:57
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
solve
static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
Definition: af_aiir.c:783
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:169
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:473
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_aiir.c:1437
AudioIIRContext::video
AVFrame * video
Definition: af_aiir.c:69
sample_rate
sample_rate
Definition: ffmpeg_filter.c:153
AudioIIRContext::g_str
char * g_str
Definition: af_aiir.c:57
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
BiquadContext
Definition: af_aiir.c:39
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:1703
fail
#define fail()
Definition: checkasm.h:127
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_aiir.c:78
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
AudioIIRContext::h
int h
Definition: af_aiir.c:65
a1
#define a1
Definition: regdef.h:47
AudioIIRContext::process
int process
Definition: af_aiir.c:62
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aiir.c:1382
convert_zp2tf
static int convert_zp2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:555
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:226
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:555
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:186
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:705
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:33
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:422
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
IIRChannel::cache
double * cache[2]
Definition: af_aiir.c:49
NAN
#define NAN
Definition: mathematics.h:64
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:191
SERIAL_IIR_CH
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:168
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:960
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
biquad
@ biquad
Definition: af_biquads.c:74
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:234
AudioIIRContext::rate
AVRational rate
Definition: af_aiir.c:67
ff_set_common_all_channel_counts
int ff_set_common_all_channel_counts(AVFilterContext *ctx)
Equivalent to ff_set_common_channel_layouts(ctx, ff_all_channel_counts())
Definition: formats.c:671
normalize_coeffs
static void normalize_coeffs(AVFilterContext *ctx, int ch)
Definition: af_aiir.c:527
exp
int8_t exp
Definition: eval.c:72
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_aiir.c:1451
convert_sp2zp
static void convert_sp2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:914
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:116
BiquadContext::w1
double w1
Definition: af_aiir.c:42
BiquadContext::w2
double w2
Definition: af_aiir.c:42
format
static const char *const format[]
Definition: af_aiir.c:445
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_aiir.c:1064
fmin
double fmin(double, double)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
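hypot(x, y) computes sqrt(x*x + y*y) while avoiding intermediate overflow; a small sketch (not the filter's code) of using it to get the magnitude of a complex zero or pole stored as a real/imaginary pair:

#include <math.h>

/* Illustrative only: distance of a z-plane root from the origin. */
static double root_magnitude(double re, double im)
{
    return hypot(re, im);   /* sqrt(re*re + im*im), overflow-safe */
}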
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:473
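A common pattern, sketched here with assumed structure rather than the exact logic of filter_frame(): if the frame's buffers are shared, they must be copied before samples may be modified in place, e.g. via av_frame_make_writable().

#include "libavutil/frame.h"

/* Illustrative only: make sure a frame may be modified in place. */
static int ensure_writable(AVFrame *frame)
{
    if (av_frame_is_writable(frame))
        return 0;
    return av_frame_make_writable(frame);   /* copies the data if shared */
}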
read_zp_coefficients
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
Definition: af_aiir.c:420
IM
#define IM(x, ch)
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1704
a
SUINT is the C unsigned type, but it holds a signed int; it was introduced to avoid signedness confusion when code is later changed (for example by adding a division), since a reader would not expect an unsigned operand to be semantically signed.
Definition: undefined.txt:41
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:228
xga_font_data.h
N
#define N
Definition: af_mcompand.c:53
fact
static double fact(double i)
Definition: af_aiir.c:941
IIRChannel::g
double g
Definition: af_aiir.c:48
Pair::b
int b
Definition: af_aiir.c:36
M_PI
#define M_PI
Definition: mathematics.h:52
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
internal.h
AudioIIRContext::a_str
char * a_str
Definition: af_aiir.c:57
normalize
Definition: normalize.py:1
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:397
VF
#define VF
Definition: af_aiir.c:1525
AudioIIRContext::sample_format
enum AVSampleFormat sample_format
Definition: af_aiir.c:73
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
OFFSET
#define OFFSET(x)
Definition: af_aiir.c:1523
IIRChannel::ab
double * ab[2]
Definition: af_aiir.c:47
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
a2
#define a2
Definition: regdef.h:48
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
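A small hedged example: inverting a rational swaps numerator and denominator, which is handy for turning a rate such as 25/1 into the matching 1/25 time base.

#include "libavutil/rational.h"

/* Illustrative only: 25/1 -> 1/25. */
static AVRational rate_to_time_base(AVRational rate)
{
    return av_inv_q(rate);
}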
AudioIIRContext::precision
int precision
Definition: af_aiir.c:63
convert_sf2tf
static void convert_sf2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:967
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
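A hedged sketch (the buffer name and sizing are assumptions, not taken from af_aiir.c): av_calloc() returns zero-initialized memory, which suits per-channel history buffers that must start from silence.

#include "libavutil/mem.h"

/* Illustrative only: zeroed delay line for one channel of an order-N filter. */
static double *alloc_history(int order)
{
    return av_calloc(order + 1, sizeof(double));   /* NULL on failure */
}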
AudioIIRContext::wet_gain
double wet_gain
Definition: af_aiir.c:58
get_response
static void get_response(int channel, int format, double w, const double *b, const double *a, int nb_b, int nb_a, double *magnitude, double *phase)
Definition: af_aiir.c:1095
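A hedged sketch of the transfer-function case only (get_response() also has to handle the other coefficient formats, and its exact code may differ): the response at angular frequency w is H(e^{jw}) = sum(b[k]*e^{-jwk}) / sum(a[k]*e^{-jwk}), from which magnitude and phase follow.

#include <math.h>

/* Illustrative only: magnitude/phase of a transfer function at frequency w. */
static void tf_response(double w, const double *b, const double *a,
                        int nb_b, int nb_a, double *magnitude, double *phase)
{
    double nr = 0., ni = 0., dr = 0., di = 0., den, re, im;
    int k;

    for (k = 0; k < nb_b; k++) {           /* numerator:   sum b[k] e^{-jwk} */
        nr += b[k] * cos(k * w);
        ni -= b[k] * sin(k * w);
    }
    for (k = 0; k < nb_a; k++) {           /* denominator: sum a[k] e^{-jwk} */
        dr += a[k] * cos(k * w);
        di -= a[k] * sin(k * w);
    }

    den = dr * dr + di * di;               /* complex division num / den */
    re  = (nr * dr + ni * di) / den;
    im  = (ni * dr - nr * di) / den;

    *magnitude = hypot(re, im);
    *phase     = atan2(im, re);
}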
AVFilter
Filter definition.
Definition: avfilter.h:165
cmul
static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
Definition: af_aiir.c:494
PARALLEL_IIR_CH
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:227
ret
ret
Definition: filter_design.txt:187
AudioIIRContext::dry_gain
double dry_gain
Definition: af_aiir.c:58
Pair::a
int a
Definition: af_aiir.c:36
fmax
double fmax(double, double)
convert_pr2zp
static void convert_pr2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:887
BiquadContext::b
double b[3]
Definition: af_aiir.c:41
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
Definition: af_aiir.c:1142
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
AudioIIRContext::response
int response
Definition: af_aiir.c:64
distance
static double distance(double x0, double x1, double y0, double y1)
Definition: af_aiir.c:1090
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:70
temp
else temp
Definition: vf_mcdeint.c:248
AudioIIRContext::channels
int channels
Definition: af_aiir.c:72
decompose_zp2biquads
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:603
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
factor
static const int factor[16]
Definition: vf_pp7.c:76
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
Definition: avfilter.h:121
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:279
LATTICE_IIR_CH
#define LATTICE_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:291
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
count_coefficients
static void count_coefficients(char *item_str, int *nb_items)
Definition: af_aiir.c:348
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:506
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
biquad_process
static void biquad_process(double *x, double *y, int length, double b0, double b1, double b2, double a1, double a2)
Definition: af_aiir.c:765
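Given only the signature above, here is a hedged sketch of one common realization with the same parameter list, a direct form II transposed biquad; biquad_process() in af_aiir.c may be organized differently (for example, it may carry its state in BiquadContext rather than starting from zero).

/* Illustrative only: y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2]
 *                          - a1*y[n-1] - a2*y[n-2], in DF2T form. */
static void biquad_df2t(const double *x, double *y, int length,
                        double b0, double b1, double b2,
                        double a1, double a2)
{
    double w1 = 0., w2 = 0.;
    int n;

    for (n = 0; n < length; n++) {
        double out = b0 * x[n] + w1;

        w1   = b1 * x[n] - a1 * out + w2;
        w2   = b2 * x[n] - a2 * out;
        y[n] = out;
    }
}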
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:500
ff_append_outpad
int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:150
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
AudioIIRContext
Definition: af_aiir.c:55
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AudioIIRContext::iir
IIRChannel * iir
Definition: af_aiir.c:71
d
d
Definition: ffmpeg_filter.c:153
int32_t
int32_t
Definition: audioconvert.c:56
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value which typically indicates the size in bytes of each picture line.
Definition: frame.h:362
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
b0
static double b0(void *priv, double x, double y)
Definition: vf_xfade.c:1702
IIRChannel::fir
double fir
Definition: af_aiir.c:50
ff_af_aiir
const AVFilter ff_af_aiir
Definition: af_aiir.c:1568
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:143
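A hedged sketch of the usual slice-threading dispatch: one job per channel is handed to ff_filter_execute(), with a small struct carrying the input and output frames. MyThreadData, my_worker and process_all_channels are placeholder names, not identifiers from af_aiir.c.

#include "libavutil/frame.h"
#include "libavutil/macros.h"
#include "avfilter.h"
#include "internal.h"

/* Placeholder per-job context, analogous to a ThreadData {in, out} pair. */
typedef struct MyThreadData {
    AVFrame *in, *out;
} MyThreadData;

static int my_worker(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MyThreadData *td = arg;

    /* real code would filter channel 'jobnr' of td->in into td->out */
    (void)td; (void)jobnr; (void)nb_jobs;
    return 0;
}

static int process_all_channels(AVFilterContext *ctx, AVFrame *in, AVFrame *out,
                                int nb_channels)
{
    MyThreadData td = { .in = in, .out = out };

    /* one job per channel, capped at the number of worker threads */
    return ff_filter_execute(ctx, my_worker, &td, NULL,
                             FFMIN(nb_channels, ff_filter_get_nb_threads(ctx)));
}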
AudioIIRContext::normalize
int normalize
Definition: af_aiir.c:60
int
int
Definition: ffmpeg_filter.c:153
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aiir.c:1495
snprintf
#define snprintf
Definition: snprintf.h:34
read_gains
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
Definition: af_aiir.c:362
IIRChannel
Definition: af_aiir.c:45
AudioIIRContext::mix
double mix
Definition: af_aiir.c:59
convert_pd2zp
static void convert_pd2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:995
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_aiir.c:1041
channel
channel
Definition: ebur128.h:39
re
float re
Definition: fft.c:78
min
float min
Definition: vorbis_enc_data.h:429
AudioIIRContext::w
int w
Definition: af_aiir.c:65