FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/intreadwrite.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/xga_font_data.h"
27 #include "audio.h"
28 #include "avfilter.h"
29 #include "internal.h"
30 
31 typedef struct ThreadData {
32  AVFrame *in, *out;
33 } ThreadData;
34 
35 typedef struct Pair {
36  int a, b;
37 } Pair;
38 
39 typedef struct BiquadContext {
40  double a[3];
41  double b[3];
42  double w1, w2;
43 } BiquadContext;
44 
45 typedef struct IIRChannel {
46  int nb_ab[2];
47  double *ab[2];
48  double g;
49  double *cache[2];
50  double fir;
51  BiquadContext *biquads;
52  int clippings;
53 } IIRChannel;
54 
55 typedef struct AudioIIRContext {
56  const AVClass *class;
57  char *a_str, *b_str, *g_str;
58  double dry_gain, wet_gain;
59  double mix;
60  int normalize;
61  int format;
62  int process;
63  int precision;
64  int response;
65  int w, h;
66  int ir_channel;
67  AVRational rate;
68 
69  AVFrame *video;
70 
71  IIRChannel *iir;
72  int channels;
73  enum AVSampleFormat sample_format;
74 
75  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
76 } AudioIIRContext;
77 
78 static int query_formats(AVFilterContext *ctx)
79 {
80  AudioIIRContext *s = ctx->priv;
81  AVFilterFormats *formats;
82  AVFilterChannelLayouts *layouts;
83  enum AVSampleFormat sample_fmts[] = {
84  AV_SAMPLE_FMT_DBLP,
85  AV_SAMPLE_FMT_NONE
86  };
87  static const enum AVPixelFormat pix_fmts[] = {
88  AV_PIX_FMT_RGB0,
89  AV_PIX_FMT_NONE
90  };
91  int ret;
92 
93  if (s->response) {
94  AVFilterLink *videolink = ctx->outputs[1];
95 
96  formats = ff_make_format_list(pix_fmts);
97  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
98  return ret;
99  }
100 
101  layouts = ff_all_channel_counts();
102  if (!layouts)
103  return AVERROR(ENOMEM);
104  ret = ff_set_common_channel_layouts(ctx, layouts);
105  if (ret < 0)
106  return ret;
107 
108  sample_fmts[0] = s->sample_format;
109  formats = ff_make_format_list(sample_fmts);
110  if (!formats)
111  return AVERROR(ENOMEM);
112  ret = ff_set_common_formats(ctx, formats);
113  if (ret < 0)
114  return ret;
115 
116  formats = ff_all_samplerates();
117  if (!formats)
118  return AVERROR(ENOMEM);
119  return ff_set_common_samplerates(ctx, formats);
120 }
121 
122 #define IIR_CH(name, type, min, max, need_clipping) \
123 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
124 { \
125  AudioIIRContext *s = ctx->priv; \
126  const double ig = s->dry_gain; \
127  const double og = s->wet_gain; \
128  const double mix = s->mix; \
129  ThreadData *td = arg; \
130  AVFrame *in = td->in, *out = td->out; \
131  const type *src = (const type *)in->extended_data[ch]; \
132  double *oc = (double *)s->iir[ch].cache[0]; \
133  double *ic = (double *)s->iir[ch].cache[1]; \
134  const int nb_a = s->iir[ch].nb_ab[0]; \
135  const int nb_b = s->iir[ch].nb_ab[1]; \
136  const double *a = s->iir[ch].ab[0]; \
137  const double *b = s->iir[ch].ab[1]; \
138  const double g = s->iir[ch].g; \
139  int *clippings = &s->iir[ch].clippings; \
140  type *dst = (type *)out->extended_data[ch]; \
141  int n; \
142  \
143  for (n = 0; n < in->nb_samples; n++) { \
144  double sample = 0.; \
145  int x; \
146  \
147  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
148  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
149  ic[0] = src[n] * ig; \
150  for (x = 0; x < nb_b; x++) \
151  sample += b[x] * ic[x]; \
152  \
153  for (x = 1; x < nb_a; x++) \
154  sample -= a[x] * oc[x]; \
155  \
156  oc[0] = sample; \
157  sample *= og * g; \
158  sample = sample * mix + ic[0] * (1. - mix); \
159  if (need_clipping && sample < min) { \
160  (*clippings)++; \
161  dst[n] = min; \
162  } else if (need_clipping && sample > max) { \
163  (*clippings)++; \
164  dst[n] = max; \
165  } else { \
166  dst[n] = sample; \
167  } \
168  } \
169  \
170  return 0; \
171 }
172 
173 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
174 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
175 IIR_CH(fltp, float, -1., 1., 0)
176 IIR_CH(dblp, double, -1., 1., 0)
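/* The per-sample loop in IIR_CH above is a direct form I evaluation: with
 * a[0] normalized to 1 (done in config_output), each output is
 *     oc[0] = sum_{k=0..nb_b-1} b[k]*ic[k] - sum_{k=1..nb_a-1} a[k]*oc[k]
 * where ic[]/oc[] are the per-channel input/output delay lines; the result is
 * then scaled by og*g, dry/wet mixed against ic[0], and clipped for the
 * integer sample formats. */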
177 
178 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
179 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
180  int ch, int nb_jobs) \
181 { \
182  AudioIIRContext *s = ctx->priv; \
183  const double ig = s->dry_gain; \
184  const double og = s->wet_gain; \
185  const double mix = s->mix; \
186  const double imix = 1. - mix; \
187  ThreadData *td = arg; \
188  AVFrame *in = td->in, *out = td->out; \
189  const type *src = (const type *)in->extended_data[ch]; \
190  type *dst = (type *)out->extended_data[ch]; \
191  IIRChannel *iir = &s->iir[ch]; \
192  const double g = iir->g; \
193  int *clippings = &iir->clippings; \
194  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
195  int n, i; \
196  \
197  for (i = nb_biquads - 1; i >= 0; i--) { \
198  const double a1 = -iir->biquads[i].a[1]; \
199  const double a2 = -iir->biquads[i].a[2]; \
200  const double b0 = iir->biquads[i].b[0]; \
201  const double b1 = iir->biquads[i].b[1]; \
202  const double b2 = iir->biquads[i].b[2]; \
203  double w1 = iir->biquads[i].w1; \
204  double w2 = iir->biquads[i].w2; \
205  \
206  for (n = 0; n < in->nb_samples; n++) { \
207  double i0 = ig * (i ? dst[n] : src[n]); \
208  double o0 = i0 * b0 + w1; \
209  \
210  w1 = b1 * i0 + w2 + a1 * o0; \
211  w2 = b2 * i0 + a2 * o0; \
212  o0 *= og * g; \
213  \
214  o0 = o0 * mix + imix * i0; \
215  if (need_clipping && o0 < min) { \
216  (*clippings)++; \
217  dst[n] = min; \
218  } else if (need_clipping && o0 > max) { \
219  (*clippings)++; \
220  dst[n] = max; \
221  } else { \
222  dst[n] = o0; \
223  } \
224  } \
225  iir->biquads[i].w1 = w1; \
226  iir->biquads[i].w2 = w2; \
227  } \
228  \
229  return 0; \
230 }
231 
232 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
233 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
234 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
235 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
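/* Each stage in SERIAL_IIR_CH is a transposed direct form II biquad. With the
 * sign change a1 = -a[1], a2 = -a[2] applied when the state is loaded, the
 * per-sample recurrence is
 *     o0 = b0*i0 + w1
 *     w1 = b1*i0 + w2 + a1*o0
 *     w2 = b2*i0      + a2*o0
 * and the sections are cascaded: every section after the first reads its input
 * from the dst[] samples written by the previous one. */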
236 
237 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
238 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
239  int ch, int nb_jobs) \
240 { \
241  AudioIIRContext *s = ctx->priv; \
242  const double ig = s->dry_gain; \
243  const double og = s->wet_gain; \
244  const double mix = s->mix; \
245  const double imix = 1. - mix; \
246  ThreadData *td = arg; \
247  AVFrame *in = td->in, *out = td->out; \
248  const type *src = (const type *)in->extended_data[ch]; \
249  type *dst = (type *)out->extended_data[ch]; \
250  IIRChannel *iir = &s->iir[ch]; \
251  const double g = iir->g; \
252  const double fir = iir->fir; \
253  int *clippings = &iir->clippings; \
254  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
255  int n, i; \
256  \
257  for (i = 0; i < nb_biquads; i++) { \
258  const double a1 = -iir->biquads[i].a[1]; \
259  const double a2 = -iir->biquads[i].a[2]; \
260  const double b1 = iir->biquads[i].b[1]; \
261  const double b2 = iir->biquads[i].b[2]; \
262  double w1 = iir->biquads[i].w1; \
263  double w2 = iir->biquads[i].w2; \
264  \
265  for (n = 0; n < in->nb_samples; n++) { \
266  double i0 = ig * src[n]; \
267  double o0 = w1; \
268  \
269  w1 = b1 * i0 + w2 + a1 * o0; \
270  w2 = b2 * i0 + a2 * o0; \
271  o0 *= og * g; \
272  o0 += dst[n]; \
273  \
274  if (need_clipping && o0 < min) { \
275  (*clippings)++; \
276  dst[n] = min; \
277  } else if (need_clipping && o0 > max) { \
278  (*clippings)++; \
279  dst[n] = max; \
280  } else { \
281  dst[n] = o0; \
282  } \
283  } \
284  iir->biquads[i].w1 = w1; \
285  iir->biquads[i].w2 = w2; \
286  } \
287  \
288  for (n = 0; n < in->nb_samples; n++) { \
289  dst[n] += fir * src[n]; \
290  dst[n] = dst[n] * mix + imix * src[n]; \
291  } \
292  \
293  return 0; \
294 }
295 
296 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
297 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
298 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
299 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
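/* PARALLEL_IIR_CH runs every biquad on the same input and accumulates the
 * section outputs into dst[]; the final loop then adds the direct feed-through
 * term fir*src[n] and applies the dry/wet mix, so the overall output is the
 * sum of all second-order sections plus a zeroth-order direct path. */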
300 
301 #define LATTICE_IIR_CH(name, type, min, max, need_clipping) \
302 static int iir_ch_lattice_## name(AVFilterContext *ctx, void *arg, \
303  int ch, int nb_jobs) \
304 { \
305  AudioIIRContext *s = ctx->priv; \
306  const double ig = s->dry_gain; \
307  const double og = s->wet_gain; \
308  const double mix = s->mix; \
309  ThreadData *td = arg; \
310  AVFrame *in = td->in, *out = td->out; \
311  const type *src = (const type *)in->extended_data[ch]; \
312  double n0, n1, p0, *x = (double *)s->iir[ch].cache[0]; \
313  const int nb_stages = s->iir[ch].nb_ab[1]; \
314  const double *v = s->iir[ch].ab[0]; \
315  const double *k = s->iir[ch].ab[1]; \
316  const double g = s->iir[ch].g; \
317  int *clippings = &s->iir[ch].clippings; \
318  type *dst = (type *)out->extended_data[ch]; \
319  int n; \
320  \
321  for (n = 0; n < in->nb_samples; n++) { \
322  const double in = src[n] * ig; \
323  double out = 0.; \
324  \
325  n1 = in; \
326  for (int i = nb_stages - 1; i >= 0; i--) { \
327  n0 = n1 - k[i] * x[i]; \
328  p0 = n0 * k[i] + x[i]; \
329  out += p0 * v[i+1]; \
330  x[i] = p0; \
331  n1 = n0; \
332  } \
333  \
334  out += n1 * v[0]; \
335  memmove(&x[1], &x[0], nb_stages * sizeof(*x)); \
336  x[0] = n1; \
337  out *= og * g; \
338  out = out * mix + in * (1. - mix); \
339  if (need_clipping && out < min) { \
340  (*clippings)++; \
341  dst[n] = min; \
342  } else if (need_clipping && out > max) { \
343  (*clippings)++; \
344  dst[n] = max; \
345  } else { \
346  dst[n] = out; \
347  } \
348  } \
349  \
350  return 0; \
351 }
352 
353 LATTICE_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
354 LATTICE_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
355 LATTICE_IIR_CH(fltp, float, -1., 1., 0)
356 LATTICE_IIR_CH(dblp, double, -1., 1., 0)
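/* LATTICE_IIR_CH implements the lattice-ladder ("ll" format) structure: k[]
 * are the reflection coefficients walked from the last stage down to the
 * first, x[] holds the lattice state, and v[] are the ladder tap weights that
 * form the output as a weighted sum of the internal lattice signals. */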
357 
358 static void count_coefficients(char *item_str, int *nb_items)
359 {
360  char *p;
361 
362  if (!item_str)
363  return;
364 
365  *nb_items = 1;
366  for (p = item_str; *p && *p != '|'; p++) {
367  if (*p == ' ')
368  (*nb_items)++;
369  }
370 }
371 
372 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
373 {
374  AudioIIRContext *s = ctx->priv;
375  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
376  int i;
377 
378  p = old_str = av_strdup(item_str);
379  if (!p)
380  return AVERROR(ENOMEM);
381  for (i = 0; i < nb_items; i++) {
382  if (!(arg = av_strtok(p, "|", &saveptr)))
383  arg = prev_arg;
384 
385  if (!arg) {
386  av_freep(&old_str);
387  return AVERROR(EINVAL);
388  }
389 
390  p = NULL;
391  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
392  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
393  av_freep(&old_str);
394  return AVERROR(EINVAL);
395  }
396 
397  prev_arg = arg;
398  }
399 
400  av_freep(&old_str);
401 
402  return 0;
403 }
404 
405 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
406 {
407  char *p, *arg, *old_str, *saveptr = NULL;
408  int i;
409 
410  p = old_str = av_strdup(item_str);
411  if (!p)
412  return AVERROR(ENOMEM);
413  for (i = 0; i < nb_items; i++) {
414  if (!(arg = av_strtok(p, " ", &saveptr)))
415  break;
416 
417  p = NULL;
418  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
419  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
420  av_freep(&old_str);
421  return AVERROR(EINVAL);
422  }
423  }
424 
425  av_freep(&old_str);
426 
427  return 0;
428 }
429 
430 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
431 {
432  char *p, *arg, *old_str, *saveptr = NULL;
433  int i;
434 
435  p = old_str = av_strdup(item_str);
436  if (!p)
437  return AVERROR(ENOMEM);
438  for (i = 0; i < nb_items; i++) {
439  if (!(arg = av_strtok(p, " ", &saveptr)))
440  break;
441 
442  p = NULL;
443  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
444  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
445  av_freep(&old_str);
446  return AVERROR(EINVAL);
447  }
448  }
449 
450  av_freep(&old_str);
451 
452  return 0;
453 }
454 
455 static const char *const format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
456 
457 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
458 {
459  AudioIIRContext *s = ctx->priv;
460  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
461  int i, ret;
462 
463  p = old_str = av_strdup(item_str);
464  if (!p)
465  return AVERROR(ENOMEM);
466  for (i = 0; i < channels; i++) {
467  IIRChannel *iir = &s->iir[i];
468 
469  if (!(arg = av_strtok(p, "|", &saveptr)))
470  arg = prev_arg;
471 
472  if (!arg) {
473  av_freep(&old_str);
474  return AVERROR(EINVAL);
475  }
476 
477  count_coefficients(arg, &iir->nb_ab[ab]);
478 
479  p = NULL;
480  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
481  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
482  if (!iir->ab[ab] || !iir->cache[ab]) {
483  av_freep(&old_str);
484  return AVERROR(ENOMEM);
485  }
486 
487  if (s->format > 0) {
488  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
489  } else {
490  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
491  }
492  if (ret < 0) {
493  av_freep(&old_str);
494  return ret;
495  }
496  prev_arg = arg;
497  }
498 
499  av_freep(&old_str);
500 
501  return 0;
502 }
503 
504 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
505 {
506  *RE = re * re2 - im * im2;
507  *IM = re * im2 + re2 * im;
508 }
509 
510 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
511 {
512  coefs[2 * n] = 1.0;
513 
514  for (int i = 1; i <= n; i++) {
515  for (int j = n - i; j < n; j++) {
516  double re, im;
517 
518  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
519  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
520 
521  coefs[2 * j] -= re;
522  coefs[2 * j + 1] -= im;
523  }
524  }
525 
526  for (int i = 0; i < n + 1; i++) {
527  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
528  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
529  coefs[2 * i + 1], i);
530  return AVERROR(EINVAL);
531  }
532  }
533 
534  return 0;
535 }
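/* expand() multiplies out a polynomial from its complex roots, i.e. it builds
 * the coefficients of prod_{i=1..n} (z - pz[i]) into coefs[] (interleaved
 * real/imaginary parts, ascending powers of z). For a real filter the
 * imaginary parts must cancel, which is what the FLT_EPSILON check above
 * enforces: the supplied poles/zeros have to come in conjugate pairs. */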
536 
537 static void normalize_coeffs(AVFilterContext *ctx, int ch)
538 {
539  AudioIIRContext *s = ctx->priv;
540  IIRChannel *iir = &s->iir[ch];
541  double sum_den = 0.;
542 
543  if (!s->normalize)
544  return;
545 
546  for (int i = 0; i < iir->nb_ab[1]; i++) {
547  sum_den += iir->ab[1][i];
548  }
549 
550  if (sum_den > 1e-6) {
551  double factor, sum_num = 0.;
552 
553  for (int i = 0; i < iir->nb_ab[0]; i++) {
554  sum_num += iir->ab[0][i];
555  }
556 
557  factor = sum_num / sum_den;
558 
559  for (int i = 0; i < iir->nb_ab[1]; i++) {
560  iir->ab[1][i] *= factor;
561  }
562  }
563 }
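/* When the normalize option is set, normalize_coeffs() rescales the numerator
 * so that sum(b) == sum(a); since H(z) evaluated at z = 1 equals
 * sum(b)/sum(a), this pins the DC gain of the channel to exactly 1 (0 dB). */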
564 
565 static int convert_zp2tf(AVFilterContext *ctx, int channels)
566 {
567  AudioIIRContext *s = ctx->priv;
568  int ch, i, j, ret = 0;
569 
570  for (ch = 0; ch < channels; ch++) {
571  IIRChannel *iir = &s->iir[ch];
572  double *topc, *botc;
573 
574  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
575  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
576  if (!topc || !botc) {
577  ret = AVERROR(ENOMEM);
578  goto fail;
579  }
580 
581  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
582  if (ret < 0) {
583  goto fail;
584  }
585 
586  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
587  if (ret < 0) {
588  goto fail;
589  }
590 
591  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
592  iir->ab[1][j] = topc[2 * i];
593  }
594  iir->nb_ab[1]++;
595 
596  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
597  iir->ab[0][j] = botc[2 * i];
598  }
599  iir->nb_ab[0]++;
600 
601  normalize_coeffs(ctx, ch);
602 
603 fail:
604  av_free(topc);
605  av_free(botc);
606  if (ret < 0)
607  break;
608  }
609 
610  return ret;
611 }
612 
613 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
614 {
615  AudioIIRContext *s = ctx->priv;
616  int ch, ret;
617 
618  for (ch = 0; ch < channels; ch++) {
619  IIRChannel *iir = &s->iir[ch];
620  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
621  int current_biquad = 0;
622 
623  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
624  if (!iir->biquads)
625  return AVERROR(ENOMEM);
626 
627  while (nb_biquads--) {
628  Pair outmost_pole = { -1, -1 };
629  Pair nearest_zero = { -1, -1 };
630  double zeros[4] = { 0 };
631  double poles[4] = { 0 };
632  double b[6] = { 0 };
633  double a[6] = { 0 };
634  double min_distance = DBL_MAX;
635  double max_mag = 0;
636  double factor;
637  int i;
638 
639  for (i = 0; i < iir->nb_ab[0]; i++) {
640  double mag;
641 
642  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
643  continue;
644  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
645 
646  if (mag > max_mag) {
647  max_mag = mag;
648  outmost_pole.a = i;
649  }
650  }
651 
652  for (i = 0; i < iir->nb_ab[0]; i++) {
653  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
654  continue;
655 
656  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
657  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
658  outmost_pole.b = i;
659  break;
660  }
661  }
662 
663  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
664 
665  if (outmost_pole.a < 0 || outmost_pole.b < 0)
666  return AVERROR(EINVAL);
667 
668  for (i = 0; i < iir->nb_ab[1]; i++) {
669  double distance;
670 
671  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
672  continue;
673  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
674  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
675 
676  if (distance < min_distance) {
677  min_distance = distance;
678  nearest_zero.a = i;
679  }
680  }
681 
682  for (i = 0; i < iir->nb_ab[1]; i++) {
683  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
684  continue;
685 
686  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
687  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
688  nearest_zero.b = i;
689  break;
690  }
691  }
692 
693  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
694 
695  if (nearest_zero.a < 0 || nearest_zero.b < 0)
696  return AVERROR(EINVAL);
697 
698  poles[0] = iir->ab[0][2 * outmost_pole.a ];
699  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
700 
701  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
702  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
703 
704  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
705  zeros[2] = 0;
706  zeros[3] = 0;
707 
708  poles[2] = 0;
709  poles[3] = 0;
710  } else {
711  poles[2] = iir->ab[0][2 * outmost_pole.b ];
712  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
713 
714  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
715  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
716  }
717 
718  ret = expand(ctx, zeros, 2, b);
719  if (ret < 0)
720  return ret;
721 
722  ret = expand(ctx, poles, 2, a);
723  if (ret < 0)
724  return ret;
725 
726  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
727  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
728  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
729  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
730 
731  iir->biquads[current_biquad].a[0] = 1.;
732  iir->biquads[current_biquad].a[1] = a[2] / a[4];
733  iir->biquads[current_biquad].a[2] = a[0] / a[4];
734  iir->biquads[current_biquad].b[0] = b[4] / a[4];
735  iir->biquads[current_biquad].b[1] = b[2] / a[4];
736  iir->biquads[current_biquad].b[2] = b[0] / a[4];
737 
738  if (s->normalize &&
739  fabs(iir->biquads[current_biquad].b[0] +
740  iir->biquads[current_biquad].b[1] +
741  iir->biquads[current_biquad].b[2]) > 1e-6) {
742  factor = (iir->biquads[current_biquad].a[0] +
743  iir->biquads[current_biquad].a[1] +
744  iir->biquads[current_biquad].a[2]) /
745  (iir->biquads[current_biquad].b[0] +
746  iir->biquads[current_biquad].b[1] +
747  iir->biquads[current_biquad].b[2]);
748 
749  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
750 
751  iir->biquads[current_biquad].b[0] *= factor;
752  iir->biquads[current_biquad].b[1] *= factor;
753  iir->biquads[current_biquad].b[2] *= factor;
754  }
755 
756  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
757  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
758  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
759 
760  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
761  iir->biquads[current_biquad].a[0],
762  iir->biquads[current_biquad].a[1],
763  iir->biquads[current_biquad].a[2],
764  iir->biquads[current_biquad].b[0],
765  iir->biquads[current_biquad].b[1],
766  iir->biquads[current_biquad].b[2]);
767 
768  current_biquad++;
769  }
770  }
771 
772  return 0;
773 }
774 
775 static void biquad_process(double *x, double *y, int length,
776  double b0, double b1, double b2,
777  double a1, double a2)
778 {
779  double w1 = 0., w2 = 0.;
780 
781  a1 = -a1;
782  a2 = -a2;
783 
784  for (int n = 0; n < length; n++) {
785  double out, in = x[n];
786 
787  y[n] = out = in * b0 + w1;
788  w1 = b1 * in + w2 + a1 * out;
789  w2 = b2 * in + a2 * out;
790  }
791 }
792 
793 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
794 {
795  double sum = 0.;
796 
797  for (int i = 0; i < n; i++) {
798  for (int j = i; j < n; j++) {
799  sum = 0.;
800  for (int k = 0; k < i; k++)
801  sum += lu[i * n + k] * lu[k * n + j];
802  lu[i * n + j] = matrix[j * n + i] - sum;
803  }
804  for (int j = i + 1; j < n; j++) {
805  sum = 0.;
806  for (int k = 0; k < i; k++)
807  sum += lu[j * n + k] * lu[k * n + i];
808  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
809  }
810  }
811 
812  for (int i = 0; i < n; i++) {
813  sum = 0.;
814  for (int k = 0; k < i; k++)
815  sum += lu[i * n + k] * y[k];
816  y[i] = vector[i] - sum;
817  }
818 
819  for (int i = n - 1; i >= 0; i--) {
820  sum = 0.;
821  for (int k = i + 1; k < n; k++)
822  sum += lu[i * n + k] * x[k];
823  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
824  }
825 }
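/* solve() factors the (transposed) coefficient matrix with an LU decomposition
 * without pivoting, then does forward substitution into y[] and back
 * substitution into x[], so on return x satisfies matrix^T * x = vector. It is
 * used by convert_serial2parallel() to fit the parallel-form numerators. */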
826 
827 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
828 {
829  AudioIIRContext *s = ctx->priv;
830  int ret = 0;
831 
832  for (int ch = 0; ch < channels; ch++) {
833  IIRChannel *iir = &s->iir[ch];
834  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
835  int length = nb_biquads * 2 + 1;
836  double *impulse = av_calloc(length, sizeof(*impulse));
837  double *y = av_calloc(length, sizeof(*y));
838  double *resp = av_calloc(length, sizeof(*resp));
839  double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
840  double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
841 
842  if (!impulse || !y || !resp || !M || !W) {
843  av_free(impulse);
844  av_free(y);
845  av_free(resp);
846  av_free(M);
847  av_free(W);
848  return AVERROR(ENOMEM);
849  }
850 
851  impulse[0] = 1.;
852 
853  for (int n = 0; n < nb_biquads; n++) {
854  BiquadContext *biquad = &iir->biquads[n];
855 
856  biquad_process(n ? y : impulse, y, length,
857  biquad->b[0], biquad->b[1], biquad->b[2],
858  biquad->a[1], biquad->a[2]);
859  }
860 
861  for (int n = 0; n < nb_biquads; n++) {
862  BiquadContext *biquad = &iir->biquads[n];
863 
864  biquad_process(impulse, resp, length - 1,
865  1., 0., 0., biquad->a[1], biquad->a[2]);
866 
867  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
868  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
869  memset(resp, 0, length * sizeof(*resp));
870  }
871 
872  solve(M, &y[1], length - 1, &impulse[1], resp, W);
873 
874  iir->fir = y[0];
875 
876  for (int n = 0; n < nb_biquads; n++) {
877  BiquadContext *biquad = &iir->biquads[n];
878 
879  biquad->b[0] = 0.;
880  biquad->b[1] = resp[n * 2 + 0];
881  biquad->b[2] = resp[n * 2 + 1];
882  }
883 
884  av_free(impulse);
885  av_free(y);
886  av_free(resp);
887  av_free(M);
888  av_free(W);
889 
890  if (ret < 0)
891  return ret;
892  }
893 
894  return 0;
895 }
896 
897 static void convert_pr2zp(AVFilterContext *ctx, int channels)
898 {
899  AudioIIRContext *s = ctx->priv;
900  int ch;
901 
902  for (ch = 0; ch < channels; ch++) {
903  IIRChannel *iir = &s->iir[ch];
904  int n;
905 
906  for (n = 0; n < iir->nb_ab[0]; n++) {
907  double r = iir->ab[0][2*n];
908  double angle = iir->ab[0][2*n+1];
909 
910  iir->ab[0][2*n] = r * cos(angle);
911  iir->ab[0][2*n+1] = r * sin(angle);
912  }
913 
914  for (n = 0; n < iir->nb_ab[1]; n++) {
915  double r = iir->ab[1][2*n];
916  double angle = iir->ab[1][2*n+1];
917 
918  iir->ab[1][2*n] = r * cos(angle);
919  iir->ab[1][2*n+1] = r * sin(angle);
920  }
921  }
922 }
923 
924 static void convert_sp2zp(AVFilterContext *ctx, int channels)
925 {
926  AudioIIRContext *s = ctx->priv;
927  int ch;
928 
929  for (ch = 0; ch < channels; ch++) {
930  IIRChannel *iir = &s->iir[ch];
931  int n;
932 
933  for (n = 0; n < iir->nb_ab[0]; n++) {
934  double sr = iir->ab[0][2*n];
935  double si = iir->ab[0][2*n+1];
936 
937  iir->ab[0][2*n] = exp(sr) * cos(si);
938  iir->ab[0][2*n+1] = exp(sr) * sin(si);
939  }
940 
941  for (n = 0; n < iir->nb_ab[1]; n++) {
942  double sr = iir->ab[1][2*n];
943  double si = iir->ab[1][2*n+1];
944 
945  iir->ab[1][2*n] = exp(sr) * cos(si);
946  iir->ab[1][2*n+1] = exp(sr) * sin(si);
947  }
948  }
949 }
950 
951 static double fact(double i)
952 {
953  if (i <= 0.)
954  return 1.;
955  return i * fact(i - 1.);
956 }
957 
958 static double coef_sf2zf(double *a, int N, int n)
959 {
960  double z = 0.;
961 
962  for (int i = 0; i <= N; i++) {
963  double acc = 0.;
964 
965  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
966  acc += ((fact(i) * fact(N - i)) /
967  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
968  ((k & 1) ? -1. : 1.);
969  }
970 
971  z += a[i] * pow(2., i) * acc;
972  }
973 
974  return z;
975 }
976 
977 static void convert_sf2tf(AVFilterContext *ctx, int channels)
978 {
979  AudioIIRContext *s = ctx->priv;
980  int ch;
981 
982  for (ch = 0; ch < channels; ch++) {
983  IIRChannel *iir = &s->iir[ch];
984  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
985  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
986 
987  if (!temp0 || !temp1)
988  goto next;
989 
990  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
991  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
992 
993  for (int n = 0; n < iir->nb_ab[0]; n++)
994  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
995 
996  for (int n = 0; n < iir->nb_ab[1]; n++)
997  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
998 
999 next:
1000  av_free(temp0);
1001  av_free(temp1);
1002  }
1003 }
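/* convert_sf2tf() maps analog (s-plane, "sf" format) transfer-function
 * coefficients to digital ones. coef_sf2zf() expands the bilinear substitution
 * s = 2*(1 - z^-1)/(1 + z^-1) (unit sample period, no prewarping) one term at
 * a time; the pow(2., i) factor and the factorial products are the binomial
 * coefficients of (1 - z^-1)^i * (1 + z^-1)^(N-i). */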
1004 
1005 static void convert_pd2zp(AVFilterContext *ctx, int channels)
1006 {
1007  AudioIIRContext *s = ctx->priv;
1008  int ch;
1009 
1010  for (ch = 0; ch < channels; ch++) {
1011  IIRChannel *iir = &s->iir[ch];
1012  int n;
1013 
1014  for (n = 0; n < iir->nb_ab[0]; n++) {
1015  double r = iir->ab[0][2*n];
1016  double angle = M_PI*iir->ab[0][2*n+1]/180.;
1017 
1018  iir->ab[0][2*n] = r * cos(angle);
1019  iir->ab[0][2*n+1] = r * sin(angle);
1020  }
1021 
1022  for (n = 0; n < iir->nb_ab[1]; n++) {
1023  double r = iir->ab[1][2*n];
1024  double angle = M_PI*iir->ab[1][2*n+1]/180.;
1025 
1026  iir->ab[1][2*n] = r * cos(angle);
1027  iir->ab[1][2*n+1] = r * sin(angle);
1028  }
1029  }
1030 }
1031 
1032 static void check_stability(AVFilterContext *ctx, int channels)
1033 {
1034  AudioIIRContext *s = ctx->priv;
1035  int ch;
1036 
1037  for (ch = 0; ch < channels; ch++) {
1038  IIRChannel *iir = &s->iir[ch];
1039 
1040  for (int n = 0; n < iir->nb_ab[0]; n++) {
1041  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
1042 
1043  if (pr >= 1.) {
1044  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
1045  break;
1046  }
1047  }
1048  }
1049 }
1050 
1051 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
1052 {
1053  const uint8_t *font;
1054  int font_height;
1055  int i;
1056 
1057  font = avpriv_cga_font, font_height = 8;
1058 
1059  for (i = 0; txt[i]; i++) {
1060  int char_y, mask;
1061 
1062  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1063  for (char_y = 0; char_y < font_height; char_y++) {
1064  for (mask = 0x80; mask; mask >>= 1) {
1065  if (font[txt[i] * font_height + char_y] & mask)
1066  AV_WL32(p, color);
1067  p += 4;
1068  }
1069  p += pic->linesize[0] - 8 * 4;
1070  }
1071  }
1072 }
1073 
1074 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1075 {
1076  int dx = FFABS(x1-x0);
1077  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1078  int err = (dx>dy ? dx : -dy) / 2, e2;
1079 
1080  for (;;) {
1081  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1082 
1083  if (x0 == x1 && y0 == y1)
1084  break;
1085 
1086  e2 = err;
1087 
1088  if (e2 >-dx) {
1089  err -= dy;
1090  x0--;
1091  }
1092 
1093  if (e2 < dy) {
1094  err += dx;
1095  y0 += sy;
1096  }
1097  }
1098 }
1099 
1100 static double distance(double x0, double x1, double y0, double y1)
1101 {
1102  return hypot(x0 - x1, y0 - y1);
1103 }
1104 
1105 static void get_response(int channel, int format, double w,
1106  const double *b, const double *a,
1107  int nb_b, int nb_a, double *magnitude, double *phase)
1108 {
1109  double realz, realp;
1110  double imagz, imagp;
1111  double real, imag;
1112  double div;
1113 
1114  if (format == 0) {
1115  realz = 0., realp = 0.;
1116  imagz = 0., imagp = 0.;
1117  for (int x = 0; x < nb_a; x++) {
1118  realz += cos(-x * w) * a[x];
1119  imagz += sin(-x * w) * a[x];
1120  }
1121 
1122  for (int x = 0; x < nb_b; x++) {
1123  realp += cos(-x * w) * b[x];
1124  imagp += sin(-x * w) * b[x];
1125  }
1126 
1127  div = realp * realp + imagp * imagp;
1128  real = (realz * realp + imagz * imagp) / div;
1129  imag = (imagz * realp - imagp * realz) / div;
1130 
1131  *magnitude = hypot(real, imag);
1132  *phase = atan2(imag, real);
1133  } else {
1134  double p = 1., z = 1.;
1135  double acc = 0.;
1136 
1137  for (int x = 0; x < nb_a; x++) {
1138  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1139  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1140  }
1141 
1142  for (int x = 0; x < nb_b; x++) {
1143  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1144  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1145  }
1146 
1147  *magnitude = z / p;
1148  *phase = acc;
1149  }
1150 }
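/* get_response() evaluates H(z) on the unit circle at z = e^(j*w). For the tf
 * format it sums the numerator and denominator polynomials directly; for the
 * zero/pole formats it multiplies the distances from e^(j*w) to every zero and
 * pole (magnitude) and accumulates the corresponding angles (phase).
 * draw_response() uses this to plot magnitude, phase and delay curves. */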
1151 
1152 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1153 {
1154  AudioIIRContext *s = ctx->priv;
1155  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1156  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1157  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1158  char text[32];
1159  int ch, i;
1160 
1161  memset(out->data[0], 0, s->h * out->linesize[0]);
1162 
1163  phase = av_malloc_array(s->w, sizeof(*phase));
1164  temp = av_malloc_array(s->w, sizeof(*temp));
1165  mag = av_malloc_array(s->w, sizeof(*mag));
1166  delay = av_malloc_array(s->w, sizeof(*delay));
1167  if (!mag || !phase || !delay || !temp)
1168  goto end;
1169 
1170  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1171  for (i = 0; i < s->w; i++) {
1172  const double *b = s->iir[ch].ab[0];
1173  const double *a = s->iir[ch].ab[1];
1174  const int nb_b = s->iir[ch].nb_ab[0];
1175  const int nb_a = s->iir[ch].nb_ab[1];
1176  double w = i * M_PI / (s->w - 1);
1177  double m, p;
1178 
1179  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1180 
1181  mag[i] = s->iir[ch].g * m;
1182  phase[i] = p;
1183  min = fmin(min, mag[i]);
1184  max = fmax(max, mag[i]);
1185  }
1186 
1187  temp[0] = 0.;
1188  for (i = 0; i < s->w - 1; i++) {
1189  double d = phase[i] - phase[i + 1];
1190  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1191  }
1192 
1193  min_phase = phase[0];
1194  max_phase = phase[0];
1195  for (i = 1; i < s->w; i++) {
1196  temp[i] += temp[i - 1];
1197  phase[i] += temp[i];
1198  min_phase = fmin(min_phase, phase[i]);
1199  max_phase = fmax(max_phase, phase[i]);
1200  }
1201 
1202  for (i = 0; i < s->w - 1; i++) {
1203  double div = s->w / (double)sample_rate;
1204 
1205  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1206  min_delay = fmin(min_delay, delay[i + 1]);
1207  max_delay = fmax(max_delay, delay[i + 1]);
1208  }
1209  delay[0] = delay[1];
1210 
1211  for (i = 0; i < s->w; i++) {
1212  int ymag = mag[i] / max * (s->h - 1);
1213  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1214  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1215 
1216  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1217  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1218  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1219 
1220  if (prev_ymag < 0)
1221  prev_ymag = ymag;
1222  if (prev_yphase < 0)
1223  prev_yphase = yphase;
1224  if (prev_ydelay < 0)
1225  prev_ydelay = ydelay;
1226 
1227  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1228  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1229  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1230 
1231  prev_ymag = ymag;
1232  prev_yphase = yphase;
1233  prev_ydelay = ydelay;
1234  }
1235 
1236  if (s->w > 400 && s->h > 100) {
1237  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1238  snprintf(text, sizeof(text), "%.2f", max);
1239  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1240 
1241  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1242  snprintf(text, sizeof(text), "%.2f", min);
1243  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1244 
1245  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1246  snprintf(text, sizeof(text), "%.2f", max_phase);
1247  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1248 
1249  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1250  snprintf(text, sizeof(text), "%.2f", min_phase);
1251  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1252 
1253  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1254  snprintf(text, sizeof(text), "%.2f", max_delay);
1255  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1256 
1257  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1258  snprintf(text, sizeof(text), "%.2f", min_delay);
1259  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1260  }
1261 
1262 end:
1263  av_free(delay);
1264  av_free(temp);
1265  av_free(phase);
1266  av_free(mag);
1267 }
1268 
1269 static int config_output(AVFilterLink *outlink)
1270 {
1271  AVFilterContext *ctx = outlink->src;
1272  AudioIIRContext *s = ctx->priv;
1273  AVFilterLink *inlink = ctx->inputs[0];
1274  int ch, ret, i;
1275 
1276  s->channels = inlink->channels;
1277  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1278  if (!s->iir)
1279  return AVERROR(ENOMEM);
1280 
1281  ret = read_gains(ctx, s->g_str, inlink->channels);
1282  if (ret < 0)
1283  return ret;
1284 
1285  ret = read_channels(ctx, inlink->channels, s->a_str, 0);
1286  if (ret < 0)
1287  return ret;
1288 
1289  ret = read_channels(ctx, inlink->channels, s->b_str, 1);
1290  if (ret < 0)
1291  return ret;
1292 
1293  if (s->format == -1) {
1294  convert_sf2tf(ctx, inlink->channels);
1295  s->format = 0;
1296  } else if (s->format == 2) {
1297  convert_pr2zp(ctx, inlink->channels);
1298  } else if (s->format == 3) {
1299  convert_pd2zp(ctx, inlink->channels);
1300  } else if (s->format == 4) {
1301  convert_sp2zp(ctx, inlink->channels);
1302  }
1303  if (s->format > 0) {
1304  check_stability(ctx, inlink->channels);
1305  }
1306 
1307  av_frame_free(&s->video);
1308  if (s->response) {
1309  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1310  if (!s->video)
1311  return AVERROR(ENOMEM);
1312 
1313  draw_response(ctx, s->video, inlink->sample_rate);
1314  }
1315 
1316  if (s->format == 0)
1317  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for too high number of zeros/poles.\n");
1318 
1319  if (s->format > 0 && s->process == 0) {
1320  av_log(ctx, AV_LOG_WARNING, "Direct processsing is not recommended for zp coefficients format.\n");
1321 
1322  ret = convert_zp2tf(ctx, inlink->channels);
1323  if (ret < 0)
1324  return ret;
1325  } else if (s->format == -2 && s->process > 0) {
1326  av_log(ctx, AV_LOG_ERROR, "Only direct processing is implemented for lattice-ladder function.\n");
1327  return AVERROR_PATCHWELCOME;
1328  } else if (s->format <= 0 && s->process == 1) {
1329  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1330  return AVERROR_PATCHWELCOME;
1331  } else if (s->format <= 0 && s->process == 2) {
1332  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1333  return AVERROR_PATCHWELCOME;
1334  } else if (s->format > 0 && s->process == 1) {
1335  ret = decompose_zp2biquads(ctx, inlink->channels);
1336  if (ret < 0)
1337  return ret;
1338  } else if (s->format > 0 && s->process == 2) {
1339  if (s->precision > 1)
1340  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1341  ret = decompose_zp2biquads(ctx, inlink->channels);
1342  if (ret < 0)
1343  return ret;
1344  ret = convert_serial2parallel(ctx, inlink->channels);
1345  if (ret < 0)
1346  return ret;
1347  }
1348 
1349  for (ch = 0; s->format == -2 && ch < inlink->channels; ch++) {
1350  IIRChannel *iir = &s->iir[ch];
1351 
1352  if (iir->nb_ab[0] != iir->nb_ab[1] + 1) {
1353  av_log(ctx, AV_LOG_ERROR, "Number of ladder coefficients must be one more than number of reflection coefficients.\n");
1354  return AVERROR(EINVAL);
1355  }
1356  }
1357 
1358  for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
1359  IIRChannel *iir = &s->iir[ch];
1360 
1361  for (i = 1; i < iir->nb_ab[0]; i++) {
1362  iir->ab[0][i] /= iir->ab[0][0];
1363  }
1364 
1365  iir->ab[0][0] = 1.0;
1366  for (i = 0; i < iir->nb_ab[1]; i++) {
1367  iir->ab[1][i] *= iir->g;
1368  }
1369 
1370  normalize_coeffs(ctx, ch);
1371  }
1372 
1373  switch (inlink->format) {
1374  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1375  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1376  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1377  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1378  }
1379 
1380  if (s->format == -2) {
1381  switch (inlink->format) {
1382  case AV_SAMPLE_FMT_DBLP: s->iir_channel = iir_ch_lattice_dblp; break;
1383  case AV_SAMPLE_FMT_FLTP: s->iir_channel = iir_ch_lattice_fltp; break;
1384  case AV_SAMPLE_FMT_S32P: s->iir_channel = iir_ch_lattice_s32p; break;
1385  case AV_SAMPLE_FMT_S16P: s->iir_channel = iir_ch_lattice_s16p; break;
1386  }
1387  }
1388 
1389  return 0;
1390 }
1391 
1392 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1393 {
1394  AVFilterContext *ctx = inlink->dst;
1395  AudioIIRContext *s = ctx->priv;
1396  AVFilterLink *outlink = ctx->outputs[0];
1397  ThreadData td;
1398  AVFrame *out;
1399  int ch, ret;
1400 
1401  if (av_frame_is_writable(in) && s->process != 2) {
1402  out = in;
1403  } else {
1404  out = ff_get_audio_buffer(outlink, in->nb_samples);
1405  if (!out) {
1406  av_frame_free(&in);
1407  return AVERROR(ENOMEM);
1408  }
1409  av_frame_copy_props(out, in);
1410  }
1411 
1412  td.in = in;
1413  td.out = out;
1414  ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);
1415 
1416  for (ch = 0; ch < outlink->channels; ch++) {
1417  if (s->iir[ch].clippings > 0)
1418  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1419  ch, s->iir[ch].clippings);
1420  s->iir[ch].clippings = 0;
1421  }
1422 
1423  if (in != out)
1424  av_frame_free(&in);
1425 
1426  if (s->response) {
1427  AVFilterLink *outlink = ctx->outputs[1];
1428  int64_t old_pts = s->video->pts;
1429  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1430 
1431  if (new_pts > old_pts) {
1432  AVFrame *clone;
1433 
1434  s->video->pts = new_pts;
1435  clone = av_frame_clone(s->video);
1436  if (!clone)
1437  return AVERROR(ENOMEM);
1438  ret = ff_filter_frame(outlink, clone);
1439  if (ret < 0)
1440  return ret;
1441  }
1442  }
1443 
1444  return ff_filter_frame(outlink, out);
1445 }
1446 
1447 static int config_video(AVFilterLink *outlink)
1448 {
1449  AVFilterContext *ctx = outlink->src;
1450  AudioIIRContext *s = ctx->priv;
1451 
1452  outlink->sample_aspect_ratio = (AVRational){1,1};
1453  outlink->w = s->w;
1454  outlink->h = s->h;
1455  outlink->frame_rate = s->rate;
1456  outlink->time_base = av_inv_q(outlink->frame_rate);
1457 
1458  return 0;
1459 }
1460 
1461 static av_cold int init(AVFilterContext *ctx)
1462 {
1463  AudioIIRContext *s = ctx->priv;
1464  AVFilterPad pad, vpad;
1465  int ret;
1466 
1467  if (!s->a_str || !s->b_str || !s->g_str) {
1468  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1469  return AVERROR(EINVAL);
1470  }
1471 
1472  switch (s->precision) {
1473  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1474  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1475  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1476  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1477  default: return AVERROR_BUG;
1478  }
1479 
1480  pad = (AVFilterPad){
1481  .name = "default",
1482  .type = AVMEDIA_TYPE_AUDIO,
1483  .config_props = config_output,
1484  };
1485 
1486  ret = ff_insert_outpad(ctx, 0, &pad);
1487  if (ret < 0)
1488  return ret;
1489 
1490  if (s->response) {
1491  vpad = (AVFilterPad){
1492  .name = "filter_response",
1493  .type = AVMEDIA_TYPE_VIDEO,
1494  .config_props = config_video,
1495  };
1496 
1497  ret = ff_insert_outpad(ctx, 1, &vpad);
1498  if (ret < 0)
1499  return ret;
1500  }
1501 
1502  return 0;
1503 }
1504 
1505 static av_cold void uninit(AVFilterContext *ctx)
1506 {
1507  AudioIIRContext *s = ctx->priv;
1508  int ch;
1509 
1510  if (s->iir) {
1511  for (ch = 0; ch < s->channels; ch++) {
1512  IIRChannel *iir = &s->iir[ch];
1513  av_freep(&iir->ab[0]);
1514  av_freep(&iir->ab[1]);
1515  av_freep(&iir->cache[0]);
1516  av_freep(&iir->cache[1]);
1517  av_freep(&iir->biquads);
1518  }
1519  }
1520  av_freep(&s->iir);
1521 
1522  av_frame_free(&s->video);
1523 }
1524 
1525 static const AVFilterPad inputs[] = {
1526  {
1527  .name = "default",
1528  .type = AVMEDIA_TYPE_AUDIO,
1529  .filter_frame = filter_frame,
1530  },
1531  { NULL }
1532 };
1533 
1534 #define OFFSET(x) offsetof(AudioIIRContext, x)
1535 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1536 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1537 
1538 static const AVOption aiir_options[] = {
1539  { "zeros", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1540  { "z", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1541  { "poles", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1542  { "p", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1543  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1544  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1545  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1546  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1547  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, "format" },
1548  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, "format" },
1549  { "ll", "lattice-ladder function", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, AF, "format" },
1550  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "format" },
1551  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
1552  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
1553  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
1554  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
1555  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
1556  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1557  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1558  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
1559  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
1560  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "process" },
1561  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1562  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1563  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
1564  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
1565  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
1566  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
1567  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1568  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1569  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1570  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1571  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1572  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1573  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1574  { NULL },
1575 };
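/* Illustrative example (values chosen here for demonstration, not taken from
 * the FFmpeg documentation): a first-order filter in digital transfer-function
 * form, applied with direct processing and normalization disabled, is
 *     ffmpeg -i in.wav -af "aiir=z=1 1:p=1 -0.5:f=tf:r=d:n=0" out.wav
 * which realizes y[n] = x[n] + x[n-1] + 0.5*y[n-1] on every channel. */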
1576 
1577 AVFILTER_DEFINE_CLASS(aiir);
1578 
1580  .name = "aiir",
1581  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1582  .priv_size = sizeof(AudioIIRContext),
1583  .priv_class = &aiir_class,
1584  .init = init,
1585  .uninit = uninit,
1586  .query_formats = query_formats,
1587  .inputs = inputs,
1588  .outputs = NULL,
1589  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_SLICE_THREADS,
1590 };
coef_sf2zf
static double coef_sf2zf(double *a, int N, int n)
Definition: af_aiir.c:958
Pair
Definition: af_aiir.c:35
M
#define M(a, b)
Definition: vp3dsp.c:48
AudioIIRContext::format
int format
Definition: af_aiir.c:61
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:97
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:69
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:156
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_clip
#define av_clip
Definition: common.h:96
mix
static int mix(int c0, int c1)
Definition: 4xm.c:716
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aiir)
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:583
IIRChannel::clippings
int clippings
Definition: af_aiir.c:52
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:237
AF
#define AF
Definition: af_aiir.c:1535
inputs
static const AVFilterPad inputs[]
Definition: af_aiir.c:1525
IIRChannel::nb_ab
int nb_ab[2]
Definition: af_aiir.c:46
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
BiquadContext::a
double a[3]
Definition: af_aiir.c:40
aiir_options
static const AVOption aiir_options[]
Definition: af_aiir.c:1538
convert_serial2parallel
static int convert_serial2parallel(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:827
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:429
RE
#define RE(x, ch)
read_channels
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
Definition: af_aiir.c:457
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AudioIIRContext::ir_channel
int ir_channel
Definition: af_aiir.c:66
IIRChannel::biquads
BiquadContext * biquads
Definition: af_aiir.c:51
w
uint8_t w
Definition: llviddspenc.c:38
AVOption
AVOption.
Definition: opt.h:247
b
#define b
Definition: input.c:40
IIR_CH
#define IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:122
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:68
read_tf_coefficients
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
Definition: af_aiir.c:405
AudioIIRContext::iir_channel
int(* iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
Definition: af_aiir.c:75
check_stability
static void check_stability(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1032
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_aiir.c:1269
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AudioIIRContext::b_str
char * b_str
Definition: af_aiir.c:57
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
solve
static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
Definition: af_aiir.c:793
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:502
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_aiir.c:1447
AudioIIRContext::video
AVFrame * video
Definition: af_aiir.c:69
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1565
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
AudioIIRContext::g_str
char * g_str
Definition: af_aiir.c:57
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
BiquadContext
Definition: af_aiir.c:39
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:1665
fail
#define fail()
Definition: checkasm.h:136
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_aiir.c:78
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
AudioIIRContext::h
int h
Definition: af_aiir.c:65
a1
#define a1
Definition: regdef.h:47
AudioIIRContext::process
int process
Definition: af_aiir.c:62
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:580
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aiir.c:1392
convert_zp2tf
static int convert_zp2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:565
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:226
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:459
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:186
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:33
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:424
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
IIRChannel::cache
double * cache[2]
Definition: af_aiir.c:49
NAN
#define NAN
Definition: mathematics.h:64
SERIAL_IIR_CH
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:178
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:960
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
biquad
@ biquad
Definition: af_biquads.c:74
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:234
AudioIIRContext::rate
AVRational rate
Definition: af_aiir.c:67
normalize_coeffs
static void normalize_coeffs(AVFilterContext *ctx, int ch)
Definition: af_aiir.c:537
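For illustration, one common normalization strategy scales the numerator so the filter has unit gain at DC (sum of b over sum of a equals 1); the filter's own normalize_coeffs() may use a different criterion, so treat this as a sketch of the idea only.
static void normalize_dc_gain(double *b, int nb_b, const double *a, int nb_a)
{
    double sum_b = 0.0, sum_a = 0.0;

    for (int i = 0; i < nb_b; i++)
        sum_b += b[i];
    for (int i = 0; i < nb_a; i++)
        sum_a += a[i];
    if (sum_b != 0.0) {
        double scale = sum_a / sum_b;   /* force H(z=1) == 1 */
        for (int i = 0; i < nb_b; i++)
            b[i] *= scale;
    }
}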
exp
int8_t exp
Definition: eval.c:72
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_aiir.c:1461
convert_sp2zp
static void convert_sp2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:924
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
BiquadContext::w1
double w1
Definition: af_aiir.c:42
BiquadContext::w2
double w2
Definition: af_aiir.c:42
format
static const char *const format[]
Definition: af_aiir.c:455
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:116
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_aiir.c:1074
fmin
double fmin(double, double)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
W
@ W
Definition: vf_addroi.c:26
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:473
read_zp_coefficients
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
Definition: af_aiir.c:430
IM
#define IM(x, ch)
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1666
a
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a division or some other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
Definition: undefined.txt:41
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:228
xga_font_data.h
N
#define N
Definition: af_mcompand.c:53
fact
static double fact(double i)
Definition: af_aiir.c:951
IIRChannel::g
double g
Definition: af_aiir.c:48
Pair::b
int b
Definition: af_aiir.c:36
M_PI
#define M_PI
Definition: mathematics.h:52
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
internal.h
AudioIIRContext::a_str
char * a_str
Definition: af_aiir.c:57
normalize
Definition: normalize.py:1
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:369
i
int i
Definition: input.c:406
VF
#define VF
Definition: af_aiir.c:1536
AudioIIRContext::sample_format
enum AVSampleFormat sample_format
Definition: af_aiir.c:73
OFFSET
#define OFFSET(x)
Definition: af_aiir.c:1534
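A hypothetical example of how an OFFSET()-style macro and the AV_OPT_TYPE_* constants are combined into an AVOption table entry; ExampleContext and its "gain" option are made up for illustration and are not part of this filter.
#include <stddef.h>
#include "libavutil/opt.h"

typedef struct ExampleContext {
    const AVClass *class;
    double gain;
} ExampleContext;

#define EX_OFFSET(x) offsetof(ExampleContext, x)

static const AVOption example_options[] = {
    { "gain", "set output gain", EX_OFFSET(gain), AV_OPT_TYPE_DOUBLE,
      { .dbl = 1.0 }, 0.0, 10.0,
      AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
    { NULL }
};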
IIRChannel::ab
double * ab[2]
Definition: af_aiir.c:47
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
a2
#define a2
Definition: regdef.h:48
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AudioIIRContext::precision
int precision
Definition: af_aiir.c:63
convert_sf2tf
static void convert_sf2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:977
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AudioIIRContext::wet_gain
double wet_gain
Definition: af_aiir.c:58
get_response
static void get_response(int channel, int format, double w, const double *b, const double *a, int nb_b, int nb_a, double *magnitude, double *phase)
Definition: af_aiir.c:1105
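A sketch of what evaluating a transfer-function response at angular frequency w amounts to: compute B(e^-jw) and A(e^-jw), divide, and take magnitude and phase. The real get_response() also handles zero/pole formats; this generic helper is for illustration only.
#include <math.h>

static void eval_tf_response(double w, const double *b, int nb_b,
                             const double *a, int nb_a,
                             double *magnitude, double *phase)
{
    double br = 0.0, bi = 0.0, ar = 0.0, ai = 0.0;

    for (int i = 0; i < nb_b; i++) {
        br += b[i] * cos(-i * w);
        bi += b[i] * sin(-i * w);
    }
    for (int i = 0; i < nb_a; i++) {
        ar += a[i] * cos(-i * w);
        ai += a[i] * sin(-i * w);
    }
    /* complex division B / A via multiplication by the conjugate of A */
    double den = ar * ar + ai * ai;
    double re  = (br * ar + bi * ai) / den;
    double im  = (bi * ar - br * ai) / den;

    *magnitude = hypot(re, im);
    *phase     = atan2(im, re);
}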
AVFilter
Filter definition.
Definition: avfilter.h:145
cmul
static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
Definition: af_aiir.c:504
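A helper with this signature computes a complex product, (a + jb)(c + jd) = (ac - bd) + j(ad + bc); the standalone sketch below illustrates that identity and is not the filter's own routine.
static void cmul_sketch(double re, double im, double re2, double im2,
                        double *RE, double *IM)
{
    *RE = re * re2 - im * im2;
    *IM = re * im2 + im * re2;
}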
PARALLEL_IIR_CH
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:237
ret
ret
Definition: filter_design.txt:187
AudioIIRContext::dry_gain
double dry_gain
Definition: af_aiir.c:58
Pair::a
int a
Definition: af_aiir.c:36
fmax
double fmax(double, double)
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:414
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:251
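A minimal sketch of the kind of zero-initialized per-channel allocation av_calloc() is used for; the helper name and parameter are illustrative.
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int alloc_cache(double **cache, size_t nb_taps)
{
    /* nb_taps elements, each sizeof(double), zero-filled */
    *cache = av_calloc(nb_taps, sizeof(**cache));
    return *cache ? 0 : AVERROR(ENOMEM);
}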
convert_pr2zp
static void convert_pr2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:897
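Converting a polar pole/zero (magnitude and angle) to rectangular real/imaginary parts is the core of a pr-to-zp conversion; the sketch below shows that single step and is an illustration, not the filter's exact routine.
#include <math.h>

static void polar_to_rect(double mag, double angle, double *re, double *im)
{
    *re = mag * cos(angle);
    *im = mag * sin(angle);
}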
BiquadContext::b
double b[3]
Definition: af_aiir.c:41
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
Definition: af_aiir.c:1152
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_insert_outpad
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:248
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
AudioIIRContext::response
int response
Definition: af_aiir.c:64
distance
static double distance(double x0, double x1, double y0, double y1)
Definition: af_aiir.c:1100
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:70
temp
else temp
Definition: vf_mcdeint.c:259
AudioIIRContext::channels
int channels
Definition: af_aiir.c:72
decompose_zp2biquads
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:613
AVFilterContext
An instance of a filter.
Definition: avfilter.h:333
factor
static const int factor[16]
Definition: vf_pp7.c:76
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
Definition: avfilter.h:117
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:259
LATTICE_IIR_CH
#define LATTICE_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:301
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
count_coefficients
static void count_coefficients(char *item_str, int *nb_items)
Definition: af_aiir.c:358
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:437
ThreadData::in
AVFrame * in
Definition: af_adenorm.c:223
biquad_process
static void biquad_process(double *x, double *y, int length, double b0, double b1, double b2, double a1, double a2)
Definition: af_aiir.c:775
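A routine with this signature filters a buffer through one second-order section. The sketch below is a generic direct-form-II-transposed biquad (with a0 normalized to 1), shown only to illustrate the computation; the actual implementation in af_aiir.c may differ in detail.
static void biquad_sketch(const double *x, double *y, int length,
                          double b0, double b1, double b2,
                          double a1, double a2)
{
    double w1 = 0.0, w2 = 0.0;   /* filter state */

    for (int n = 0; n < length; n++) {
        double in  = x[n];
        double out = b0 * in + w1;

        w1 = b1 * in - a1 * out + w2;
        w2 = b2 * in - a2 * out;
        y[n] = out;
    }
}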
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:510
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
AudioIIRContext
Definition: af_aiir.c:55
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AudioIIRContext::iir
IIRChannel * iir
Definition: af_aiir.c:71
d
d
Definition: ffmpeg_filter.c:156
int32_t
int32_t
Definition: audioconvert.c:56
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:334
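A minimal sketch of how linesize is used as the byte stride when addressing a pixel in a packed 32-bit RGB0 frame, similar in spirit to what the response-drawing code has to do; the helper is illustrative only.
#include "libavutil/frame.h"
#include "libavutil/intreadwrite.h"

static void put_pixel(AVFrame *out, int x, int y, uint32_t color)
{
    /* each row starts linesize[0] bytes after the previous one;
     * each RGB0 pixel occupies 4 bytes */
    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, color);
}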
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:568
b0
static double b0(void *priv, double x, double y)
Definition: vf_xfade.c:1664
IIRChannel::fir
double fir
Definition: af_aiir.c:50
ff_af_aiir
const AVFilter ff_af_aiir
Definition: af_aiir.c:1579
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
AudioIIRContext::normalize
int normalize
Definition: af_aiir.c:60
int
int
Definition: ffmpeg_filter.c:156
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aiir.c:1505
snprintf
#define snprintf
Definition: snprintf.h:34
read_gains
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
Definition: af_aiir.c:372
IIRChannel
Definition: af_aiir.c:45
AudioIIRContext::mix
double mix
Definition: af_aiir.c:59
convert_pd2zp
static void convert_pd2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1005
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_aiir.c:1051
channel
channel
Definition: ebur128.h:39
re
float re
Definition: fft.c:82
ff_set_common_channel_layouts
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *channel_layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates.
Definition: formats.c:561
min
float min
Definition: vorbis_enc_data.h:429
AudioIIRContext::w
int w
Definition: af_aiir.c:65