FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/xga_font_data.h"
28 #include "audio.h"
29 #include "avfilter.h"
30 #include "internal.h"
31 
32 typedef struct ThreadData {
33  AVFrame *in, *out;
34 } ThreadData;
35 
36 typedef struct Pair {
37  int a, b;
38 } Pair;
39 
40 typedef struct BiquadContext {
41  double a[3];
42  double b[3];
43  double w1, w2;
44 } BiquadContext;
45 
46 typedef struct IIRChannel {
47  int nb_ab[2];
48  double *ab[2];
49  double g;
50  double *cache[2];
51  double fir;
52  BiquadContext *biquads;
53  int clippings;
54 } IIRChannel;
55 
56 typedef struct AudioIIRContext {
57  const AVClass *class;
58  char *a_str, *b_str, *g_str;
59  double dry_gain, wet_gain;
60  double mix;
61  int normalize;
62  int format;
63  int process;
64  int precision;
65  int response;
66  int w, h;
67  int ir_channel;
68  AVRational rate;
69 
70  AVFrame *video;
71 
72  IIRChannel *iir;
73  int channels;
74  enum AVSampleFormat sample_format;
75 
76  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
77 } AudioIIRContext;
78 
79 static int query_formats(AVFilterContext *ctx)
80 {
81  AudioIIRContext *s = ctx->priv;
82  AVFilterFormats *formats;
83  AVFilterChannelLayouts *layouts;
84  enum AVSampleFormat sample_fmts[] = {
85  AV_SAMPLE_FMT_DBLP,
86  AV_SAMPLE_FMT_NONE
87  };
88  static const enum AVPixelFormat pix_fmts[] = {
89  AV_PIX_FMT_RGB0,
90  AV_PIX_FMT_NONE
91  };
92  int ret;
93 
94  if (s->response) {
95  AVFilterLink *videolink = ctx->outputs[1];
96 
97  formats = ff_make_format_list(pix_fmts);
98  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
99  return ret;
100  }
101 
102  layouts = ff_all_channel_counts();
103  if (!layouts)
104  return AVERROR(ENOMEM);
105  ret = ff_set_common_channel_layouts(ctx, layouts);
106  if (ret < 0)
107  return ret;
108 
109  sample_fmts[0] = s->sample_format;
110  formats = ff_make_format_list(sample_fmts);
111  if (!formats)
112  return AVERROR(ENOMEM);
113  ret = ff_set_common_formats(ctx, formats);
114  if (ret < 0)
115  return ret;
116 
117  formats = ff_all_samplerates();
118  if (!formats)
119  return AVERROR(ENOMEM);
120  return ff_set_common_samplerates(ctx, formats);
121 }
122 
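/*
 * Direct processing: each channel is filtered with the direct form I
 * difference equation
 *     y[n] = sum(b[k]*x[n-k], k=0..nb_b-1) - sum(a[k]*y[n-k], k=1..nb_a-1),
 * keeping the input history in cache[1] (ic) and the output history in
 * cache[0] (oc). The result is scaled by the channel gain and the wet gain,
 * mixed with the dry-gained input, and, for the integer sample formats,
 * clipped with a per-channel clipping counter.
 */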
123 #define IIR_CH(name, type, min, max, need_clipping) \
124 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
125 { \
126  AudioIIRContext *s = ctx->priv; \
127  const double ig = s->dry_gain; \
128  const double og = s->wet_gain; \
129  const double mix = s->mix; \
130  ThreadData *td = arg; \
131  AVFrame *in = td->in, *out = td->out; \
132  const type *src = (const type *)in->extended_data[ch]; \
133  double *oc = (double *)s->iir[ch].cache[0]; \
134  double *ic = (double *)s->iir[ch].cache[1]; \
135  const int nb_a = s->iir[ch].nb_ab[0]; \
136  const int nb_b = s->iir[ch].nb_ab[1]; \
137  const double *a = s->iir[ch].ab[0]; \
138  const double *b = s->iir[ch].ab[1]; \
139  const double g = s->iir[ch].g; \
140  int *clippings = &s->iir[ch].clippings; \
141  type *dst = (type *)out->extended_data[ch]; \
142  int n; \
143  \
144  for (n = 0; n < in->nb_samples; n++) { \
145  double sample = 0.; \
146  int x; \
147  \
148  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
149  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
150  ic[0] = src[n] * ig; \
151  for (x = 0; x < nb_b; x++) \
152  sample += b[x] * ic[x]; \
153  \
154  for (x = 1; x < nb_a; x++) \
155  sample -= a[x] * oc[x]; \
156  \
157  oc[0] = sample; \
158  sample *= og * g; \
159  sample = sample * mix + ic[0] * (1. - mix); \
160  if (need_clipping && sample < min) { \
161  (*clippings)++; \
162  dst[n] = min; \
163  } else if (need_clipping && sample > max) { \
164  (*clippings)++; \
165  dst[n] = max; \
166  } else { \
167  dst[n] = sample; \
168  } \
169  } \
170  \
171  return 0; \
172 }
173 
174 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
175 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
176 IIR_CH(fltp, float, -1., 1., 0)
177 IIR_CH(dblp, double, -1., 1., 0)
178 
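/*
 * Serial (cascade) processing: the zeros/poles are grouped into second-order
 * sections (see decompose_zp2biquads() below), each realized in transposed
 * direct form II with two state variables w1/w2. Every section is run over
 * the whole frame in turn, reading its input from dst (or from src for
 * section 0) and writing back to dst, with the same gain/mix/clipping
 * handling as the direct path.
 */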
179 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
180 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
181  int ch, int nb_jobs) \
182 { \
183  AudioIIRContext *s = ctx->priv; \
184  const double ig = s->dry_gain; \
185  const double og = s->wet_gain; \
186  const double mix = s->mix; \
187  const double imix = 1. - mix; \
188  ThreadData *td = arg; \
189  AVFrame *in = td->in, *out = td->out; \
190  const type *src = (const type *)in->extended_data[ch]; \
191  type *dst = (type *)out->extended_data[ch]; \
192  IIRChannel *iir = &s->iir[ch]; \
193  const double g = iir->g; \
194  int *clippings = &iir->clippings; \
195  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
196  int n, i; \
197  \
198  for (i = nb_biquads - 1; i >= 0; i--) { \
199  const double a1 = -iir->biquads[i].a[1]; \
200  const double a2 = -iir->biquads[i].a[2]; \
201  const double b0 = iir->biquads[i].b[0]; \
202  const double b1 = iir->biquads[i].b[1]; \
203  const double b2 = iir->biquads[i].b[2]; \
204  double w1 = iir->biquads[i].w1; \
205  double w2 = iir->biquads[i].w2; \
206  \
207  for (n = 0; n < in->nb_samples; n++) { \
208  double i0 = ig * (i ? dst[n] : src[n]); \
209  double o0 = i0 * b0 + w1; \
210  \
211  w1 = b1 * i0 + w2 + a1 * o0; \
212  w2 = b2 * i0 + a2 * o0; \
213  o0 *= og * g; \
214  \
215  o0 = o0 * mix + imix * i0; \
216  if (need_clipping && o0 < min) { \
217  (*clippings)++; \
218  dst[n] = min; \
219  } else if (need_clipping && o0 > max) { \
220  (*clippings)++; \
221  dst[n] = max; \
222  } else { \
223  dst[n] = o0; \
224  } \
225  } \
226  iir->biquads[i].w1 = w1; \
227  iir->biquads[i].w2 = w2; \
228  } \
229  \
230  return 0; \
231 }
232 
233 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
234 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
235 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
236 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
237 
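/*
 * Parallel processing: each biquad filters the original (dry-gained) input
 * independently and its output is accumulated into dst; a final pass over
 * the frame adds the direct FIR tap (iir->fir) and applies the wet/dry mix.
 */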
238 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
239 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
240  int ch, int nb_jobs) \
241 { \
242  AudioIIRContext *s = ctx->priv; \
243  const double ig = s->dry_gain; \
244  const double og = s->wet_gain; \
245  const double mix = s->mix; \
246  const double imix = 1. - mix; \
247  ThreadData *td = arg; \
248  AVFrame *in = td->in, *out = td->out; \
249  const type *src = (const type *)in->extended_data[ch]; \
250  type *dst = (type *)out->extended_data[ch]; \
251  IIRChannel *iir = &s->iir[ch]; \
252  const double g = iir->g; \
253  const double fir = iir->fir; \
254  int *clippings = &iir->clippings; \
255  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
256  int n, i; \
257  \
258  for (i = 0; i < nb_biquads; i++) { \
259  const double a1 = -iir->biquads[i].a[1]; \
260  const double a2 = -iir->biquads[i].a[2]; \
261  const double b1 = iir->biquads[i].b[1]; \
262  const double b2 = iir->biquads[i].b[2]; \
263  double w1 = iir->biquads[i].w1; \
264  double w2 = iir->biquads[i].w2; \
265  \
266  for (n = 0; n < in->nb_samples; n++) { \
267  double i0 = ig * src[n]; \
268  double o0 = w1; \
269  \
270  w1 = b1 * i0 + w2 + a1 * o0; \
271  w2 = b2 * i0 + a2 * o0; \
272  o0 *= og * g; \
273  o0 += dst[n]; \
274  \
275  if (need_clipping && o0 < min) { \
276  (*clippings)++; \
277  dst[n] = min; \
278  } else if (need_clipping && o0 > max) { \
279  (*clippings)++; \
280  dst[n] = max; \
281  } else { \
282  dst[n] = o0; \
283  } \
284  } \
285  iir->biquads[i].w1 = w1; \
286  iir->biquads[i].w2 = w2; \
287  } \
288  \
289  for (n = 0; n < in->nb_samples; n++) { \
290  dst[n] += fir * src[n]; \
291  dst[n] = dst[n] * mix + imix * src[n]; \
292  } \
293  \
294  return 0; \
295 }
296 
297 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
298 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
299 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
300 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
301 
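/*
 * Lattice-ladder processing: k (ab[1]) holds the reflection coefficients and
 * v (ab[0]) the ladder coefficients, one more than the number of stages; the
 * per-channel cache keeps the lattice state and the ladder taps are summed
 * into the output before gain, mix and optional clipping are applied.
 */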
302 #define LATTICE_IIR_CH(name, type, min, max, need_clipping) \
303 static int iir_ch_lattice_## name(AVFilterContext *ctx, void *arg, \
304  int ch, int nb_jobs) \
305 { \
306  AudioIIRContext *s = ctx->priv; \
307  const double ig = s->dry_gain; \
308  const double og = s->wet_gain; \
309  const double mix = s->mix; \
310  ThreadData *td = arg; \
311  AVFrame *in = td->in, *out = td->out; \
312  const type *src = (const type *)in->extended_data[ch]; \
313  double n0, n1, p0, *x = (double *)s->iir[ch].cache[0]; \
314  const int nb_stages = s->iir[ch].nb_ab[1]; \
315  const double *v = s->iir[ch].ab[0]; \
316  const double *k = s->iir[ch].ab[1]; \
317  const double g = s->iir[ch].g; \
318  int *clippings = &s->iir[ch].clippings; \
319  type *dst = (type *)out->extended_data[ch]; \
320  int n; \
321  \
322  for (n = 0; n < in->nb_samples; n++) { \
323  const double in = src[n] * ig; \
324  double out = 0.; \
325  \
326  n1 = in; \
327  for (int i = nb_stages - 1; i >= 0; i--) { \
328  n0 = n1 - k[i] * x[i]; \
329  p0 = n0 * k[i] + x[i]; \
330  out += p0 * v[i+1]; \
331  x[i] = p0; \
332  n1 = n0; \
333  } \
334  \
335  out += n1 * v[0]; \
336  memmove(&x[1], &x[0], nb_stages * sizeof(*x)); \
337  x[0] = n1; \
338  out *= og * g; \
339  out = out * mix + in * (1. - mix); \
340  if (need_clipping && out < min) { \
341  (*clippings)++; \
342  dst[n] = min; \
343  } else if (need_clipping && out > max) { \
344  (*clippings)++; \
345  dst[n] = max; \
346  } else { \
347  dst[n] = out; \
348  } \
349  } \
350  \
351  return 0; \
352 }
353 
354 LATTICE_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
355 LATTICE_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
356 LATTICE_IIR_CH(fltp, float, -1., 1., 0)
357 LATTICE_IIR_CH(dblp, double, -1., 1., 0)
358 
359 static void count_coefficients(char *item_str, int *nb_items)
360 {
361  char *p;
362 
363  if (!item_str)
364  return;
365 
366  *nb_items = 1;
367  for (p = item_str; *p && *p != '|'; p++) {
368  if (*p == ' ')
369  (*nb_items)++;
370  }
371 }
372 
373 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
374 {
375  AudioIIRContext *s = ctx->priv;
376  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
377  int i;
378 
379  p = old_str = av_strdup(item_str);
380  if (!p)
381  return AVERROR(ENOMEM);
382  for (i = 0; i < nb_items; i++) {
383  if (!(arg = av_strtok(p, "|", &saveptr)))
384  arg = prev_arg;
385 
386  if (!arg) {
387  av_freep(&old_str);
388  return AVERROR(EINVAL);
389  }
390 
391  p = NULL;
392  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
393  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
394  av_freep(&old_str);
395  return AVERROR(EINVAL);
396  }
397 
398  prev_arg = arg;
399  }
400 
401  av_freep(&old_str);
402 
403  return 0;
404 }
405 
406 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
407 {
408  char *p, *arg, *old_str, *saveptr = NULL;
409  int i;
410 
411  p = old_str = av_strdup(item_str);
412  if (!p)
413  return AVERROR(ENOMEM);
414  for (i = 0; i < nb_items; i++) {
415  if (!(arg = av_strtok(p, " ", &saveptr)))
416  break;
417 
418  p = NULL;
419  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
420  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
421  av_freep(&old_str);
422  return AVERROR(EINVAL);
423  }
424  }
425 
426  av_freep(&old_str);
427 
428  return 0;
429 }
430 
431 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
432 {
433  char *p, *arg, *old_str, *saveptr = NULL;
434  int i;
435 
436  p = old_str = av_strdup(item_str);
437  if (!p)
438  return AVERROR(ENOMEM);
439  for (i = 0; i < nb_items; i++) {
440  if (!(arg = av_strtok(p, " ", &saveptr)))
441  break;
442 
443  p = NULL;
444  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
445  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
446  av_freep(&old_str);
447  return AVERROR(EINVAL);
448  }
449  }
450 
451  av_freep(&old_str);
452 
453  return 0;
454 }
455 
456 static const char *const format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
457 
458 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
459 {
460  AudioIIRContext *s = ctx->priv;
461  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
462  int i, ret;
463 
464  p = old_str = av_strdup(item_str);
465  if (!p)
466  return AVERROR(ENOMEM);
467  for (i = 0; i < channels; i++) {
468  IIRChannel *iir = &s->iir[i];
469 
470  if (!(arg = av_strtok(p, "|", &saveptr)))
471  arg = prev_arg;
472 
473  if (!arg) {
474  av_freep(&old_str);
475  return AVERROR(EINVAL);
476  }
477 
478  count_coefficients(arg, &iir->nb_ab[ab]);
479 
480  p = NULL;
481  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
482  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
483  if (!iir->ab[ab] || !iir->cache[ab]) {
484  av_freep(&old_str);
485  return AVERROR(ENOMEM);
486  }
487 
488  if (s->format > 0) {
489  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
490  } else {
491  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
492  }
493  if (ret < 0) {
494  av_freep(&old_str);
495  return ret;
496  }
497  prev_arg = arg;
498  }
499 
500  av_freep(&old_str);
501 
502  return 0;
503 }
504 
505 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
506 {
507  *RE = re * re2 - im * im2;
508  *IM = re * im2 + re2 * im;
509 }
510 
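/*
 * expand() multiplies out a factored polynomial: given n roots stored as
 * interleaved re/im pairs in pz, it accumulates prod(z - root_i) into coefs
 * (also interleaved re/im, with coefs[2*i] the coefficient of z^i) and then
 * checks that every imaginary part is negligible, i.e. that complex roots
 * come in conjugate pairs.
 */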
511 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
512 {
513  coefs[2 * n] = 1.0;
514 
515  for (int i = 1; i <= n; i++) {
516  for (int j = n - i; j < n; j++) {
517  double re, im;
518 
519  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
520  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
521 
522  coefs[2 * j] -= re;
523  coefs[2 * j + 1] -= im;
524  }
525  }
526 
527  for (int i = 0; i < n + 1; i++) {
528  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
529  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
530  coefs[2 * i + 1], i);
531  return AVERROR(EINVAL);
532  }
533  }
534 
535  return 0;
536 }
537 
538 static void normalize_coeffs(AVFilterContext *ctx, int ch)
539 {
540  AudioIIRContext *s = ctx->priv;
541  IIRChannel *iir = &s->iir[ch];
542  double sum_den = 0.;
543 
544  if (!s->normalize)
545  return;
546 
547  for (int i = 0; i < iir->nb_ab[1]; i++) {
548  sum_den += iir->ab[1][i];
549  }
550 
551  if (sum_den > 1e-6) {
552  double factor, sum_num = 0.;
553 
554  for (int i = 0; i < iir->nb_ab[0]; i++) {
555  sum_num += iir->ab[0][i];
556  }
557 
558  factor = sum_num / sum_den;
559 
560  for (int i = 0; i < iir->nb_ab[1]; i++) {
561  iir->ab[1][i] *= factor;
562  }
563  }
564 }
565 
566 static int convert_zp2tf(AVFilterContext *ctx, int channels)
567 {
568  AudioIIRContext *s = ctx->priv;
569  int ch, i, j, ret = 0;
570 
571  for (ch = 0; ch < channels; ch++) {
572  IIRChannel *iir = &s->iir[ch];
573  double *topc, *botc;
574 
575  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
576  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
577  if (!topc || !botc) {
578  ret = AVERROR(ENOMEM);
579  goto fail;
580  }
581 
582  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
583  if (ret < 0) {
584  goto fail;
585  }
586 
587  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
588  if (ret < 0) {
589  goto fail;
590  }
591 
592  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
593  iir->ab[1][j] = topc[2 * i];
594  }
595  iir->nb_ab[1]++;
596 
597  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
598  iir->ab[0][j] = botc[2 * i];
599  }
600  iir->nb_ab[0]++;
601 
602  normalize_coeffs(ctx, ch);
603 
604 fail:
605  av_free(topc);
606  av_free(botc);
607  if (ret < 0)
608  break;
609  }
610 
611  return ret;
612 }
613 
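/*
 * Group the zeros/poles into second-order sections: repeatedly take the pole
 * with the largest magnitude together with its complex conjugate, pair it
 * with the nearest zero (and that zero's conjugate), expand both pairs into
 * quadratics and store the result, divided by the leading denominator
 * coefficient, as one biquad. Consumed roots are marked with NAN, and the
 * overall channel gain is folded into the first biquad's b coefficients.
 */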
614 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
615 {
616  AudioIIRContext *s = ctx->priv;
617  int ch, ret;
618 
619  for (ch = 0; ch < channels; ch++) {
620  IIRChannel *iir = &s->iir[ch];
621  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
622  int current_biquad = 0;
623 
624  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
625  if (!iir->biquads)
626  return AVERROR(ENOMEM);
627 
628  while (nb_biquads--) {
629  Pair outmost_pole = { -1, -1 };
630  Pair nearest_zero = { -1, -1 };
631  double zeros[4] = { 0 };
632  double poles[4] = { 0 };
633  double b[6] = { 0 };
634  double a[6] = { 0 };
635  double min_distance = DBL_MAX;
636  double max_mag = 0;
637  double factor;
638  int i;
639 
640  for (i = 0; i < iir->nb_ab[0]; i++) {
641  double mag;
642 
643  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
644  continue;
645  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
646 
647  if (mag > max_mag) {
648  max_mag = mag;
649  outmost_pole.a = i;
650  }
651  }
652 
653  for (i = 0; i < iir->nb_ab[0]; i++) {
654  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
655  continue;
656 
657  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
658  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
659  outmost_pole.b = i;
660  break;
661  }
662  }
663 
664  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
665 
666  if (outmost_pole.a < 0 || outmost_pole.b < 0)
667  return AVERROR(EINVAL);
668 
669  for (i = 0; i < iir->nb_ab[1]; i++) {
670  double distance;
671 
672  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
673  continue;
674  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
675  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
676 
677  if (distance < min_distance) {
678  min_distance = distance;
679  nearest_zero.a = i;
680  }
681  }
682 
683  for (i = 0; i < iir->nb_ab[1]; i++) {
684  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
685  continue;
686 
687  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
688  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
689  nearest_zero.b = i;
690  break;
691  }
692  }
693 
694  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
695 
696  if (nearest_zero.a < 0 || nearest_zero.b < 0)
697  return AVERROR(EINVAL);
698 
699  poles[0] = iir->ab[0][2 * outmost_pole.a ];
700  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
701 
702  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
703  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
704 
705  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
706  zeros[2] = 0;
707  zeros[3] = 0;
708 
709  poles[2] = 0;
710  poles[3] = 0;
711  } else {
712  poles[2] = iir->ab[0][2 * outmost_pole.b ];
713  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
714 
715  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
716  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
717  }
718 
719  ret = expand(ctx, zeros, 2, b);
720  if (ret < 0)
721  return ret;
722 
723  ret = expand(ctx, poles, 2, a);
724  if (ret < 0)
725  return ret;
726 
727  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
728  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
729  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
730  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
731 
732  iir->biquads[current_biquad].a[0] = 1.;
733  iir->biquads[current_biquad].a[1] = a[2] / a[4];
734  iir->biquads[current_biquad].a[2] = a[0] / a[4];
735  iir->biquads[current_biquad].b[0] = b[4] / a[4];
736  iir->biquads[current_biquad].b[1] = b[2] / a[4];
737  iir->biquads[current_biquad].b[2] = b[0] / a[4];
738 
739  if (s->normalize &&
740  fabs(iir->biquads[current_biquad].b[0] +
741  iir->biquads[current_biquad].b[1] +
742  iir->biquads[current_biquad].b[2]) > 1e-6) {
743  factor = (iir->biquads[current_biquad].a[0] +
744  iir->biquads[current_biquad].a[1] +
745  iir->biquads[current_biquad].a[2]) /
746  (iir->biquads[current_biquad].b[0] +
747  iir->biquads[current_biquad].b[1] +
748  iir->biquads[current_biquad].b[2]);
749 
750  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
751 
752  iir->biquads[current_biquad].b[0] *= factor;
753  iir->biquads[current_biquad].b[1] *= factor;
754  iir->biquads[current_biquad].b[2] *= factor;
755  }
756 
757  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
758  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
759  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
760 
761  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
762  iir->biquads[current_biquad].a[0],
763  iir->biquads[current_biquad].a[1],
764  iir->biquads[current_biquad].a[2],
765  iir->biquads[current_biquad].b[0],
766  iir->biquads[current_biquad].b[1],
767  iir->biquads[current_biquad].b[2]);
768 
769  current_biquad++;
770  }
771  }
772 
773  return 0;
774 }
775 
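/*
 * Run a single biquad (transposed direct form II, zero initial state) over a
 * buffer; used below to measure impulse responses when converting the serial
 * cascade into the parallel form.
 */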
776 static void biquad_process(double *x, double *y, int length,
777  double b0, double b1, double b2,
778  double a1, double a2)
779 {
780  double w1 = 0., w2 = 0.;
781 
782  a1 = -a1;
783  a2 = -a2;
784 
785  for (int n = 0; n < length; n++) {
786  double out, in = x[n];
787 
788  y[n] = out = in * b0 + w1;
789  w1 = b1 * in + w2 + a1 * out;
790  w2 = b2 * in + a2 * out;
791  }
792 }
793 
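/*
 * Solve an n-by-n linear system via LU decomposition (no pivoting) followed
 * by forward and backward substitution; y and lu are caller-supplied scratch
 * buffers, the solution ends up in x.
 */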
794 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
795 {
796  double sum = 0.;
797 
798  for (int i = 0; i < n; i++) {
799  for (int j = i; j < n; j++) {
800  sum = 0.;
801  for (int k = 0; k < i; k++)
802  sum += lu[i * n + k] * lu[k * n + j];
803  lu[i * n + j] = matrix[j * n + i] - sum;
804  }
805  for (int j = i + 1; j < n; j++) {
806  sum = 0.;
807  for (int k = 0; k < i; k++)
808  sum += lu[j * n + k] * lu[k * n + i];
809  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
810  }
811  }
812 
813  for (int i = 0; i < n; i++) {
814  sum = 0.;
815  for (int k = 0; k < i; k++)
816  sum += lu[i * n + k] * y[k];
817  y[i] = vector[i] - sum;
818  }
819 
820  for (int i = n - 1; i >= 0; i--) {
821  sum = 0.;
822  for (int k = i + 1; k < n; k++)
823  sum += lu[i * n + k] * x[k];
824  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
825  }
826 }
827 
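/*
 * Convert the biquad cascade into a parallel bank, a partial-fraction style
 * re-expansion done numerically: the cascade's impulse response is measured,
 * a system built from each section's all-pole impulse response is solved for
 * the new numerators, and the direct path is stored in iir->fir.
 */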
828 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
829 {
830  AudioIIRContext *s = ctx->priv;
831  int ret = 0;
832 
833  for (int ch = 0; ch < channels; ch++) {
834  IIRChannel *iir = &s->iir[ch];
835  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
836  int length = nb_biquads * 2 + 1;
837  double *impulse = av_calloc(length, sizeof(*impulse));
838  double *y = av_calloc(length, sizeof(*y));
839  double *resp = av_calloc(length, sizeof(*resp));
840  double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
841  double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
842 
843  if (!impulse || !y || !resp || !M || !W) {
844  av_free(impulse);
845  av_free(y);
846  av_free(resp);
847  av_free(M);
848  av_free(W);
849  return AVERROR(ENOMEM);
850  }
851 
852  impulse[0] = 1.;
853 
854  for (int n = 0; n < nb_biquads; n++) {
855  BiquadContext *biquad = &iir->biquads[n];
856 
857  biquad_process(n ? y : impulse, y, length,
858  biquad->b[0], biquad->b[1], biquad->b[2],
859  biquad->a[1], biquad->a[2]);
860  }
861 
862  for (int n = 0; n < nb_biquads; n++) {
863  BiquadContext *biquad = &iir->biquads[n];
864 
865  biquad_process(impulse, resp, length - 1,
866  1., 0., 0., biquad->a[1], biquad->a[2]);
867 
868  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
869  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
870  memset(resp, 0, length * sizeof(*resp));
871  }
872 
873  solve(M, &y[1], length - 1, &impulse[1], resp, W);
874 
875  iir->fir = y[0];
876 
877  for (int n = 0; n < nb_biquads; n++) {
878  BiquadContext *biquad = &iir->biquads[n];
879 
880  biquad->b[0] = 0.;
881  biquad->b[1] = resp[n * 2 + 0];
882  biquad->b[2] = resp[n * 2 + 1];
883  }
884 
885  av_free(impulse);
886  av_free(y);
887  av_free(resp);
888  av_free(M);
889  av_free(W);
890 
891  if (ret < 0)
892  return ret;
893  }
894 
895  return 0;
896 }
897 
898 static void convert_pr2zp(AVFilterContext *ctx, int channels)
899 {
900  AudioIIRContext *s = ctx->priv;
901  int ch;
902 
903  for (ch = 0; ch < channels; ch++) {
904  IIRChannel *iir = &s->iir[ch];
905  int n;
906 
907  for (n = 0; n < iir->nb_ab[0]; n++) {
908  double r = iir->ab[0][2*n];
909  double angle = iir->ab[0][2*n+1];
910 
911  iir->ab[0][2*n] = r * cos(angle);
912  iir->ab[0][2*n+1] = r * sin(angle);
913  }
914 
915  for (n = 0; n < iir->nb_ab[1]; n++) {
916  double r = iir->ab[1][2*n];
917  double angle = iir->ab[1][2*n+1];
918 
919  iir->ab[1][2*n] = r * cos(angle);
920  iir->ab[1][2*n+1] = r * sin(angle);
921  }
922  }
923 }
924 
925 static void convert_sp2zp(AVFilterContext *ctx, int channels)
926 {
927  AudioIIRContext *s = ctx->priv;
928  int ch;
929 
930  for (ch = 0; ch < channels; ch++) {
931  IIRChannel *iir = &s->iir[ch];
932  int n;
933 
934  for (n = 0; n < iir->nb_ab[0]; n++) {
935  double sr = iir->ab[0][2*n];
936  double si = iir->ab[0][2*n+1];
937 
938  iir->ab[0][2*n] = exp(sr) * cos(si);
939  iir->ab[0][2*n+1] = exp(sr) * sin(si);
940  }
941 
942  for (n = 0; n < iir->nb_ab[1]; n++) {
943  double sr = iir->ab[1][2*n];
944  double si = iir->ab[1][2*n+1];
945 
946  iir->ab[1][2*n] = exp(sr) * cos(si);
947  iir->ab[1][2*n+1] = exp(sr) * sin(si);
948  }
949  }
950 }
951 
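/*
 * Helpers for the analog ("sf") coefficient format: fact() is a plain
 * factorial and coef_sf2zf() maps one analog transfer-function coefficient
 * set to its digital equivalent, which appears to be a closed-form bilinear
 * transform (s = 2*(z-1)/(z+1) with a normalized sample period, hence the
 * pow(2., i) factor).
 */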
952 static double fact(double i)
953 {
954  if (i <= 0.)
955  return 1.;
956  return i * fact(i - 1.);
957 }
958 
959 static double coef_sf2zf(double *a, int N, int n)
960 {
961  double z = 0.;
962 
963  for (int i = 0; i <= N; i++) {
964  double acc = 0.;
965 
966  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
967  acc += ((fact(i) * fact(N - i)) /
968  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
969  ((k & 1) ? -1. : 1.);
970  }
971 
972  z += a[i] * pow(2., i) * acc;
973  }
974 
975  return z;
976 }
977 
978 static void convert_sf2tf(AVFilterContext *ctx, int channels)
979 {
980  AudioIIRContext *s = ctx->priv;
981  int ch;
982 
983  for (ch = 0; ch < channels; ch++) {
984  IIRChannel *iir = &s->iir[ch];
985  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
986  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
987 
988  if (!temp0 || !temp1)
989  goto next;
990 
991  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
992  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
993 
994  for (int n = 0; n < iir->nb_ab[0]; n++)
995  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
996 
997  for (int n = 0; n < iir->nb_ab[1]; n++)
998  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
999 
1000 next:
1001  av_free(temp0);
1002  av_free(temp1);
1003  }
1004 }
1005 
1006 static void convert_pd2zp(AVFilterContext *ctx, int channels)
1007 {
1008  AudioIIRContext *s = ctx->priv;
1009  int ch;
1010 
1011  for (ch = 0; ch < channels; ch++) {
1012  IIRChannel *iir = &s->iir[ch];
1013  int n;
1014 
1015  for (n = 0; n < iir->nb_ab[0]; n++) {
1016  double r = iir->ab[0][2*n];
1017  double angle = M_PI*iir->ab[0][2*n+1]/180.;
1018 
1019  iir->ab[0][2*n] = r * cos(angle);
1020  iir->ab[0][2*n+1] = r * sin(angle);
1021  }
1022 
1023  for (n = 0; n < iir->nb_ab[1]; n++) {
1024  double r = iir->ab[1][2*n];
1025  double angle = M_PI*iir->ab[1][2*n+1]/180.;
1026 
1027  iir->ab[1][2*n] = r * cos(angle);
1028  iir->ab[1][2*n+1] = r * sin(angle);
1029  }
1030  }
1031 }
1032 
1033 static void check_stability(AVFilterContext *ctx, int channels)
1034 {
1035  AudioIIRContext *s = ctx->priv;
1036  int ch;
1037 
1038  for (ch = 0; ch < channels; ch++) {
1039  IIRChannel *iir = &s->iir[ch];
1040 
1041  for (int n = 0; n < iir->nb_ab[0]; n++) {
1042  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
1043 
1044  if (pr >= 1.) {
1045  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
1046  break;
1047  }
1048  }
1049  }
1050 }
1051 
1052 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
1053 {
1054  const uint8_t *font;
1055  int font_height;
1056  int i;
1057 
1058  font = avpriv_cga_font, font_height = 8;
1059 
1060  for (i = 0; txt[i]; i++) {
1061  int char_y, mask;
1062 
1063  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1064  for (char_y = 0; char_y < font_height; char_y++) {
1065  for (mask = 0x80; mask; mask >>= 1) {
1066  if (font[txt[i] * font_height + char_y] & mask)
1067  AV_WL32(p, color);
1068  p += 4;
1069  }
1070  p += pic->linesize[0] - 8 * 4;
1071  }
1072  }
1073 }
1074 
1075 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1076 {
1077  int dx = FFABS(x1-x0);
1078  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1079  int err = (dx>dy ? dx : -dy) / 2, e2;
1080 
1081  for (;;) {
1082  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1083 
1084  if (x0 == x1 && y0 == y1)
1085  break;
1086 
1087  e2 = err;
1088 
1089  if (e2 >-dx) {
1090  err -= dy;
1091  x0--;
1092  }
1093 
1094  if (e2 < dy) {
1095  err += dx;
1096  y0 += sy;
1097  }
1098  }
1099 }
1100 
1101 static double distance(double x0, double x1, double y0, double y1)
1102 {
1103  return hypot(x0 - x1, y0 - y1);
1104 }
1105 
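/*
 * Evaluate the frequency response at angular frequency w: for the "tf"
 * format (0) the numerator and denominator polynomials are evaluated at
 * e^{-jw} and divided; for the zero/pole formats the magnitude is the
 * product of distances from e^{jw} to the zeros divided by the product of
 * distances to the poles, and the phase is the corresponding sum of angles.
 */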
1106 static void get_response(int channel, int format, double w,
1107  const double *b, const double *a,
1108  int nb_b, int nb_a, double *magnitude, double *phase)
1109 {
1110  double realz, realp;
1111  double imagz, imagp;
1112  double real, imag;
1113  double div;
1114 
1115  if (format == 0) {
1116  realz = 0., realp = 0.;
1117  imagz = 0., imagp = 0.;
1118  for (int x = 0; x < nb_a; x++) {
1119  realz += cos(-x * w) * a[x];
1120  imagz += sin(-x * w) * a[x];
1121  }
1122 
1123  for (int x = 0; x < nb_b; x++) {
1124  realp += cos(-x * w) * b[x];
1125  imagp += sin(-x * w) * b[x];
1126  }
1127 
1128  div = realp * realp + imagp * imagp;
1129  real = (realz * realp + imagz * imagp) / div;
1130  imag = (imagz * realp - imagp * realz) / div;
1131 
1132  *magnitude = hypot(real, imag);
1133  *phase = atan2(imag, real);
1134  } else {
1135  double p = 1., z = 1.;
1136  double acc = 0.;
1137 
1138  for (int x = 0; x < nb_a; x++) {
1139  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1140  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1141  }
1142 
1143  for (int x = 0; x < nb_b; x++) {
1144  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1145  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1146  }
1147 
1148  *magnitude = z / p;
1149  *phase = acc;
1150  }
1151 }
1152 
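/*
 * Render the response of the selected channel into the video frame:
 * magnitude, unwrapped phase and group delay are sampled at s->w points from
 * 0 to pi, scaled into the frame and drawn with draw_line(); min/max values
 * are printed with the built-in CGA font when the frame is large enough.
 */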
1153 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1154 {
1155  AudioIIRContext *s = ctx->priv;
1156  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1157  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1158  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1159  char text[32];
1160  int ch, i;
1161 
1162  memset(out->data[0], 0, s->h * out->linesize[0]);
1163 
1164  phase = av_malloc_array(s->w, sizeof(*phase));
1165  temp = av_malloc_array(s->w, sizeof(*temp));
1166  mag = av_malloc_array(s->w, sizeof(*mag));
1167  delay = av_malloc_array(s->w, sizeof(*delay));
1168  if (!mag || !phase || !delay || !temp)
1169  goto end;
1170 
1171  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1172  for (i = 0; i < s->w; i++) {
1173  const double *b = s->iir[ch].ab[0];
1174  const double *a = s->iir[ch].ab[1];
1175  const int nb_b = s->iir[ch].nb_ab[0];
1176  const int nb_a = s->iir[ch].nb_ab[1];
1177  double w = i * M_PI / (s->w - 1);
1178  double m, p;
1179 
1180  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1181 
1182  mag[i] = s->iir[ch].g * m;
1183  phase[i] = p;
1184  min = fmin(min, mag[i]);
1185  max = fmax(max, mag[i]);
1186  }
1187 
1188  temp[0] = 0.;
1189  for (i = 0; i < s->w - 1; i++) {
1190  double d = phase[i] - phase[i + 1];
1191  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1192  }
1193 
1194  min_phase = phase[0];
1195  max_phase = phase[0];
1196  for (i = 1; i < s->w; i++) {
1197  temp[i] += temp[i - 1];
1198  phase[i] += temp[i];
1199  min_phase = fmin(min_phase, phase[i]);
1200  max_phase = fmax(max_phase, phase[i]);
1201  }
1202 
1203  for (i = 0; i < s->w - 1; i++) {
1204  double div = s->w / (double)sample_rate;
1205 
1206  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1207  min_delay = fmin(min_delay, delay[i + 1]);
1208  max_delay = fmax(max_delay, delay[i + 1]);
1209  }
1210  delay[0] = delay[1];
1211 
1212  for (i = 0; i < s->w; i++) {
1213  int ymag = mag[i] / max * (s->h - 1);
1214  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1215  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1216 
1217  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1218  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1219  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1220 
1221  if (prev_ymag < 0)
1222  prev_ymag = ymag;
1223  if (prev_yphase < 0)
1224  prev_yphase = yphase;
1225  if (prev_ydelay < 0)
1226  prev_ydelay = ydelay;
1227 
1228  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1229  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1230  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1231 
1232  prev_ymag = ymag;
1233  prev_yphase = yphase;
1234  prev_ydelay = ydelay;
1235  }
1236 
1237  if (s->w > 400 && s->h > 100) {
1238  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1239  snprintf(text, sizeof(text), "%.2f", max);
1240  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1241 
1242  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1243  snprintf(text, sizeof(text), "%.2f", min);
1244  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1245 
1246  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1247  snprintf(text, sizeof(text), "%.2f", max_phase);
1248  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1249 
1250  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1251  snprintf(text, sizeof(text), "%.2f", min_phase);
1252  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1253 
1254  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1255  snprintf(text, sizeof(text), "%.2f", max_delay);
1256  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1257 
1258  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1259  snprintf(text, sizeof(text), "%.2f", min_delay);
1260  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1261  }
1262 
1263 end:
1264  av_free(delay);
1265  av_free(temp);
1266  av_free(phase);
1267  av_free(mag);
1268 }
1269 
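/*
 * Per-link setup: parse the gain and coefficient strings, convert the chosen
 * coefficient format into what the selected processing mode needs
 * (zeros/poles to transfer function for direct mode, zeros/poles to biquads
 * for serial or parallel mode), optionally render the response video, and
 * pick the per-channel processing function for the negotiated sample format.
 */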
1270 static int config_output(AVFilterLink *outlink)
1271 {
1272  AVFilterContext *ctx = outlink->src;
1273  AudioIIRContext *s = ctx->priv;
1274  AVFilterLink *inlink = ctx->inputs[0];
1275  int ch, ret, i;
1276 
1277  s->channels = inlink->channels;
1278  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1279  if (!s->iir)
1280  return AVERROR(ENOMEM);
1281 
1282  ret = read_gains(ctx, s->g_str, inlink->channels);
1283  if (ret < 0)
1284  return ret;
1285 
1286  ret = read_channels(ctx, inlink->channels, s->a_str, 0);
1287  if (ret < 0)
1288  return ret;
1289 
1290  ret = read_channels(ctx, inlink->channels, s->b_str, 1);
1291  if (ret < 0)
1292  return ret;
1293 
1294  if (s->format == -1) {
1295  convert_sf2tf(ctx, inlink->channels);
1296  s->format = 0;
1297  } else if (s->format == 2) {
1298  convert_pr2zp(ctx, inlink->channels);
1299  } else if (s->format == 3) {
1300  convert_pd2zp(ctx, inlink->channels);
1301  } else if (s->format == 4) {
1302  convert_sp2zp(ctx, inlink->channels);
1303  }
1304  if (s->format > 0) {
1305  check_stability(ctx, inlink->channels);
1306  }
1307 
1308  av_frame_free(&s->video);
1309  if (s->response) {
1310  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1311  if (!s->video)
1312  return AVERROR(ENOMEM);
1313 
1314  draw_response(ctx, s->video, inlink->sample_rate);
1315  }
1316 
1317  if (s->format == 0)
1318  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for too high number of zeros/poles.\n");
1319 
1320  if (s->format > 0 && s->process == 0) {
1321  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1322 
1323  ret = convert_zp2tf(ctx, inlink->channels);
1324  if (ret < 0)
1325  return ret;
1326  } else if (s->format == -2 && s->process > 0) {
1327  av_log(ctx, AV_LOG_ERROR, "Only direct processing is implemented for lattice-ladder function.\n");
1328  return AVERROR_PATCHWELCOME;
1329  } else if (s->format <= 0 && s->process == 1) {
1330  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1331  return AVERROR_PATCHWELCOME;
1332  } else if (s->format <= 0 && s->process == 2) {
1333  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1334  return AVERROR_PATCHWELCOME;
1335  } else if (s->format > 0 && s->process == 1) {
1336  ret = decompose_zp2biquads(ctx, inlink->channels);
1337  if (ret < 0)
1338  return ret;
1339  } else if (s->format > 0 && s->process == 2) {
1340  if (s->precision > 1)
1341  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1342  ret = decompose_zp2biquads(ctx, inlink->channels);
1343  if (ret < 0)
1344  return ret;
1345  ret = convert_serial2parallel(ctx, inlink->channels);
1346  if (ret < 0)
1347  return ret;
1348  }
1349 
1350  for (ch = 0; s->format == -2 && ch < inlink->channels; ch++) {
1351  IIRChannel *iir = &s->iir[ch];
1352 
1353  if (iir->nb_ab[0] != iir->nb_ab[1] + 1) {
1354  av_log(ctx, AV_LOG_ERROR, "Number of ladder coefficients must be one more than number of reflection coefficients.\n");
1355  return AVERROR(EINVAL);
1356  }
1357  }
1358 
1359  for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
1360  IIRChannel *iir = &s->iir[ch];
1361 
1362  for (i = 1; i < iir->nb_ab[0]; i++) {
1363  iir->ab[0][i] /= iir->ab[0][0];
1364  }
1365 
1366  iir->ab[0][0] = 1.0;
1367  for (i = 0; i < iir->nb_ab[1]; i++) {
1368  iir->ab[1][i] *= iir->g;
1369  }
1370 
1371  normalize_coeffs(ctx, ch);
1372  }
1373 
1374  switch (inlink->format) {
1375  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1376  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1377  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1378  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1379  }
1380 
1381  if (s->format == -2) {
1382  switch (inlink->format) {
1383  case AV_SAMPLE_FMT_DBLP: s->iir_channel = iir_ch_lattice_dblp; break;
1384  case AV_SAMPLE_FMT_FLTP: s->iir_channel = iir_ch_lattice_fltp; break;
1385  case AV_SAMPLE_FMT_S32P: s->iir_channel = iir_ch_lattice_s32p; break;
1386  case AV_SAMPLE_FMT_S16P: s->iir_channel = iir_ch_lattice_s16p; break;
1387  }
1388  }
1389 
1390  return 0;
1391 }
1392 
1393 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1394 {
1395  AVFilterContext *ctx = inlink->dst;
1396  AudioIIRContext *s = ctx->priv;
1397  AVFilterLink *outlink = ctx->outputs[0];
1398  ThreadData td;
1399  AVFrame *out;
1400  int ch, ret;
1401 
1402  if (av_frame_is_writable(in) && s->process != 2) {
1403  out = in;
1404  } else {
1405  out = ff_get_audio_buffer(outlink, in->nb_samples);
1406  if (!out) {
1407  av_frame_free(&in);
1408  return AVERROR(ENOMEM);
1409  }
1410  av_frame_copy_props(out, in);
1411  }
1412 
1413  td.in = in;
1414  td.out = out;
1415  ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);
1416 
1417  for (ch = 0; ch < outlink->channels; ch++) {
1418  if (s->iir[ch].clippings > 0)
1419  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1420  ch, s->iir[ch].clippings);
1421  s->iir[ch].clippings = 0;
1422  }
1423 
1424  if (in != out)
1425  av_frame_free(&in);
1426 
1427  if (s->response) {
1428  AVFilterLink *outlink = ctx->outputs[1];
1429  int64_t old_pts = s->video->pts;
1430  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1431 
1432  if (new_pts > old_pts) {
1433  AVFrame *clone;
1434 
1435  s->video->pts = new_pts;
1436  clone = av_frame_clone(s->video);
1437  if (!clone)
1438  return AVERROR(ENOMEM);
1439  ret = ff_filter_frame(outlink, clone);
1440  if (ret < 0)
1441  return ret;
1442  }
1443  }
1444 
1445  return ff_filter_frame(outlink, out);
1446 }
1447 
1448 static int config_video(AVFilterLink *outlink)
1449 {
1450  AVFilterContext *ctx = outlink->src;
1451  AudioIIRContext *s = ctx->priv;
1452 
1453  outlink->sample_aspect_ratio = (AVRational){1,1};
1454  outlink->w = s->w;
1455  outlink->h = s->h;
1456  outlink->frame_rate = s->rate;
1457  outlink->time_base = av_inv_q(outlink->frame_rate);
1458 
1459  return 0;
1460 }
1461 
1462 static av_cold int init(AVFilterContext *ctx)
1463 {
1464  AudioIIRContext *s = ctx->priv;
1465  AVFilterPad pad, vpad;
1466  int ret;
1467 
1468  if (!s->a_str || !s->b_str || !s->g_str) {
1469  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1470  return AVERROR(EINVAL);
1471  }
1472 
1473  switch (s->precision) {
1474  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1475  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1476  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1477  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1478  default: return AVERROR_BUG;
1479  }
1480 
1481  pad = (AVFilterPad){
1482  .name = "default",
1483  .type = AVMEDIA_TYPE_AUDIO,
1484  .config_props = config_output,
1485  };
1486 
1487  ret = ff_insert_outpad(ctx, 0, &pad);
1488  if (ret < 0)
1489  return ret;
1490 
1491  if (s->response) {
1492  vpad = (AVFilterPad){
1493  .name = "filter_response",
1494  .type = AVMEDIA_TYPE_VIDEO,
1495  .config_props = config_video,
1496  };
1497 
1498  ret = ff_insert_outpad(ctx, 1, &vpad);
1499  if (ret < 0)
1500  return ret;
1501  }
1502 
1503  return 0;
1504 }
1505 
1506 static av_cold void uninit(AVFilterContext *ctx)
1507 {
1508  AudioIIRContext *s = ctx->priv;
1509  int ch;
1510 
1511  if (s->iir) {
1512  for (ch = 0; ch < s->channels; ch++) {
1513  IIRChannel *iir = &s->iir[ch];
1514  av_freep(&iir->ab[0]);
1515  av_freep(&iir->ab[1]);
1516  av_freep(&iir->cache[0]);
1517  av_freep(&iir->cache[1]);
1518  av_freep(&iir->biquads);
1519  }
1520  }
1521  av_freep(&s->iir);
1522 
1523  av_frame_free(&s->video);
1524 }
1525 
1526 static const AVFilterPad inputs[] = {
1527  {
1528  .name = "default",
1529  .type = AVMEDIA_TYPE_AUDIO,
1530  .filter_frame = filter_frame,
1531  },
1532  { NULL }
1533 };
1534 
1535 #define OFFSET(x) offsetof(AudioIIRContext, x)
1536 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1537 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1538 
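/*
 * Option table. As a rough illustrative example (the coefficients below are
 * made up, not a designed filter), a second-order section in digital
 * transfer-function form could be applied with something like:
 *   ffmpeg -i in.wav -af "aiir=zeros=1 -2 1:poles=1 -1.8 0.82:format=tf:process=d" out.wav
 * Coefficient strings are per-channel lists separated by '|', with the
 * coefficients of one channel separated by spaces.
 */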
1539 static const AVOption aiir_options[] = {
1540  { "zeros", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1541  { "z", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1542  { "poles", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1543  { "p", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1544  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1545  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1546  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1547  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1548  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, "format" },
1549  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, "format" },
1550  { "ll", "lattice-ladder function", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, AF, "format" },
1551  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "format" },
1552  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
1553  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
1554  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
1555  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
1556  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
1557  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1558  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1559  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
1560  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
1561  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "process" },
1562  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1563  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1564  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
1565  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
1566  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
1567  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
1568  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1569  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1570  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1571  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1572  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1573  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1574  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1575  { NULL },
1576 };
1577 
1578 AVFILTER_DEFINE_CLASS(aiir);
1579 
1580 AVFilter ff_af_aiir = {
1581  .name = "aiir",
1582  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1583  .priv_size = sizeof(AudioIIRContext),
1584  .priv_class = &aiir_class,
1585  .init = init,
1586  .uninit = uninit,
1587  .query_formats = query_formats,
1588  .inputs = inputs,
1589  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
1590  AVFILTER_FLAG_SLICE_THREADS,
1591 };
Definition: pixfmt.h:64
W
@ W
Definition: vf_addroi.c:26
av_clip
#define av_clip
Definition: common.h:122
mix
static int mix(int c0, int c1)
Definition: 4xm.c:715
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:555
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aiir)
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:583
IIRChannel::clippings
int clippings
Definition: af_aiir.c:53
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1096
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:925
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:238
AF
#define AF
Definition: af_aiir.c:1536
inputs
static const AVFilterPad inputs[]
Definition: af_aiir.c:1526
IIRChannel::nb_ab
int nb_ab[2]
Definition: af_aiir.c:47
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
BiquadContext::a
double a[3]
Definition: af_aiir.c:41
aiir_options
static const AVOption aiir_options[]
Definition: af_aiir.c:1539
convert_serial2parallel
static int convert_serial2parallel(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:828
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:436
RE
#define RE(x, ch)
read_channels
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
Definition: af_aiir.c:458
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
AudioIIRContext::ir_channel
int ir_channel
Definition: af_aiir.c:67
IIRChannel::biquads
BiquadContext * biquads
Definition: af_aiir.c:52
w
uint8_t w
Definition: llviddspenc.c:39
AVOption
AVOption.
Definition: opt.h:248
b
#define b
Definition: input.c:41
IIR_CH
#define IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:123
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:68
read_tf_coefficients
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
Definition: af_aiir.c:406
AudioIIRContext::iir_channel
int(* iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
Definition: af_aiir.c:76
check_stability
static void check_stability(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1033
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_aiir.c:1270
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AudioIIRContext::b_str
char * b_str
Definition: af_aiir.c:58
solve
static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
Definition: af_aiir.c:794
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:502
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_aiir.c:1448
AudioIIRContext::video
AVFrame * video
Definition: af_aiir.c:70
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1699
sample_rate
sample_rate
Definition: ffmpeg_filter.c:170
AudioIIRContext::g_str
char * g_str
Definition: af_aiir.c:58
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:65
BiquadContext
Definition: af_aiir.c:40
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:1665
fail
#define fail()
Definition: checkasm.h:133
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_aiir.c:79
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
AudioIIRContext::h
int h
Definition: af_aiir.c:66
a1
#define a1
Definition: regdef.h:47
AudioIIRContext::process
int process
Definition: af_aiir.c:63
avassert.h
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
av_cold
#define av_cold
Definition: attributes.h:90
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:587
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aiir.c:1393
convert_zp2tf
static int convert_zp2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:566
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:466
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:186
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:33
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
IIRChannel::cache
double * cache[2]
Definition: af_aiir.c:50
NAN
#define NAN
Definition: mathematics.h:64
SERIAL_IIR_CH
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:179
int32_t
int32_t
Definition: audio_convert.c:194
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:962
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
biquad
@ biquad
Definition: af_biquads.c:73
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:235
AudioIIRContext::rate
AVRational rate
Definition: af_aiir.c:68
normalize_coeffs
static void normalize_coeffs(AVFilterContext *ctx, int ch)
Definition: af_aiir.c:538
exp
int8_t exp
Definition: eval.c:72
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_aiir.c:1462
convert_sp2zp
static void convert_sp2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:925
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
BiquadContext::w1
double w1
Definition: af_aiir.c:43
BiquadContext::w2
double w2
Definition: af_aiir.c:43
format
static const char *const format[]
Definition: af_aiir.c:456
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_aiir.c:1075
fmin
double fmin(double, double)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
read_zp_coefficients
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
Definition: af_aiir.c:431
IM
#define IM(x, ch)
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1666
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
a
The reader does not expect b to be semantically negative here, and if the code is changed (by maybe adding a division or other) the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
Definition: undefined.txt:41
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:238
xga_font_data.h
N
#define N
Definition: af_mcompand.c:54
fact
static double fact(double i)
Definition: af_aiir.c:952
IIRChannel::g
double g
Definition: af_aiir.c:49
Pair::b
int b
Definition: af_aiir.c:37
ff_af_aiir
AVFilter ff_af_aiir
Definition: af_aiir.c:1580
M_PI
#define M_PI
Definition: mathematics.h:52
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
internal.h
AudioIIRContext::a_str
char * a_str
Definition: af_aiir.c:58
in
Definition: audio_convert.c:326
normalize
Definition: normalize.py:1
i
int i
Definition: input.c:407
VF
#define VF
Definition: af_aiir.c:1537
AudioIIRContext::sample_format
enum AVSampleFormat sample_format
Definition: af_aiir.c:74
OFFSET
#define OFFSET(x)
Definition: af_aiir.c:1535
IIRChannel::ab
double * ab[2]
Definition: af_aiir.c:48
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
a2
#define a2
Definition: regdef.h:48
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
uint8_t
uint8_t
Definition: audio_convert.c:194
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AudioIIRContext::precision
int precision
Definition: af_aiir.c:64
convert_sf2tf
static void convert_sf2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:978
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AudioIIRContext::wet_gain
double wet_gain
Definition: af_aiir.c:59
get_response
static void get_response(int channel, int format, double w, const double *b, const double *a, int nb_b, int nb_a, double *magnitude, double *phase)
Definition: af_aiir.c:1106
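
get_response() computes the filter's magnitude and phase at angular frequency w so draw_response() can plot them; the format argument selects which coefficient representation is being evaluated. A hedged sketch of the transfer-function case only, evaluating B and A on the unit circle and dividing (the real function also handles the zero/pole representation):

#include <math.h>

/* Evaluate |H| and arg(H) of B(z)/A(z) on the unit circle at angular
 * frequency w (radians/sample), transfer-function coefficients only. */
static void tf_response_sketch(double w, const double *b, const double *a,
                               int nb_b, int nb_a,
                               double *magnitude, double *phase)
{
    double br = 0., bi = 0., ar = 0., ai = 0.;

    for (int k = 0; k < nb_b; k++) {
        br += b[k] * cos(-k * w);
        bi += b[k] * sin(-k * w);
    }
    for (int k = 0; k < nb_a; k++) {
        ar += a[k] * cos(-k * w);
        ai += a[k] * sin(-k * w);
    }

    /* H = (br + j*bi) / (ar + j*ai) */
    {
        double den = ar * ar + ai * ai;
        double hr  = (br * ar + bi * ai) / den;
        double hi  = (bi * ar - br * ai) / den;

        *magnitude = hypot(hr, hi);
        *phase     = atan2(hi, hr);
    }
}
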
AVFilter
Filter definition.
Definition: avfilter.h:145
cmul
static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
Definition: af_aiir.c:505
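
From the signature, cmul() multiplies two complex numbers given as (re, im) pairs and returns the product through RE/IM. A hedged sketch of what such a helper typically looks like, using the textbook formula (not claimed to be a verbatim copy of the filter's code):

/* (re + i*im) * (re2 + i*im2) = (re*re2 - im*im2) + i*(re*im2 + im*re2) */
static void cmul_sketch(double re, double im, double re2, double im2,
                        double *RE, double *IM)
{
    *RE = re * re2 - im * im2;
    *IM = re * im2 + im * re2;
}
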
PARALLEL_IIR_CH
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:238
ret
ret
Definition: filter_design.txt:187
AudioIIRContext::dry_gain
double dry_gain
Definition: af_aiir.c:59
Pair::a
int a
Definition: af_aiir.c:37
fmax
double fmax(double, double)
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:421
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
convert_pr2zp
static void convert_pr2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:898
BiquadContext::b
double b[3]
Definition: af_aiir.c:42
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
Definition: af_aiir.c:1153
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_insert_outpad
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:248
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
AudioIIRContext::response
int response
Definition: af_aiir.c:65
distance
static double distance(double x0, double x1, double y0, double y1)
Definition: af_aiir.c:1101
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:70
temp
else temp
Definition: vf_mcdeint.c:259
AudioIIRContext::channels
int channels
Definition: af_aiir.c:73
decompose_zp2biquads
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:614
AVFilterContext
An instance of a filter.
Definition: avfilter.h:341
factor
static const int factor[16]
Definition: vf_pp7.c:77
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
Definition: avfilter.h:117
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
LATTICE_IIR_CH
#define LATTICE_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:302
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
count_coefficients
static void count_coefficients(char *item_str, int *nb_items)
Definition: af_aiir.c:359
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:445
ThreadData::in
AVFrame * in
Definition: af_adenorm.c:223
biquad_process
static void biquad_process(double *x, double *y, int length, double b0, double b1, double b2, double a1, double a2)
Definition: af_aiir.c:776
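
biquad_process() runs one second-order section over a block of samples with a0 already normalized out. For reference, the textbook direct form I difference equation is y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]; the sketch below implements that form and is not claimed to match the filter's exact state handling (the w1/w2 members of BiquadContext suggest a direct form II variant elsewhere in the code):

/* Textbook direct form I biquad, assuming a0 == 1; sign conventions for
 * a1/a2 vary between implementations, so treat this as illustrative. */
static void biquad_sketch(const double *x, double *y, int length,
                          double b0, double b1, double b2,
                          double a1, double a2)
{
    double in1 = 0., in2 = 0., out1 = 0., out2 = 0.;

    for (int n = 0; n < length; n++) {
        double out = b0 * x[n] + b1 * in1 + b2 * in2
                   - a1 * out1 - a2 * out2;

        in2  = in1;  in1  = x[n];
        out2 = out1; out1 = out;
        y[n]  = out;
    }
}
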
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:511
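
expand() is used when converting zeros/poles to transfer-function coefficients: multiplying out a product of first-order factors yields the polynomial coefficients. The sketch below shows the generic technique, using the same complex product as the cmul() sketch above; the split re/im arrays are an illustrative layout, and the actual function's argument layout and scaling may differ:

/* Generic sketch: expand prod_k (z - r_k) for n complex roots r_k into
 * n+1 complex coefficients c[0..n], lowest order first. */
static void expand_roots_sketch(const double *root_re, const double *root_im,
                                int n, double *c_re, double *c_im)
{
    c_re[0] = 1.0;
    c_im[0] = 0.0;

    for (int k = 0; k < n; k++) {
        c_re[k + 1] = 0.0;
        c_im[k + 1] = 0.0;

        /* Multiply the current polynomial by (z - r_k):
         * new c[j] = old c[j-1] - r_k * old c[j], going from high order
         * to low so the old values are still intact when read. */
        for (int j = k + 1; j >= 0; j--) {
            double tr = root_re[k] * c_re[j] - root_im[k] * c_im[j];
            double ti = root_re[k] * c_im[j] + root_im[k] * c_re[j];

            c_re[j] = (j > 0 ? c_re[j - 1] : 0.0) - tr;
            c_im[j] = (j > 0 ? c_im[j - 1] : 0.0) - ti;
        }
    }
}
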
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
AudioIIRContext
Definition: af_aiir.c:56
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AudioIIRContext::iir
IIRChannel * iir
Definition: af_aiir.c:72
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:575
b0
static double b0(void *priv, double x, double y)
Definition: vf_xfade.c:1664
IIRChannel::fir
double fir
Definition: af_aiir.c:51
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
AudioIIRContext::normalize
int normalize
Definition: af_aiir.c:61
int
int
Definition: ffmpeg_filter.c:170
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aiir.c:1506
snprintf
#define snprintf
Definition: snprintf.h:34
read_gains
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
Definition: af_aiir.c:373
IIRChannel
Definition: af_aiir.c:46
AudioIIRContext::mix
double mix
Definition: af_aiir.c:60
convert_pd2zp
static void convert_pd2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1006
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_aiir.c:1052
channel
channel
Definition: ebur128.h:39
re
float re
Definition: fft.c:82
ff_set_common_channel_layouts
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *channel_layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates.
Definition: formats.c:568
min
float min
Definition: vorbis_enc_data.h:456
AudioIIRContext::w
int w
Definition: af_aiir.c:66