FFmpeg
af_anequalizer.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/ffmath.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/parseutils.h"
27 #include "avfilter.h"
28 #include "internal.h"
29 #include "audio.h"
30 
31 #define FILTER_ORDER 4
32 
/* Band-pass prototype designs selectable per band via the "t=" field;
 * the parsed value is clipped to [0, NB_TYPES - 1] in config_input().
 * NOTE(review): the enumerators were elided in this extract and are
 * reconstructed from the switch in equalizer() — confirm order/values. */
enum FilterType {
    BUTTERWORTH,  /* default (t=0): maximally flat passband */
    CHEBYSHEV1,   /* equiripple passband */
    CHEBYSHEV2,   /* equiripple stopband (inverse Chebyshev) */
    NB_TYPES      /* count sentinel, not a valid type */
};
39 
/* One fourth-order IIR section in direct form I.
 * a0..a4 are denominator (feedback) and b0..b4 numerator (feed-forward)
 * coefficients; num[]/denum[] are the delay lines holding the last four
 * input and output samples respectively (see section_process()). */
typedef struct FoSection {
    double a0, a1, a2, a3, a4;   /* denominator coefficients (a0 set to 1) */
    double b0, b1, b2, b3, b4;   /* numerator coefficients */

    double num[4];               /* past inputs  x[n-1] .. x[n-4] */
    double denum[4];             /* past outputs y[n-1] .. y[n-4] */
} FoSection;
47 
/* One equalizer band: the user-specified parameters plus the cascaded
 * fourth-order sections that realize the FILTER_ORDER band-pass.
 * NOTE(review): the trailing section[] field was elided in this extract
 * and is reconstructed from its uses — confirm against the original. */
typedef struct EqualizatorFilter {
    int ignore;    /* set when freq or channel is out of range; band is skipped */
    int channel;   /* 0-based input channel this band applies to */
    int type;      /* enum FilterType */

    double freq;   /* center frequency, Hz */
    double gain;   /* peak gain, dB */
    double width;  /* bandwidth, Hz */

    FoSection section[2];  /* FILTER_ORDER/2 cascaded sections */
} EqualizatorFilter;
/* Filter private context.
 * NOTE(review): several fields were elided in this extract and are
 * reconstructed from their uses elsewhere in the file — confirm. */
typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;              /* "params" option: '|'-separated band descriptions */
    char *colors;            /* per-channel curve colors for the video output */
    int draw_curves;         /* bool option: expose a second, video output */
    int w, h;                /* video output size */

    double mag;              /* full-scale gain of the curve display, dB */
    int fscale;              /* display frequency scale: 0 linear, 1 logarithmic */
    int nb_filters;          /* number of configured bands */
    int nb_allocated;        /* capacity of the filters array */
    EqualizatorFilter *filters;  /* dynamically grown array of bands */
    AVFrame *video;          /* cached frame holding the drawn response curves */
} AudioNEqualizerContext;
75 #define OFFSET(x) offsetof(AudioNEqualizerContext, x)
76 #define A AV_OPT_FLAG_AUDIO_PARAM
77 #define V AV_OPT_FLAG_VIDEO_PARAM
78 #define F AV_OPT_FLAG_FILTERING_PARAM
79 
/* User options.  A = audio, V = video (curves output), F = generic
 * filtering flag.  "params" carries the band list parsed in
 * config_input(); the remaining options only affect the optional
 * frequency-response video output. */
static const AVOption anequalizer_options[] = {
  { "params", NULL,                             OFFSET(args),        AV_OPT_TYPE_STRING,     {.str=""},     0, 0, A|F },
  { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL,       {.i64=0},      0, 1, V|F },
  { "size",   "set video size",                 OFFSET(w),           AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
  { "mgain",  "set max gain",                   OFFSET(mag),         AV_OPT_TYPE_DOUBLE,     {.dbl=60},  -900, 900, V|F },
  { "fscale", "set frequency scale",            OFFSET(fscale),      AV_OPT_TYPE_INT,        {.i64=1},      0, 1, V|F, "fscale" },
    { "lin",  "linear",                         0,                   AV_OPT_TYPE_CONST,      {.i64=0},      0, 0, V|F, "fscale" },
    { "log",  "logarithmic",                    0,                   AV_OPT_TYPE_CONST,      {.i64=1},      0, 0, V|F, "fscale" },
  { "colors", "set channels curves colors",     OFFSET(colors),      AV_OPT_TYPE_STRING,     {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
  { NULL }
};
91 
92 AVFILTER_DEFINE_CLASS(anequalizer);
93 
/* Render the combined frequency response of all bands into the video
 * frame: one colored curve per input channel, evaluated at s->w points
 * along a linear or logarithmic frequency axis.  Colors are taken in
 * order from the '|' / ' ' separated s->colors list (white fallback).
 * NOTE(review): the signature and the ctx->priv line were elided in
 * this extract and are reconstructed from the index — confirm. */
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;

    colors = av_strdup(s->colors);
    if (!colors)
        return;  /* out of memory: silently skip drawing */

    /* clear the whole RGBA canvas */
    memset(out->data[0], 0, s->h * out->linesize[0]);

    for (ch = 0; ch < inlink->channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };  /* default: white */
        int prev_v = -1;
        double f;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;

            /* map pixel column f to angular frequency w in [0, pi],
             * linearly or logarithmically depending on fscale */
            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;

            /* accumulate |H(e^jw)| over every active band on this channel */
            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;

                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];

                    /* Expanded real/imaginary parts of
                     * H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                     *       ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }

            /* magnitude (dB) -> y pixel: 0 dB is mid-height, gains above
             * 0 dB go toward the top of the frame */
            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            /* draw a vertical segment joining the previous column's point
             * so the curve is continuous */
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }

            prev_v = v;
        }
    }

    av_free(colors);
}
166 
/* Configure the optional video output: apply the requested size,
 * (re)allocate the cached frame holding the response curves and draw
 * them once.  Returns 0 or AVERROR(ENOMEM). */
static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *out;

    outlink->w = s->w;
    outlink->h = s->h;

    /* drop any previous frame (config may run more than once) */
    av_frame_free(&s->video);
    s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    outlink->sample_aspect_ratio = (AVRational){1,1};

    draw_curves(ctx, inlink, out);

    return 0;
}
187 
/* Create the audio output pad and, when curve drawing is requested, a
 * second video output pad — which is why the filter declares
 * AVFILTER_FLAG_DYNAMIC_OUTPUTS.
 * NOTE(review): the signature line was elided in this extract and is
 * reconstructed from the index — confirm. */
static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad){
        .name = "out0",
        .type = AVMEDIA_TYPE_AUDIO,
    };

    ret = ff_insert_outpad(ctx, 0, &pad);
    if (ret < 0)
        return ret;

    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name = "out1",
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        ret = ff_insert_outpad(ctx, 1, &vpad);
        if (ret < 0)
            return ret;
    }

    return 0;
}
216 
/* Advertise supported formats: planar double audio at any sample rate
 * and channel layout on the audio links, RGBA on the optional video
 * output.
 * NOTE(review): the signature, the formats/layouts declarations and the
 * sample_fmts entries were elided in this extract; they are
 * reconstructed from the index (which shows "double, planar") and from
 * usage — confirm against the original source. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->incfg.channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
        return ret;

    return 0;
}
255 
/* Release the cached curves frame and the bands array.
 * NOTE(review): the signature line was elided in this extract and is
 * reconstructed from the index — confirm. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;

    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters = 0;
    s->nb_allocated = 0;
}
265 
/* Fill one fourth-order section with Butterworth band-pass peaking
 * coefficients (bilinear-transformed analog prototype).
 *   beta   prewarped bandwidth term
 *   si     sine of the pole angle for this section
 *   g, g0  Nth roots of the peak and reference linear gains
 *   D      common denominator
 *   c0     cos(w0), encodes the center frequency
 * When c0 is +/-1 (center at DC or Nyquist) the band-pass degenerates
 * to a second-order shelving section: b3/b4/a3/a4 are zeroed. */
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}
296 
/* Design an order-N Butterworth band-pass peaking filter as
 * FILTER_ORDER/2 cascaded fourth-order sections.
 *   w0  center frequency (rad/sample)
 *   wb  bandwidth (rad/sample)
 *   G   peak gain (dB), Gb gain at the band edges (dB), G0 reference gain (dB)
 * G == 0 && G0 == 0 produces an identity filter: only a0/b0 are written,
 * which presumes the sections are otherwise zeroed — the filters array
 * is allocated with av_calloc() in config_input().
 * NOTE(review): the first signature line was elided in this extract and
 * is reconstructed from the index — confirm. */
static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    /* dB -> linear */
    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g  = pow(G, 1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);  /* bilinear prewarp */
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;

        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}
333 
/* Fill one fourth-order section with Chebyshev type-1 band-pass peaking
 * coefficients.
 *   a, b     pole/zero radius terms derived in chebyshev1_bp_filter()
 *   c        cosine of the pole angle for this section
 *   tetta_b  tan(wb/2), prewarped bandwidth
 *   g0       Nth root of the reference linear gain
 *   si       sine of the pole angle
 *   D        common denominator, c0 = cos(w0)
 * c0 == +/-1 degenerates to a second-order shelving section. */
static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}
365 
/* Design an order-N Chebyshev type-1 band-pass peaking filter as
 * FILTER_ORDER/2 cascaded fourth-order sections.  Parameters as in
 * butterworth_bp_filter(); G == 0 && G0 == 0 yields an identity filter
 * (sections presumed zeroed — see av_calloc() in config_input()).
 * NOTE(review): the first signature line was elided in this extract and
 * is reconstructed from the index — confirm. */
static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    /* dB -> linear */
    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0,1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);  /* bilinear prewarp */
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;

        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}
406 
/* Fill one fourth-order section with Chebyshev type-2 (inverse
 * Chebyshev) band-pass peaking coefficients.  Parameters mirror
 * chebyshev1_fo_section(), with g the Nth root of the peak linear gain.
 * c0 == +/-1 degenerates to a second-order shelving section. */
static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}
438 
/* Design an order-N Chebyshev type-2 band-pass peaking filter as
 * FILTER_ORDER/2 cascaded fourth-order sections.  Parameters as in
 * butterworth_bp_filter(); G == 0 && G0 == 0 yields an identity filter
 * (sections presumed zeroed — see av_calloc() in config_input()).
 * NOTE(review): the first signature line was elided in this extract and
 * is reconstructed from the index — confirm. */
static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    /* dB -> linear */
    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g  = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);  /* bilinear prewarp */
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;

        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}
479 
/* Map a band's peak gain (dB) to the gain required at the band edges
 * (dB) for the Butterworth design: half the gain (in dB) for small
 * gains, peak -/+ 3 dB once |gain| reaches 6 dB. */
static double butterworth_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return gain + 3;
    if (gain < 6)
        return gain * 0.5;
    return gain - 3;
}
493 
/* Map a band's peak gain (dB) to the gain required at the band edges
 * (dB) for the Chebyshev type-1 design: 90% of the gain for small
 * gains, peak -/+ 1 dB once |gain| reaches 6 dB. */
static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = gain * 0.9;

    if (gain <= -6)
        bw_gain = gain + 1;
    else if (gain >= 6)
        bw_gain = gain - 1;

    return bw_gain;
}
507 
/* Map a band's peak gain (dB) to the gain required at the band edges
 * (dB) for the Chebyshev type-2 design: 30% of the gain for small
 * gains, saturating at -/+ 3 dB once |gain| reaches 6 dB. */
static double chebyshev2_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return -3;
    return gain >= 6 ? 3 : gain * 0.3;
}
521 
/* Convert a frequency in Hz to angular frequency in radians per sample
 * for sampling rate fs (Nyquist maps to pi). */
static inline double hz_2_rad(double x, double fs)
{
    return x * (2 * M_PI) / fs;
}
526 
/* Compute the coefficients for one band according to its type:
 * convert freq/width from Hz to rad/sample, derive the band-edge gain
 * from the peak gain, then run the matching design routine.
 * NOTE(review): the signature line was elided in this extract and is
 * reconstructed from the index — confirm. */
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);
    double wb = hz_2_rad(f->width, sample_rate);
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }

}
549 
/* Finalize the band staged at s->filters[s->nb_filters] (compute its
 * coefficients) and commit it, doubling the filters array when fewer
 * than two free slots remain so the next staging slot always exists.
 * Returns 0 on success or AVERROR(ENOMEM).
 * NOTE(review): the signature and the local declaration were elided in
 * this extract and are reconstructed from the index/usage — confirm. */
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        /* grow to twice the current capacity (new tail is zeroed) */
        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}
568 
/* Parse the "params" option into the filters array and compute each
 * band's coefficients.  Band syntax: "cCH f=HZ w=HZ g=DB [t=TYPE]",
 * bands separated by '|'.  Out-of-range frequency or channel flags the
 * band as ignore (it is still stored).  Returns 0, AVERROR(EINVAL) on
 * a malformed band, or AVERROR(ENOMEM).
 * NOTE(review): the signature line was elided in this extract and is
 * reconstructed from the index — confirm. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    /* initial capacity: 32 bands per channel, zero-initialized (the
     * identity-filter path in the design routines relies on this) */
    s->nb_allocated = 32 * inlink->channels;
    s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        /* try the 5-field form (with t=) first, then the 4-field form */
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                            &s->filters[s->nb_filters].freq,
                            &s->filters[s->nb_filters].width,
                            &s->filters[s->nb_filters].gain,
                            &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                           &s->filters[s->nb_filters].freq,
                           &s->filters[s->nb_filters].width,
                           &s->filters[s->nb_filters].gain) != 4 ) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        /* reject bands below 0 Hz or above Nyquist */
        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}
626 
/* Handle the runtime "change" command: "FILTER|f=HZ|w=HZ|g=DB" updates
 * one existing band's parameters, recomputes its coefficients, and
 * redraws the response curves if the video output is enabled.
 * Returns 0, AVERROR(EINVAL) on bad input, or AVERROR(ENOSYS) for an
 * unknown command. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        /* frequency must stay within [0, Nyquist] */
        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}
659 
/* Apply one fourth-order direct-form-I section to a single sample and
 * update its delay lines.  num[] holds past inputs, denum[] past
 * outputs; the shift order below must be preserved (oldest first). */
static inline double section_process(FoSection *S, double in)
{
    double out;

    /* feed-forward taps minus feedback taps */
    out = S->b0 * in;
    out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
    out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
    out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
    out+= S->b4 * S->num[3] - S->denum[3] * S->a4;

    /* shift the input history */
    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    /* shift the output history */
    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}
682 
683 static double process_sample(FoSection *s1, double in)
684 {
685  double p0 = in, p1;
686  int i;
687 
688  for (i = 0; i < FILTER_ORDER / 2; i++) {
689  p1 = section_process(&s1[i], p0);
690  p0 = p1;
691  }
692 
693  return p1;
694 }
695 
/* Filter one audio frame in place: run every active band over its
 * channel's samples, then, if curve drawing is enabled, push a clone of
 * the cached curves frame on the video output stamped with the pts of
 * the end of this audio frame.
 * NOTE(review): the signature line was elided in this extract and is
 * reconstructed from the index — confirm. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double *bptr;
    int i, n;

    for (i = 0; i < s->nb_filters; i++) {
        EqualizatorFilter *f = &s->filters[i];

        /* 0 dB bands are a no-op; ignored bands were rejected at parse time */
        if (f->gain == 0. || f->ignore)
            continue;

        bptr = (double *)buf->extended_data[f->channel];
        for (n = 0; n < buf->nb_samples; n++) {
            double sample = bptr[n];

            sample = process_sample(f->section, sample);
            bptr[n] = sample;
        }
    }

    if (s->draw_curves) {
        AVFrame *clone;

        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}
738 
/* Input pads: a single audio input, processed in place, hence the
 * writable-frame requirement. */
static const AVFilterPad inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};
749 
/* Filter registration.  Outputs is NULL because the output pads are
 * created dynamically in init() (one audio pad, plus a video pad when
 * curves are requested).
 * NOTE(review): the .query_formats/.process_command/.flags entries were
 * elided in this extract and are reconstructed from the functions and
 * flags referenced elsewhere in the file — confirm against the original. */
AVFilter ff_af_anequalizer = {
    .name            = "anequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
    .priv_size       = sizeof(AudioNEqualizerContext),
    .priv_class      = &anequalizer_class,
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = inputs,
    .outputs         = NULL,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#define NULL
Definition: coverity.c:32
static void chebyshev2_fo_section(FoSection *S, double a, double c, double tetta_b, double g, double si, double b, double D, double c0)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVOption.
Definition: opt.h:248
static void chebyshev1_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
Main libavfilter public API header.
const char * g
Definition: vf_curves.c:115
static double butterworth_compute_bw_gain_db(double gain)
static int config_video(AVFilterLink *outlink)
static void butterworth_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
double, planar
Definition: samplefmt.h:70
static void butterworth_fo_section(FoSection *S, double beta, double si, double g, double g0, double D, double c0)
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
FoSection section[2]
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVFILTER_DEFINE_CLASS(anequalizer)
#define sample
#define N
Definition: af_mcompand.c:54
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:287
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:349
FilterType
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:462
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1091
uint8_t
#define av_cold
Definition: attributes.h:88
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
AVOptions.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:92
#define f(width, name)
Definition: cbs_vp9.c:255
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
static const AVOption anequalizer_options[]
static void chebyshev2_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
static void chebyshev1_fo_section(FoSection *S, double a, double c, double tetta_b, double g0, double si, double b, double D, double c0)
static double hz_2_rad(double x, double fs)
#define FILTER_ORDER
#define A
static av_cold void uninit(AVFilterContext *ctx)
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:354
A filter pad used for either input or output.
Definition: internal.h:54
#define F
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
Definition: ffmath.h:42
static int config_input(AVFilterLink *inlink)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
#define S(s, c, i)
static void equalizer(EqualizatorFilter *f, double sample_rate)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
void * priv
private data for use by the filter
Definition: avfilter.h:356
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:445
const char * arg
Definition: jacosubdec.c:66
#define V
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
static int query_formats(AVFilterContext *ctx)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
static av_const double hypot(double x, double y)
Definition: libm.h:366
#define b
Definition: input.c:41
AVFilterChannelLayouts * channel_layouts
Lists of supported channel layouts, only for audio.
Definition: avfilter.h:455
#define width
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:467
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
#define s(width, name)
Definition: cbs_vp9.c:257
static const AVFilterPad inputs[]
AVFilterFormats * samplerates
Lists of supported sample rates, only for audio.
Definition: avfilter.h:450
#define L(x)
Definition: vp56_arith.h:36
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:553
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static double process_sample(FoSection *s1, double in)
A list of supported channel layouts.
Definition: formats.h:86
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
sample_rate
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
static double chebyshev2_compute_bw_gain_db(double gain)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
AVFilter ff_af_anequalizer
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:145
Rational number (pair of numerator and denominator).
Definition: rational.h:58
const char * name
Filter name.
Definition: avfilter.h:149
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
#define s1
Definition: regdef.h:38
offset must point to two consecutive integers
Definition: opt.h:235
misc parsing utilities
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:353
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:422
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:560
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:186
static double section_process(FoSection *S, double in)
double denum[4]
internal math functions header
static double chebyshev1_compute_bw_gain_db(double gain)
#define G
Definition: huffyuvdsp.h:33
D(D(float, sse)
Definition: rematrix_init.c:28
#define ui(width, name)
Definition: cbs_mpeg2.c:43
static av_cold int init(AVFilterContext *ctx)
#define OFFSET(x)
#define av_free(p)
static const struct PPFilter filters[]
Definition: postprocess.c:134
A list of supported formats for one end of a filter link.
Definition: formats.h:65
#define lrint
Definition: tablegen.h:53
An instance of a filter.
Definition: avfilter.h:341
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:884
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
formats
Definition: signature.h:48
EqualizatorFilter * filters
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition...
Definition: formats.c:437
double num[4]
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:248
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define AV_WL32(p, v)
Definition: intreadwrite.h:426