FFmpeg
af_anequalizer.c
/*
 * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

#define FILTER_ORDER 4

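/*
 * Each parametric band is realized as FILTER_ORDER/2 cascaded FoSection
 * stages. A FoSection stores up to fourth-order direct form I coefficients
 * (a0..a4 / b0..b4) together with the input (num[]) and output (denum[])
 * histories used by section_process().
 */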
enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES,
};

typedef struct FoSection {
    double a0, a1, a2, a3, a4;
    double b0, b1, b2, b3, b4;

    double num[4];
    double denum[4];
} FoSection;

typedef struct EqualizatorFilter {
    int ignore;
    int channel;
    int type;

    double freq;
    double gain;
    double width;

    FoSection section[2];
} EqualizatorFilter;

typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;
    char *colors;
    int draw_curves;
    int w, h;

    double mag;
    int fscale;
    int nb_filters;
    int nb_allocated;
    EqualizatorFilter *filters;
    AVFrame *video;
} AudioNEqualizerContext;

#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption anequalizer_options[] = {
    { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, .unit = "fscale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, .unit = "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, .unit = "fscale" },
    { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(anequalizer);

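/*
 * Render the frequency response of every active band into the RGBA curves
 * frame: one curve per channel, colored from the "colors" option. For each
 * x column, the response of all sections belonging to that channel is
 * evaluated on the unit circle (linearly or logarithmically spaced frequency
 * depending on fscale), converted to dB, scaled by mgain and connected to
 * the previous column with a vertical run of pixels.
 */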
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;

    colors = av_strdup(s->colors);
    if (!colors)
        return;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    for (ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_v = -1;
        double f;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;

            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;

            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;

                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];

                    /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                             ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */

                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }

            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }

            prev_v = v;
        }
    }

    av_free(colors);
}

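/*
 * Configure the optional video output: apply the requested size, allocate
 * the persistent curves frame and draw the initial response into it.
 */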
static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *out;

    outlink->w = s->w;
    outlink->h = s->h;

    av_frame_free(&s->video);
    s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    outlink->sample_aspect_ratio = (AVRational){1,1};

    draw_curves(ctx, inlink, out);

    return 0;
}

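/*
 * Always create the audio output pad "out0"; append a second, video pad
 * "out1" only when curve drawing is enabled, so the number of outputs is
 * decided at init time.
 */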
static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad){
        .name = "out0",
        .type = AVMEDIA_TYPE_AUDIO,
    };

    ret = ff_append_outpad(ctx, &pad);
    if (ret < 0)
        return ret;

    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name = "out1",
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        ret = ff_append_outpad(ctx, &vpad);
        if (ret < 0)
            return ret;
    }

    return 0;
}

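/*
 * Format negotiation: audio is handled as planar doubles with any channel
 * layout and sample rate; the optional curves output is negotiated as RGBA.
 */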
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->incfg.channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
        return ret;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;

    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters = 0;
    s->nb_allocated = 0;
}

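/*
 * The *_fo_section() helpers below fill one fourth-order section of a
 * peaking filter centered at w0 = acos(c0). When c0 is +/-1 (band centered
 * at DC or Nyquist) the section degenerates to second order and b3, b4, a3,
 * a4 stay zero. The corresponding *_bp_filter() functions convert the dB
 * gains G, Gb and G0 to linear, derive the prototype parameters and design
 * f->section[] for the Butterworth and Chebyshev type I/II responses.
 */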
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}

static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g  = pow(G, 1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;

        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}

static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}

static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0,1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;

        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}

static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}

static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;

        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}

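/*
 * Band-edge gain heuristics: choose the gain Gb (in dB) at which the band
 * width is measured, as a function of the requested peak gain and of the
 * filter type.
 */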
static double butterworth_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.5;
    else if (gain >= 6)
        bw_gain = gain - 3;

    return bw_gain;
}

static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 1;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.9;
    else if (gain >= 6)
        bw_gain = gain - 1;

    return bw_gain;
}

static double chebyshev2_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = -3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.3;
    else if (gain >= 6)
        bw_gain = 3;

    return bw_gain;
}

static inline double hz_2_rad(double x, double fs)
{
    return 2 * M_PI * x / fs;
}

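/* (Re)design one band from its current freq/width/gain: convert Hz to
 * rad/sample and dispatch to the design routine matching its type. */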
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);
    double wb = hz_2_rad(f->width, sample_rate);
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }

}

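/*
 * Design the band that was just written to s->filters[s->nb_filters] and
 * commit it, doubling the filters array whenever it is about to run out
 * of room.
 */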
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}

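/*
 * Parse the "params" option once the input is configured. Bands are
 * separated by '|' and each one reads "c<channel> f=<freq> w=<width>
 * g=<gain>[ t=<type>]", e.g. "c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10".
 * Bands whose frequency or channel is out of range are kept but flagged as
 * ignored.
 */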
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    s->nb_allocated = 32 * inlink->ch_layout.nb_channels;
    s->filters = av_calloc(inlink->ch_layout.nb_channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                                                      &s->filters[s->nb_filters].freq,
                                                      &s->filters[s->nb_filters].width,
                                                      &s->filters[s->nb_filters].gain,
                                                      &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                                                 &s->filters[s->nb_filters].freq,
                                                 &s->filters[s->nb_filters].width,
                                                 &s->filters[s->nb_filters].gain) != 4 ) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->ch_layout.nb_channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}

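/*
 * The "change" command updates one existing band at runtime; its argument
 * has the form "<band index>|f=<freq>|w=<width>|g=<gain>". The band is
 * redesigned and, when enabled, the response curves are redrawn.
 */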
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}

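/*
 * Run one sample through a single section: direct form I difference
 * equation, with num[] holding past inputs and denum[] past outputs
 * (a0 is 1 by construction, so no normalization is needed here).
 */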
static inline double section_process(FoSection *S, double in)
{
    double out;

    out = S->b0 * in;
    out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
    out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
    out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
    out+= S->b4 * S->num[3] - S->denum[3] * S->a4;

    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}

static double process_sample(FoSection *s1, double in)
{
    double p0 = in, p1;
    int i;

    for (i = 0; i < FILTER_ORDER / 2; i++) {
        p1 = section_process(&s1[i], p0);
        p0 = p1;
    }

    return p1;
}

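/*
 * Slice-threaded worker: each job owns a contiguous range of channels and
 * applies, in place, every non-ignored band with non-zero gain that targets
 * one of those channels.
 */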
static int filter_channels(AVFilterContext *ctx, void *arg,
                           int jobnr, int nb_jobs)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFrame *buf = arg;
    const int start = (buf->ch_layout.nb_channels * jobnr) / nb_jobs;
    const int end = (buf->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;

    for (int i = 0; i < s->nb_filters; i++) {
        EqualizatorFilter *f = &s->filters[i];
        double *bptr;

        if (f->gain == 0. || f->ignore)
            continue;
        if (f->channel < start ||
            f->channel >= end)
            continue;

        bptr = (double *)buf->extended_data[f->channel];
        for (int n = 0; n < buf->nb_samples; n++) {
            double sample = bptr[n];

            sample = process_sample(f->section, sample);
            bptr[n] = sample;
        }
    }

    return 0;
}

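/*
 * Per-frame entry point: filter the audio in place unless the filter is
 * disabled on the timeline, then, if curves are drawn, push a clone of the
 * persistent video frame with a matching timestamp to the second output.
 */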
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (!ctx->is_disabled)
        ff_filter_execute(ctx, filter_channels, buf, NULL,
                          FFMIN(inlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));

    if (s->draw_curves) {
        AVFrame *clone;

        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .flags        = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_af_anequalizer = {
    .name            = "anequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
    .priv_size       = sizeof(AudioNEqualizerContext),
    .priv_class      = &anequalizer_class,
    .init            = init,
    .uninit          = uninit,
    FILTER_INPUTS(inputs),
    .outputs         = NULL,
    FILTER_QUERY_FUNC(query_formats),
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                       AVFILTER_FLAG_SLICE_THREADS |
                       AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};