FFmpeg
af_apsyclip.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014 - 2021 Jason Jang
3  * Copyright (c) 2021 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public License
9  * as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public License
18  * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/mem.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/tx.h"
25 #include "audio.h"
26 #include "avfilter.h"
27 #include "filters.h"
28 #include "internal.h"
29 
30 typedef struct AudioPsyClipContext {
31  const AVClass *class;
32 
33  double level_in;
34  double level_out;
35  double clip_level;
36  double adaptive;
38  int diff_only;
41  double *protections;
42 
44  int fft_size;
45  int overlap;
46  int channels;
47 
52 
61 
67 
// Shorthand used by the option table below.
#define OFFSET(x) offsetof(AudioPsyClipContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM
70 
// User-facing options: input/output gain, clip threshold, difference-only
// output, distortion adaptivity, iteration count and automatic level restore.
static const AVOption apsyclip_options[] = {
    { "level_in", "set input level", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625, 64, FLAGS },
    { "level_out", "set output level", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625, 64, FLAGS },
    { "clip", "set clip level", OFFSET(clip_level), AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625, 1, FLAGS },
    { "diff", "enable difference", OFFSET(diff_only), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "adaptive", "set adaptive distortion", OFFSET(adaptive), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, FLAGS },
    { "iterations", "set iterations", OFFSET(iterations), AV_OPT_TYPE_INT, {.i64=10}, 1, 20, FLAGS },
    { "level", "set auto level", OFFSET(auto_level), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    {NULL}
};
81 
82 AVFILTER_DEFINE_CLASS(apsyclip);
83 
84 static void generate_hann_window(float *window, float *inv_window, int size)
85 {
86  for (int i = 0; i < size; i++) {
87  float value = 0.5f * (1.f - cosf(2.f * M_PI * i / size));
88 
89  window[i] = value;
90  // 1/window to calculate unwindowed peak.
91  inv_window[i] = value > 0.1f ? 1.f / value : 0.f;
92  }
93 }
94 
                            const int (*points)[2], int num_points, int sample_rate)
{
    // Build the per-bin margin curve — how much distortion is allowed
    // relative to the masking threshold — by piecewise-linear interpolation
    // of the given (frequency in Hz, margin in dB) control points, then
    // convert it to linear amplitude.
    int j = 0;

    s->margin_curve[0] = points[0][1];

    for (int i = 0; i < num_points - 1; i++) {
        // fill every bin whose centre frequency lies before the next point
        while (j < s->fft_size / 2 + 1 && j * sample_rate / s->fft_size < points[i + 1][0]) {
            // linearly interpolate between points
            int binHz = j * sample_rate / s->fft_size;
            s->margin_curve[j] = points[i][1] + (binHz - points[i][0]) * (points[i + 1][1] - points[i][1]) / (points[i + 1][0] - points[i][0]);
            j++;
        }
    }
    // handle bins after the last point
    while (j < s->fft_size / 2 + 1) {
        s->margin_curve[j] = points[num_points - 1][1];
        j++;
    }

    // convert margin curve to linear amplitude scale
    for (j = 0; j < s->fft_size / 2 + 1; j++)
        s->margin_curve[j] = powf(10.f, s->margin_curve[j] / 20.f);
}
120 
{
    // Calculate tent-shape function in log-log scale.

    // As an optimization, only consider bins close to "bin".
    // This reduces the number of multiplications needed in calculate_mask_curve.
    // The masking contribution at faraway bins is negligible.

    // Another optimization to save memory and speed up the calculation of the
    // spread table is to calculate and store only 2 spread functions per
    // octave, and reuse the same spread function for multiple bins.
    int table_index = 0;
    int bin = 0;
    int increment = 1;

    while (bin < s->num_psy_bins) {
        float sum = 0;
        int base_idx = table_index * s->num_psy_bins;
        // only bins in roughly [3/4 * bin, 4/3 * bin] contribute noticeably
        int start_bin = bin * 3 / 4;
        int end_bin = FFMIN(s->num_psy_bins, ((bin + 1) * 4 + 2) / 3);
        int next_bin;

        for (int j = start_bin; j < end_bin; j++) {
            // add 0.5 so i=0 doesn't get log(0)
            float rel_idx_log = FFABS(logf((j + 0.5f) / (bin + 0.5f)));
            float value;
            if (j >= bin) {
                // mask up (shallower slope towards higher frequencies)
                value = expf(-rel_idx_log * 40.f);
            } else {
                // mask down (steeper slope towards lower frequencies)
                value = expf(-rel_idx_log * 80.f);
            }
            // the spreading function is centred in the row
            sum += value;
            s->spread_table[base_idx + s->num_psy_bins / 2 + j - bin] = value;
        }
        // now normalize it so each row sums to 1
        for (int j = start_bin; j < end_bin; j++) {
            s->spread_table[base_idx + s->num_psy_bins / 2 + j - bin] /= sum;
        }

        s->spread_table_range[table_index][0] = start_bin - bin;
        s->spread_table_range[table_index][1] = end_bin - bin;

        if (bin <= 1) {
            next_bin = bin + 1;
        } else {
            if ((bin & (bin - 1)) == 0) {
                // power of 2: halve the resolution for the next octave
                increment = bin / 2;
            }

            next_bin = bin + increment;
        }

        // set bins between "bin" and "next_bin" to use this table_index
        for (int i = bin; i < next_bin; i++)
            s->spread_table_index[i] = table_index;

        bin = next_bin;
        table_index++;
    }
}
185 
187 {
188  AVFilterContext *ctx = inlink->dst;
189  AudioPsyClipContext *s = ctx->priv;
190  static const int points[][2] = { {0,14}, {125,14}, {250,16}, {500,18}, {1000,20}, {2000,20}, {4000,20}, {8000,17}, {16000,14}, {20000,-10} };
191  static const int num_points = 10;
192  float scale = 1.f;
193  int ret;
194 
195  s->fft_size = inlink->sample_rate > 100000 ? 1024 : inlink->sample_rate > 50000 ? 512 : 256;
196  s->overlap = s->fft_size / 4;
197 
198  // The psy masking calculation is O(n^2),
199  // so skip it for frequencies not covered by base sampling rantes (i.e. 44k)
200  if (inlink->sample_rate <= 50000) {
201  s->num_psy_bins = s->fft_size / 2;
202  } else if (inlink->sample_rate <= 100000) {
203  s->num_psy_bins = s->fft_size / 4;
204  } else {
205  s->num_psy_bins = s->fft_size / 8;
206  }
207 
208  s->window = av_calloc(s->fft_size, sizeof(*s->window));
209  s->inv_window = av_calloc(s->fft_size, sizeof(*s->inv_window));
210  if (!s->window || !s->inv_window)
211  return AVERROR(ENOMEM);
212 
213  s->in_buffer = ff_get_audio_buffer(inlink, s->fft_size * 2);
214  s->in_frame = ff_get_audio_buffer(inlink, s->fft_size * 2);
215  s->out_dist_frame = ff_get_audio_buffer(inlink, s->fft_size * 2);
216  s->windowed_frame = ff_get_audio_buffer(inlink, s->fft_size * 2);
217  s->clipping_delta = ff_get_audio_buffer(inlink, s->fft_size * 2);
218  s->spectrum_buf = ff_get_audio_buffer(inlink, s->fft_size * 2);
219  s->mask_curve = ff_get_audio_buffer(inlink, s->fft_size / 2 + 1);
220  if (!s->in_buffer || !s->in_frame ||
221  !s->out_dist_frame || !s->windowed_frame ||
222  !s->clipping_delta || !s->spectrum_buf || !s->mask_curve)
223  return AVERROR(ENOMEM);
224 
225  generate_hann_window(s->window, s->inv_window, s->fft_size);
226 
227  s->margin_curve = av_calloc(s->fft_size / 2 + 1, sizeof(*s->margin_curve));
228  if (!s->margin_curve)
229  return AVERROR(ENOMEM);
230 
231  s->spread_table_rows = av_log2(s->num_psy_bins) * 2;
232  s->spread_table = av_calloc(s->spread_table_rows * s->num_psy_bins, sizeof(*s->spread_table));
233  if (!s->spread_table)
234  return AVERROR(ENOMEM);
235 
236  s->spread_table_range = av_calloc(s->spread_table_rows * 2, sizeof(*s->spread_table_range));
237  if (!s->spread_table_range)
238  return AVERROR(ENOMEM);
239 
240  s->spread_table_index = av_calloc(s->num_psy_bins, sizeof(*s->spread_table_index));
241  if (!s->spread_table_index)
242  return AVERROR(ENOMEM);
243 
244  set_margin_curve(s, points, num_points, inlink->sample_rate);
245 
247 
248  s->channels = inlink->ch_layout.nb_channels;
249 
250  s->tx_ctx = av_calloc(s->channels, sizeof(*s->tx_ctx));
251  s->itx_ctx = av_calloc(s->channels, sizeof(*s->itx_ctx));
252  if (!s->tx_ctx || !s->itx_ctx)
253  return AVERROR(ENOMEM);
254 
255  for (int ch = 0; ch < s->channels; ch++) {
256  ret = av_tx_init(&s->tx_ctx[ch], &s->tx_fn, AV_TX_FLOAT_FFT, 0, s->fft_size, &scale, 0);
257  if (ret < 0)
258  return ret;
259 
260  ret = av_tx_init(&s->itx_ctx[ch], &s->itx_fn, AV_TX_FLOAT_FFT, 1, s->fft_size, &scale, 0);
261  if (ret < 0)
262  return ret;
263  }
264 
265  return 0;
266 }
267 
                        const float *in_frame, float *out_frame, const int add_to_out_frame)
{
    const float *window = s->window;

    // Multiply the frame by the Hann analysis window. When add_to_out_frame
    // is set the result is accumulated into out_frame instead of stored,
    // which implements the overlap-add of successive hops.
    for (int i = 0; i < s->fft_size; i++) {
        if (add_to_out_frame) {
            out_frame[i] += in_frame[i] * window[i];
        } else {
            out_frame[i] = in_frame[i] * window[i];
        }
    }
}
281 
                                 const float *spectrum, float *mask_curve)
{
    // Derive the per-bin masking threshold of the current frame: spread each
    // bin's magnitude with the precomputed spread table, then divide by the
    // margin curve so the result is the maximum allowed distortion level.
    for (int i = 0; i < s->fft_size / 2 + 1; i++)
        mask_curve[i] = 0;

    for (int i = 0; i < s->num_psy_bins; i++) {
        int base_idx, start_bin, end_bin, table_idx;
        float magnitude;
        int range[2];

        if (i == 0) {
            // DC bin has no imaginary part in this packed layout
            magnitude = FFABS(spectrum[0]);
        } else if (i == s->fft_size / 2) {
            magnitude = FFABS(spectrum[s->fft_size]);
        } else {
            // Because the input signal is real, the + and - frequencies are redundant.
            // Multiply the magnitude by 2 to simulate adding up the + and - frequencies.
            magnitude = hypotf(spectrum[2 * i], spectrum[2 * i + 1]) * 2;
        }

        table_idx = s->spread_table_index[i];
        range[0] = s->spread_table_range[table_idx][0];
        range[1] = s->spread_table_range[table_idx][1];
        base_idx = table_idx * s->num_psy_bins;
        start_bin = FFMAX(0, i + range[0]);
        end_bin = FFMIN(s->num_psy_bins, i + range[1]);

        for (int j = start_bin; j < end_bin; j++)
            mask_curve[j] += s->spread_table[base_idx + s->num_psy_bins / 2 + j - i] * magnitude;
    }

    // for ultrasonic frequencies, skip the O(n^2) spread calculation and just copy the magnitude
    for (int i = s->num_psy_bins; i < s->fft_size / 2 + 1; i++) {
        float magnitude;
        if (i == s->fft_size / 2) {
            magnitude = FFABS(spectrum[s->fft_size]);
        } else {
            // Because the input signal is real, the + and - frequencies are redundant.
            // Multiply the magnitude by 2 to simulate adding up the + and - frequencies.
            magnitude = hypotf(spectrum[2 * i], spectrum[2 * i + 1]) * 2;
        }

        mask_curve[i] = magnitude;
    }

    // apply the per-bin allowed distortion margin
    for (int i = 0; i < s->fft_size / 2 + 1; i++)
        mask_curve[i] = mask_curve[i] / s->margin_curve[i];
}
331 
                           const float *windowed_frame, float *clipping_delta, float delta_boost)
{
    const float *window = s->window;

    // Hard-clip the windowed signal to the windowed clip level; the amount
    // clipped off is accumulated into clipping_delta so that
    // windowed_frame + clipping_delta approaches the clipped signal.
    // delta_boost > 1 moves towards the target faster in late iterations.
    for (int i = 0; i < s->fft_size; i++) {
        const float limit = s->clip_level * window[i];
        const float effective_value = windowed_frame[i] + clipping_delta[i];

        if (effective_value > limit) {
            clipping_delta[i] += (limit - effective_value) * delta_boost;
        } else if (effective_value < -limit) {
            clipping_delta[i] += (-limit - effective_value) * delta_boost;
        }
    }
}
348 
                                float *clip_spectrum, const float *mask_curve)
{
    // Attenuate any distortion component that exceeds its masking threshold
    // so the clipping noise stays (just) inaudible.
    // bin 0
    float relative_distortion_level = FFABS(clip_spectrum[0]) / mask_curve[0];

    if (relative_distortion_level > 1.f)
        clip_spectrum[0] /= relative_distortion_level;

    // bin 1..N/2-1
    for (int i = 1; i < s->fft_size / 2; i++) {
        float real = clip_spectrum[i * 2];
        float imag = clip_spectrum[i * 2 + 1];
        // Because the input signal is real, the + and - frequencies are redundant.
        // Multiply the magnitude by 2 to simulate adding up the + and - frequencies.
        relative_distortion_level = hypotf(real, imag) * 2 / mask_curve[i];
        if (relative_distortion_level > 1.0) {
            clip_spectrum[i * 2] /= relative_distortion_level;
            clip_spectrum[i * 2 + 1] /= relative_distortion_level;
            // also scale the mirrored negative-frequency bin so the spectrum
            // stays conjugate-symmetric and the inverse FFT stays real
            clip_spectrum[s->fft_size * 2 - i * 2] /= relative_distortion_level;
            clip_spectrum[s->fft_size * 2 - i * 2 + 1] /= relative_distortion_level;
        }
    }
    // bin N/2
    relative_distortion_level = FFABS(clip_spectrum[s->fft_size]) / mask_curve[s->fft_size / 2];
    if (relative_distortion_level > 1.f)
        clip_spectrum[s->fft_size] /= relative_distortion_level;
}
377 
/**
 * Expand a packed real array of `size` samples into interleaved
 * real/imaginary pairs, in place (buffer must hold 2*size floats).
 * Walking from the top guarantees no source sample is overwritten
 * before it has been read (2n and 2n+1 never land on an unread slot).
 */
static void r2c(float *buffer, int size)
{
    for (int n = size; n-- > 0; ) {
        buffer[2 * n]     = buffer[n];
        buffer[2 * n + 1] = 0.f;
    }
}
386 
/**
 * Collapse an interleaved complex buffer back to packed reals in place,
 * discarding the imaginary parts, then clear the now-unused upper half.
 * The two passes must stay separate: zeroing buffer[n + size] early could
 * clobber a real sample that a later iteration still needs to read.
 */
static void c2r(float *buffer, int size)
{
    float *upper_half = buffer + size;

    for (int n = 0; n < size; n++)
        buffer[n] = buffer[2 * n];

    for (int n = 0; n < size; n++)
        upper_half[n] = 0.f;
}
395 
/**
 * Process one hop (s->overlap samples) for a single channel: window the
 * accumulated frame, FFT it, compute the psychoacoustic masking threshold,
 * then iteratively clip the frame and filter the clipping distortion so it
 * stays below the threshold; finally overlap-add the distortion and emit
 * one hop of output samples.
 */
static void feed(AVFilterContext *ctx, int ch,
                 const float *in_samples, float *out_samples, int diff_only,
                 float *in_frame, float *out_dist_frame,
                 float *windowed_frame, float *clipping_delta,
                 float *spectrum_buf, float *mask_curve)
{
    AudioPsyClipContext *s = ctx->priv;
    const float clip_level_inv = 1.f / s->clip_level;
    const float level_out = s->level_out;
    float orig_peak = 0;
    float peak;

    // shift in/out buffers by one hop
    for (int i = 0; i < s->fft_size - s->overlap; i++) {
        in_frame[i] = in_frame[i + s->overlap];
        out_dist_frame[i] = out_dist_frame[i + s->overlap];
    }

    for (int i = 0; i < s->overlap; i++) {
        in_frame[i + s->fft_size - s->overlap] = in_samples[i];
        out_dist_frame[i + s->fft_size - s->overlap] = 0.f;
    }

    apply_window(s, in_frame, windowed_frame, 0);
    r2c(windowed_frame, s->fft_size);
    s->tx_fn(s->tx_ctx[ch], spectrum_buf, windowed_frame, sizeof(AVComplexFloat));
    c2r(windowed_frame, s->fft_size);
    calculate_mask_curve(s, spectrum_buf, mask_curve);

    // It would be easier to calculate the peak from the unwindowed input.
    // This is just for consistency with the clipped peak calculation
    // because the inv_window zeros out samples on the edge of the window.
    for (int i = 0; i < s->fft_size; i++)
        orig_peak = FFMAX(orig_peak, FFABS(windowed_frame[i] * s->inv_window[i]));
    orig_peak *= clip_level_inv;
    peak = orig_peak;

    // clear clipping_delta
    for (int i = 0; i < s->fft_size * 2; i++)
        clipping_delta[i] = 0.f;

    // repeat clipping-filtering process a few times to control both the peaks and the spectrum
    for (int i = 0; i < s->iterations; i++) {
        float mask_curve_shift = 1.122f; // 1.122 is 1dB
        // The last 1/3 of rounds have boosted delta to help reach the peak target faster
        float delta_boost = 1.f;
        if (i >= s->iterations - s->iterations / 3) {
            // boosting the delta when large peaks are still present is dangerous
            if (peak < 2.f)
                delta_boost = 2.f;
        }

        clip_to_window(s, windowed_frame, clipping_delta, delta_boost);

        r2c(clipping_delta, s->fft_size);
        s->tx_fn(s->tx_ctx[ch], spectrum_buf, clipping_delta, sizeof(AVComplexFloat));

        limit_clip_spectrum(s, spectrum_buf, mask_curve);

        s->itx_fn(s->itx_ctx[ch], clipping_delta, spectrum_buf, sizeof(AVComplexFloat));
        c2r(clipping_delta, s->fft_size);

        // rescale the inverse transform output by 1/N
        for (int i = 0; i < s->fft_size; i++)
            clipping_delta[i] /= s->fft_size;

        peak = 0;
        for (int i = 0; i < s->fft_size; i++)
            peak = FFMAX(peak, FFABS((windowed_frame[i] + clipping_delta[i]) * s->inv_window[i]));
        peak *= clip_level_inv;

        // Automatically adjust mask_curve as necessary to reach peak target
        if (orig_peak > 1.f && peak > 1.f) {
            float diff_achieved = orig_peak - peak;
            if (i + 1 < s->iterations - s->iterations / 3 && diff_achieved > 0) {
                float diff_needed = orig_peak - 1.f;
                float diff_ratio = diff_needed / diff_achieved;
                // If a good amount of peak reduction was already achieved,
                // don't shift the mask_curve by the full peak value.
                // On the other hand, if only a little peak reduction was achieved,
                // don't shift the mask_curve by the enormous diff_ratio.
                diff_ratio = FFMIN(diff_ratio, peak);
                mask_curve_shift = FFMAX(mask_curve_shift, diff_ratio);
            } else {
                // If the peak got higher than the input or we are in the last 1/3 rounds,
                // go back to the heavy-handed peak heuristic.
                mask_curve_shift = FFMAX(mask_curve_shift, peak);
            }
        }

        mask_curve_shift = 1.f + (mask_curve_shift - 1.f) * s->adaptive;

        // Be less strict in the next iteration.
        // This helps with peak control.
        for (int i = 0; i < s->fft_size / 2 + 1; i++)
            mask_curve[i] *= mask_curve_shift;
    }

    // do overlap & add
    apply_window(s, clipping_delta, out_dist_frame, 1);

    for (int i = 0; i < s->overlap; i++) {
        // 4 times overlap with squared hanning window results in 1.5 time increase in amplitude
        if (!ctx->is_disabled) {
            out_samples[i] = out_dist_frame[i] / 1.5f;
            if (!diff_only)
                out_samples[i] += in_frame[i];
            if (s->auto_level)
                out_samples[i] *= clip_level_inv;
            out_samples[i] *= level_out;
        } else {
            out_samples[i] = in_frame[i];
        }
    }
}
510 
511 static int psy_channel(AVFilterContext *ctx, AVFrame *in, AVFrame *out, int ch)
512 {
513  AudioPsyClipContext *s = ctx->priv;
514  const float *src = (const float *)in->extended_data[ch];
515  float *in_buffer = (float *)s->in_buffer->extended_data[ch];
516  float *dst = (float *)out->extended_data[ch];
517 
518  for (int n = 0; n < s->overlap; n++)
519  in_buffer[n] = src[n] * s->level_in;
520 
521  feed(ctx, ch, in_buffer, dst, s->diff_only,
522  (float *)(s->in_frame->extended_data[ch]),
523  (float *)(s->out_dist_frame->extended_data[ch]),
524  (float *)(s->windowed_frame->extended_data[ch]),
525  (float *)(s->clipping_delta->extended_data[ch]),
526  (float *)(s->spectrum_buf->extended_data[ch]),
527  (float *)(s->mask_curve->extended_data[ch]));
528 
529  return 0;
530 }
531 
532 static int psy_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
533 {
534  AudioPsyClipContext *s = ctx->priv;
535  AVFrame *out = arg;
536  const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
537  const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
538 
539  for (int ch = start; ch < end; ch++)
540  psy_channel(ctx, s->in, out, ch);
541 
542  return 0;
543 }
544 
546 {
547  AVFilterContext *ctx = inlink->dst;
548  AVFilterLink *outlink = ctx->outputs[0];
549  AudioPsyClipContext *s = ctx->priv;
550  AVFrame *out;
551  int ret;
552 
553  out = ff_get_audio_buffer(outlink, s->overlap);
554  if (!out) {
555  ret = AVERROR(ENOMEM);
556  goto fail;
557  }
558 
559  s->in = in;
563 
564  out->pts = in->pts;
565  out->nb_samples = in->nb_samples;
566  ret = ff_filter_frame(outlink, out);
567 fail:
568  av_frame_free(&in);
569  s->in = NULL;
570  return ret < 0 ? ret : 0;
571 }
572 
574 {
575  AVFilterLink *inlink = ctx->inputs[0];
576  AVFilterLink *outlink = ctx->outputs[0];
577  AudioPsyClipContext *s = ctx->priv;
578  AVFrame *in = NULL;
579  int ret = 0, status;
580  int64_t pts;
581 
583 
584  ret = ff_inlink_consume_samples(inlink, s->overlap, s->overlap, &in);
585  if (ret < 0)
586  return ret;
587 
588  if (ret > 0) {
589  return filter_frame(inlink, in);
590  } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
591  ff_outlink_set_status(outlink, status, pts);
592  return 0;
593  } else {
594  if (ff_inlink_queued_samples(inlink) >= s->overlap) {
596  } else if (ff_outlink_frame_wanted(outlink)) {
598  }
599  return 0;
600  }
601 }
602 
{
    AudioPsyClipContext *s = ctx->priv;

    av_freep(&s->window);
    av_freep(&s->inv_window);
    av_freep(&s->spread_table);
    av_freep(&s->spread_table_range);
    av_freep(&s->spread_table_index);
    av_freep(&s->margin_curve);

    av_frame_free(&s->in_buffer);
    av_frame_free(&s->in_frame);
    av_frame_free(&s->out_dist_frame);
    av_frame_free(&s->windowed_frame);
    av_frame_free(&s->clipping_delta);
    av_frame_free(&s->spectrum_buf);
    av_frame_free(&s->mask_curve);

    // tx_ctx/itx_ctx may be NULL if config_input failed before allocating
    // them, so guard before indexing.
    for (int ch = 0; ch < s->channels; ch++) {
        if (s->tx_ctx)
            av_tx_uninit(&s->tx_ctx[ch]);
        if (s->itx_ctx)
            av_tx_uninit(&s->itx_ctx[ch]);
    }

    av_freep(&s->tx_ctx);
    av_freep(&s->itx_ctx);
}
632 
// Single audio input; config_input() sets up the FFT and psy tables per stream.
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};
640 
642  .name = "apsyclip",
643  .description = NULL_IF_CONFIG_SMALL("Audio Psychoacoustic Clipper."),
644  .priv_size = sizeof(AudioPsyClipContext),
645  .priv_class = &apsyclip_class,
646  .uninit = uninit,
652  .activate = activate,
653  .process_command = ff_filter_process_command,
654 };
ff_af_apsyclip
const AVFilter ff_af_apsyclip
Definition: af_apsyclip.c:641
generate_hann_window
static void generate_hann_window(float *window, float *inv_window, int size)
Definition: af_apsyclip.c:84
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(apsyclip)
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
set_margin_curve
static void set_margin_curve(AudioPsyClipContext *s, const int(*points)[2], int num_points, int sample_rate)
Definition: af_apsyclip.c:95
psy_channel
static int psy_channel(AVFilterContext *ctx, AVFrame *in, AVFrame *out, int ch)
Definition: af_apsyclip.c:511
out
FILE * out
Definition: movenc.c:55
limit_clip_spectrum
static void limit_clip_spectrum(AudioPsyClipContext *s, float *clip_spectrum, const float *mask_curve)
Definition: af_apsyclip.c:349
AudioPsyClipContext::protections
double * protections
Definition: af_apsyclip.c:41
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
AVTXContext
Definition: tx_priv.h:235
FILTER_SINGLE_SAMPLEFMT
#define FILTER_SINGLE_SAMPLEFMT(sample_fmt_)
Definition: internal.h:175
AudioPsyClipContext::clip_level
double clip_level
Definition: af_apsyclip.c:35
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
AudioPsyClipContext::spread_table_rows
int spread_table_rows
Definition: af_apsyclip.c:48
AudioPsyClipContext::tx_ctx
AVTXContext ** tx_ctx
Definition: af_apsyclip.c:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:487
AVOption
AVOption.
Definition: opt.h:346
AudioPsyClipContext::protections_str
char * protections_str
Definition: af_apsyclip.c:40
expf
#define expf(x)
Definition: libm.h:283
AVComplexFloat
Definition: tx.h:27
apsyclip_options
static const AVOption apsyclip_options[]
Definition: af_apsyclip.c:71
activate
static int activate(AVFilterContext *ctx)
Definition: af_apsyclip.c:573
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
AudioPsyClipContext::window
float * window
Definition: af_apsyclip.c:51
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
calculate_mask_curve
static void calculate_mask_curve(AudioPsyClipContext *s, const float *spectrum, float *mask_curve)
Definition: af_apsyclip.c:282
AudioPsyClipContext::in_frame
AVFrame * in_frame
Definition: af_apsyclip.c:55
AudioPsyClipContext
Definition: af_apsyclip.c:30
AudioPsyClipContext::itx_ctx
AVTXContext ** itx_ctx
Definition: af_apsyclip.c:64
window
static SDL_Window * window
Definition: ffplay.c:361
AudioPsyClipContext::num_psy_bins
int num_psy_bins
Definition: af_apsyclip.c:43
AudioPsyClipContext::windowed_frame
AVFrame * windowed_frame
Definition: af_apsyclip.c:57
cosf
#define cosf(x)
Definition: libm.h:78
fail
#define fail()
Definition: checkasm.h:179
apply_window
static void apply_window(AudioPsyClipContext *s, const float *in_frame, float *out_frame, const int add_to_out_frame)
Definition: af_apsyclip.c:268
pts
static int64_t pts
Definition: transcode_aac.c:644
AudioPsyClipContext::channels
int channels
Definition: af_apsyclip.c:46
FLAGS
#define FLAGS
Definition: af_apsyclip.c:69
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
av_cold
#define av_cold
Definition: attributes.h:90
AudioPsyClipContext::auto_level
int auto_level
Definition: af_apsyclip.c:37
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
AudioPsyClipContext::inv_window
float * inv_window
Definition: af_apsyclip.c:51
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
inputs
static const AVFilterPad inputs[]
Definition: af_apsyclip.c:633
AudioPsyClipContext::tx_fn
av_tx_fn tx_fn
Definition: af_apsyclip.c:63
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1568
c2r
static void c2r(float *buffer, int size)
Definition: af_apsyclip.c:387
s
#define s(width, name)
Definition: cbs_vp9.c:198
AudioPsyClipContext::level_out
double level_out
Definition: af_apsyclip.c:34
AudioPsyClipContext::mask_curve
AVFrame * mask_curve
Definition: af_apsyclip.c:60
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:237
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
filters.h
AudioPsyClipContext::margin_curve
float * margin_curve
Definition: af_apsyclip.c:51
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplex...
Definition: tx.h:47
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFFSET
#define OFFSET(x)
Definition: af_apsyclip.c:68
AudioPsyClipContext::out_dist_frame
AVFrame * out_dist_frame
Definition: af_apsyclip.c:56
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
arg
const char * arg
Definition: jacosubdec.c:67
feed
static void feed(AVFilterContext *ctx, int ch, const float *in_samples, float *out_samples, int diff_only, float *in_frame, float *out_dist_frame, float *windowed_frame, float *clipping_delta, float *spectrum_buf, float *mask_curve)
Definition: af_apsyclip.c:396
AudioPsyClipContext::in
AVFrame * in
Definition: af_apsyclip.c:53
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:73
AudioPsyClipContext::itx_fn
av_tx_fn itx_fn
Definition: af_apsyclip.c:65
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1462
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
AudioPsyClipContext::fft_size
int fft_size
Definition: af_apsyclip.c:44
AudioPsyClipContext::adaptive
double adaptive
Definition: af_apsyclip.c:36
AudioPsyClipContext::spread_table_index
int * spread_table_index
Definition: af_apsyclip.c:49
AudioPsyClipContext::level_in
double level_in
Definition: af_apsyclip.c:33
AudioPsyClipContext::spread_table_range
int(* spread_table_range)[2]
Definition: af_apsyclip.c:50
AudioPsyClipContext::clipping_delta
AVFrame * clipping_delta
Definition: af_apsyclip.c:58
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_apsyclip.c:603
r2c
static void r2c(float *buffer, int size)
Definition: af_apsyclip.c:378
ff_audio_default_filterpad
const AVFilterPad ff_audio_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_AUDIO.
Definition: audio.c:33
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1389
AudioPsyClipContext::in_buffer
AVFrame * in_buffer
Definition: af_apsyclip.c:54
AudioPsyClipContext::iterations
int iterations
Definition: af_apsyclip.c:39
f
f
Definition: af_crystalizer.c:121
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
powf
#define powf(x, y)
Definition: libm.h:50
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
size
int size
Definition: twinvq_data.h:10344
clip_to_window
static void clip_to_window(AudioPsyClipContext *s, const float *windowed_frame, float *clipping_delta, float delta_boost)
Definition: af_apsyclip.c:332
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2557
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:887
M_PI
#define M_PI
Definition: mathematics.h:67
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
internal.h
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:455
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:436
increment
#define increment(name, min, max)
Definition: cbs_av1.c:614
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:827
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1417
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
AVFilter
Filter definition.
Definition: avfilter.h:166
ret
ret
Definition: filter_design.txt:187
psy_channels
static int psy_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_apsyclip.c:532
status
ov_status_e status
Definition: dnn_backend_openvino.c:121
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
generate_spread_table
static void generate_spread_table(AudioPsyClipContext *s)
Definition: af_apsyclip.c:121
AudioPsyClipContext::overlap
int overlap
Definition: af_apsyclip.c:45
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
mem.h
audio.h
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
AudioPsyClipContext::spread_table
float * spread_table
Definition: af_apsyclip.c:51
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:155
AudioPsyClipContext::diff_only
int diff_only
Definition: af_apsyclip.c:38
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
AudioPsyClipContext::spectrum_buf
AVFrame * spectrum_buf
Definition: af_apsyclip.c:59
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
int
int
Definition: ffmpeg_filter.c:424
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_apsyclip.c:186
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_apsyclip.c:545
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:235
tx.h