FFmpeg
af_dynaudnorm.c
/*
 * Dynamic Audio Normalizer
 * Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Dynamic Audio Normalizer
 */

#include <float.h>

#include "libavutil/avassert.h"
#include "libavutil/opt.h"

#define MIN_FILTER_SIZE 3
#define MAX_FILTER_SIZE 301

#define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1)
#include "libavfilter/bufferqueue.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

typedef struct local_gain {
    double max_gain;
    double threshold;
} local_gain;

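/* A cqueue is a small fixed-capacity FIFO of doubles: max_size is the
 * allocated capacity, size the nominal window length and nb_elements the
 * current fill level. It holds the per-channel gain and threshold histories
 * that feed the smoothing filter below. */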
typedef struct cqueue {
    double *elements;
    int size;
    int max_size;
    int nb_elements;
} cqueue;

typedef struct DynamicAudioNormalizerContext {
    const AVClass *class;

    struct FFBufQueue queue;

    int frame_len;
    int frame_len_msec;
    int filter_size;
    int dc_correction;
    int channels_coupled;
    int alt_boundary_mode;

    double peak_value;
    double max_amplification;
    double target_rms;
    double compress_factor;
    double threshold;
    double *prev_amplification_factor;
    double *dc_correction_value;
    double *compress_threshold;
    double *weights;

    int channels;
    int eof;
    int64_t pts;

    cqueue **gain_history_original;
    cqueue **gain_history_minimum;
    cqueue **gain_history_smoothed;
    cqueue **threshold_history;

    cqueue *is_enabled;
} DynamicAudioNormalizerContext;

#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption dynaudnorm_options[] = {
    { "framelen",    "set the frame length in msec",  OFFSET(frame_len_msec),    AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "f",           "set the frame length in msec",  OFFSET(frame_len_msec),    AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "gausssize",   "set the filter size",           OFFSET(filter_size),       AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "g",           "set the filter size",           OFFSET(filter_size),       AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "peak",        "set the peak value",            OFFSET(peak_value),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.95},  0.0,   1.0, FLAGS },
    { "p",           "set the peak value",            OFFSET(peak_value),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.95},  0.0,   1.0, FLAGS },
    { "maxgain",     "set the max amplification",     OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0},  1.0, 100.0, FLAGS },
    { "m",           "set the max amplification",     OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0},  1.0, 100.0, FLAGS },
    { "targetrms",   "set the target RMS",            OFFSET(target_rms),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},   0.0,   1.0, FLAGS },
    { "r",           "set the target RMS",            OFFSET(target_rms),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},   0.0,   1.0, FLAGS },
    { "coupling",    "set channel coupling",          OFFSET(channels_coupled),  AV_OPT_TYPE_BOOL,   {.i64 = 1},       0,     1, FLAGS },
    { "n",           "set channel coupling",          OFFSET(channels_coupled),  AV_OPT_TYPE_BOOL,   {.i64 = 1},       0,     1, FLAGS },
    { "correctdc",   "set DC correction",             OFFSET(dc_correction),     AV_OPT_TYPE_BOOL,   {.i64 = 0},       0,     1, FLAGS },
    { "c",           "set DC correction",             OFFSET(dc_correction),     AV_OPT_TYPE_BOOL,   {.i64 = 0},       0,     1, FLAGS },
    { "altboundary", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL,   {.i64 = 0},       0,     1, FLAGS },
    { "b",           "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL,   {.i64 = 0},       0,     1, FLAGS },
    { "compress",    "set the compress factor",       OFFSET(compress_factor),   AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},   0.0,  30.0, FLAGS },
    { "s",           "set the compress factor",       OFFSET(compress_factor),   AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},   0.0,  30.0, FLAGS },
    { "threshold",   "set the threshold value",       OFFSET(threshold),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},   0.0,   1.0, FLAGS },
    { "t",           "set the threshold value",       OFFSET(threshold),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},   0.0,   1.0, FLAGS },
    { NULL }
};
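
/* Illustrative use of these options (editor's example, not part of the
 * original source):
 *   ffmpeg -i in.wav -af dynaudnorm=f=500:g=31:p=0.95:m=10 out.wav
 * Every option carries AV_OPT_FLAG_RUNTIME_PARAM, so it can also be changed
 * while the graph is running via process_command() (e.g. the sendcmd filter). */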

AVFILTER_DEFINE_CLASS(dynaudnorm);

static av_cold int init(AVFilterContext *ctx)
{
    DynamicAudioNormalizerContext *s = ctx->priv;

    if (!(s->filter_size & 1)) {
        av_log(ctx, AV_LOG_WARNING, "filter size %d is invalid. Changing to an odd value.\n", s->filter_size);
        s->filter_size |= 1;
    }

    return 0;
}

static inline int frame_size(int sample_rate, int frame_len_msec)
{
    const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
    return frame_size + (frame_size % 2);
}
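
/* Example: frame_size(44100, 10) computes lrint(44100 * 0.010) = 441 and then
 * rounds up to 442, so every analysis frame holds an even number of samples. */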

static cqueue *cqueue_create(int size, int max_size)
{
    cqueue *q;

    if (max_size < size)
        return NULL;

    q = av_malloc(sizeof(cqueue));
    if (!q)
        return NULL;

    q->max_size = max_size;
    q->size = size;
    q->nb_elements = 0;

    q->elements = av_malloc_array(max_size, sizeof(double));
    if (!q->elements) {
        av_free(q);
        return NULL;
    }

    return q;
}

static void cqueue_free(cqueue *q)
{
    if (q)
        av_free(q->elements);
    av_free(q);
}

static int cqueue_size(cqueue *q)
{
    return q->nb_elements;
}

static int cqueue_empty(cqueue *q)
{
    return q->nb_elements <= 0;
}

static int cqueue_enqueue(cqueue *q, double element)
{
    av_assert2(q->nb_elements < q->max_size);

    q->elements[q->nb_elements] = element;
    q->nb_elements++;

    return 0;
}

static double cqueue_peek(cqueue *q, int index)
{
    av_assert2(index < q->nb_elements);
    return q->elements[index];
}

static int cqueue_dequeue(cqueue *q, double *element)
{
    av_assert2(q->nb_elements > 0);

    *element = q->elements[0];
    memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
    q->nb_elements--;

    return 0;
}

static int cqueue_pop(cqueue *q)
{
    av_assert2(q->nb_elements > 0);

    memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
    q->nb_elements--;

    return 0;
}

static void cqueue_resize(cqueue *q, int new_size)
{
    av_assert2(q->max_size >= new_size);
    av_assert2(MIN_FILTER_SIZE <= new_size);

    if (new_size > q->nb_elements) {
        const int side = (new_size - q->nb_elements) / 2;

        memmove(q->elements + side, q->elements, sizeof(double) * q->nb_elements);
        for (int i = 0; i < side; i++)
            q->elements[i] = q->elements[side];
        q->nb_elements = new_size - 1 - side;
    } else {
        int count = (q->size - new_size + 1) / 2;

        while (count-- > 0)
            cqueue_pop(q);
    }

    q->size = new_size;
}
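
/* cqueue_resize() is only reached from process_command() when the Gaussian
 * window length is changed at runtime: when growing, the existing history is
 * shifted towards the middle and the front is padded with its oldest value;
 * when shrinking, entries are popped from the front so the window stays
 * roughly centred on the current frame. */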

static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
{
    double total_weight = 0.0;
    const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
    double adjust;
    int i;

    // Pre-compute constants
    const int offset = s->filter_size / 2;
    const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
    const double c2 = 2.0 * sigma * sigma;

    // Compute weights
    for (i = 0; i < s->filter_size; i++) {
        const int x = i - offset;

        s->weights[i] = c1 * exp(-x * x / c2);
        total_weight += s->weights[i];
    }

    // Adjust weights
    adjust = 1.0 / total_weight;
    for (i = 0; i < s->filter_size; i++) {
        s->weights[i] *= adjust;
    }
}
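
/* The weights form a normalized Gaussian window centred on the middle tap:
 *   w[i] = exp(-(i - offset)^2 / (2 * sigma^2)) / (sigma * sqrt(2 * pi))
 * with sigma = ((filter_size / 2 - 1) / 3) + 1/3, rescaled afterwards so the
 * weights sum to exactly 1. */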

static av_cold void uninit(AVFilterContext *ctx)
{
    DynamicAudioNormalizerContext *s = ctx->priv;
    int c;

    av_freep(&s->prev_amplification_factor);
    av_freep(&s->dc_correction_value);
    av_freep(&s->compress_threshold);

    for (c = 0; c < s->channels; c++) {
        if (s->gain_history_original)
            cqueue_free(s->gain_history_original[c]);
        if (s->gain_history_minimum)
            cqueue_free(s->gain_history_minimum[c]);
        if (s->gain_history_smoothed)
            cqueue_free(s->gain_history_smoothed[c]);
        if (s->threshold_history)
            cqueue_free(s->threshold_history[c]);
    }

    av_freep(&s->gain_history_original);
    av_freep(&s->gain_history_minimum);
    av_freep(&s->gain_history_smoothed);
    av_freep(&s->threshold_history);

    cqueue_free(s->is_enabled);
    s->is_enabled = NULL;

    av_freep(&s->weights);

    ff_bufqueue_discard_all(&s->queue);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    int c;

    uninit(ctx);

    s->channels = inlink->channels;
    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
    av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);

    s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
    s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
    s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
    s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
    s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
    s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
    s->threshold_history = av_calloc(inlink->channels, sizeof(*s->threshold_history));
    s->weights = av_malloc_array(MAX_FILTER_SIZE, sizeof(*s->weights));
    s->is_enabled = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
    if (!s->prev_amplification_factor || !s->dc_correction_value ||
        !s->compress_threshold ||
        !s->gain_history_original || !s->gain_history_minimum ||
        !s->gain_history_smoothed || !s->threshold_history ||
        !s->is_enabled || !s->weights)
        return AVERROR(ENOMEM);

    for (c = 0; c < inlink->channels; c++) {
        s->prev_amplification_factor[c] = 1.0;

        s->gain_history_original[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->gain_history_minimum[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->gain_history_smoothed[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->threshold_history[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);

        if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
            !s->gain_history_smoothed[c] || !s->threshold_history[c])
            return AVERROR(ENOMEM);
    }

    init_gaussian_filter(s);

    return 0;
}

static inline double fade(double prev, double next, int pos, int length)
{
    const double step_size = 1.0 / length;
    const double f0 = 1.0 - (step_size * (pos + 1.0));
    const double f1 = 1.0 - f0;
    return f0 * prev + f1 * next;
}
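
/* fade() linearly interpolates between the previous and the next value over
 * one frame: at pos == 0 the result is still almost entirely `prev`, and at
 * pos == length - 1 it has reached `next` exactly. */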

static inline double pow_2(const double value)
{
    return value * value;
}

static inline double bound(const double threshold, const double val)
{
    const double CONST = 0.8862269254527580136490837416705725913987747280611935; // sqrt(PI) / 2.0
    return erf(CONST * (val / threshold)) * threshold;
}
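
/* bound() is a smooth limiter: erf() maps val into (-threshold, threshold)
 * while keeping a slope of exactly 1 at val == 0 (erf'(0) == 2/sqrt(pi)
 * cancels the sqrt(pi)/2 factor), so small values pass through unchanged and
 * large ones are compressed asymptotically towards the threshold. */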

static double find_peak_magnitude(AVFrame *frame, int channel)
{
    double max = DBL_EPSILON;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < frame->channels; c++) {
            double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++)
                max = FFMAX(max, fabs(data_ptr[i]));
        }
    } else {
        double *data_ptr = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++)
            max = FFMAX(max, fabs(data_ptr[i]));
    }

    return max;
}

static double compute_frame_rms(AVFrame *frame, int channel)
{
    double rms_value = 0.0;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < frame->channels; c++) {
            const double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++) {
                rms_value += pow_2(data_ptr[i]);
            }
        }

        rms_value /= frame->nb_samples * frame->channels;
    } else {
        const double *data_ptr = (double *)frame->extended_data[channel];
        for (i = 0; i < frame->nb_samples; i++) {
            rms_value += pow_2(data_ptr[i]);
        }

        rms_value /= frame->nb_samples;
    }

    return FFMAX(sqrt(rms_value), DBL_EPSILON);
}

static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
                                     int channel)
{
    const double peak_magnitude = find_peak_magnitude(frame, channel);
    const double maximum_gain = s->peak_value / peak_magnitude;
    const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
    local_gain gain;

    gain.threshold = peak_magnitude > s->threshold;
    gain.max_gain = bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));

    return gain;
}
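
/* The per-frame gain target is the smaller of the peak-based gain
 * (peak_value / frame peak) and the RMS-based gain (target_rms / frame RMS,
 * only when targetrms > 0), soft-limited to max_amplification via bound().
 * gain.threshold records whether the frame peak exceeded the configured
 * threshold and is later used to weight the Gaussian smoothing. */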

static double minimum_filter(cqueue *q)
{
    double min = DBL_MAX;
    int i;

    for (i = 0; i < cqueue_size(q); i++) {
        min = FFMIN(min, cqueue_peek(q, i));
    }

    return min;
}

static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq)
{
    double result = 0.0, tsum = 0.0;
    int i;

    for (i = 0; i < cqueue_size(q); i++) {
        tsum += cqueue_peek(tq, i) * s->weights[i];
        result += cqueue_peek(q, i) * s->weights[i] * cqueue_peek(tq, i);
    }

    if (tsum == 0.0)
        result = 1.0;

    return result;
}
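
/* Gaussian-weighted average of the gain history in q, gated by the matching
 * threshold history tq (entries are 0.0 or 1.0): frames whose peak stayed
 * below the configured threshold contribute nothing to the sum. If no frame
 * in the window exceeded the threshold (tsum == 0.0), the smoothed gain
 * falls back to 1.0, i.e. no amplification. */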

static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
                                local_gain gain)
{
    if (cqueue_empty(s->gain_history_original[channel])) {
        const int pre_fill_size = s->filter_size / 2;
        const double initial_value = s->alt_boundary_mode ? gain.max_gain : FFMIN(1.0, gain.max_gain);

        s->prev_amplification_factor[channel] = initial_value;

        while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
            cqueue_enqueue(s->gain_history_original[channel], initial_value);
            cqueue_enqueue(s->threshold_history[channel], gain.threshold);
        }
    }

    cqueue_enqueue(s->gain_history_original[channel], gain.max_gain);

    while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
        double minimum;

        if (cqueue_empty(s->gain_history_minimum[channel])) {
            const int pre_fill_size = s->filter_size / 2;
            double initial_value = s->alt_boundary_mode ? cqueue_peek(s->gain_history_original[channel], 0) : 1.0;
            int input = pre_fill_size;

            while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
                input++;
                initial_value = FFMIN(initial_value, cqueue_peek(s->gain_history_original[channel], input));
                cqueue_enqueue(s->gain_history_minimum[channel], initial_value);
            }
        }

        minimum = minimum_filter(s->gain_history_original[channel]);

        cqueue_enqueue(s->gain_history_minimum[channel], minimum);

        cqueue_enqueue(s->threshold_history[channel], gain.threshold);

        cqueue_pop(s->gain_history_original[channel]);
    }

    while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
        double smoothed, limit;

        smoothed = gaussian_filter(s, s->gain_history_minimum[channel], s->threshold_history[channel]);
        limit = cqueue_peek(s->gain_history_original[channel], 0);
        smoothed = FFMIN(smoothed, limit);

        cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);

        cqueue_pop(s->gain_history_minimum[channel]);
        cqueue_pop(s->threshold_history[channel]);
    }
}
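
/* Gains travel through a three-stage pipeline per channel:
 *   1. gain_history_original - raw per-frame gain from get_max_local_gain(),
 *   2. gain_history_minimum  - running minimum over the filter window, which
 *      keeps a quiet frame next to a loud one from being over-amplified,
 *   3. gain_history_smoothed - Gaussian-smoothed minimum, clamped so it never
 *      exceeds the oldest raw gain still in the window.
 * On the very first frame both histories are pre-filled for half a window so
 * output can start without waiting for a full window of lookahead. */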

static inline double update_value(double new, double old, double aggressiveness)
{
    av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));
    return aggressiveness * new + (1.0 - aggressiveness) * old;
}

static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    const double diff = 1.0 / frame->nb_samples;
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_average_value = 0.0;
        double prev_value;

        for (i = 0; i < frame->nb_samples; i++)
            current_average_value += dst_ptr[i] * diff;

        prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
        s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);

        for (i = 0; i < frame->nb_samples; i++) {
            dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, frame->nb_samples);
        }
    }
}

static double setup_compress_thresh(double threshold)
{
    if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {
        double current_threshold = threshold;
        double step_size = 1.0;

        while (step_size > DBL_EPSILON) {
            while ((llrint((current_threshold + step_size) * (UINT64_C(1) << 63)) >
                    llrint(current_threshold * (UINT64_C(1) << 63))) &&
                   (bound(current_threshold + step_size, 1.0) <= threshold)) {
                current_threshold += step_size;
            }

            step_size /= 2.0;
        }

        return current_threshold;
    } else {
        return threshold;
    }
}
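
/* Successive-approximation search: starting from the requested threshold, it
 * keeps enlarging the working threshold (halving the step each round) as long
 * as bound(current, 1.0) still stays at or below the requested value, so a
 * full-scale sample pushed through bound() lands at the requested level. The
 * llrint() comparison only stops the loop once adding the step no longer
 * changes the value at double precision. */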

static double compute_frame_std_dev(DynamicAudioNormalizerContext *s,
                                    AVFrame *frame, int channel)
{
    double variance = 0.0;
    int i, c;

    if (channel == -1) {
        for (c = 0; c < s->channels; c++) {
            const double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++) {
                variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
            }
        }
        variance /= (s->channels * frame->nb_samples) - 1;
    } else {
        const double *data_ptr = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++) {
            variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
        }
        variance /= frame->nb_samples - 1;
    }

    return FFMAX(sqrt(variance), DBL_EPSILON);
}

static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    if (s->channels_coupled) {
        const double standard_deviation = compute_frame_std_dev(s, frame, -1);
        const double current_threshold = FFMIN(1.0, s->compress_factor * standard_deviation);

        const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
        double prev_actual_thresh, curr_actual_thresh;
        s->compress_threshold[0] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));

        prev_actual_thresh = setup_compress_thresh(prev_value);
        curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);

        for (c = 0; c < s->channels; c++) {
            double *const dst_ptr = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
            }
        }
    } else {
        for (c = 0; c < s->channels; c++) {
            const double standard_deviation = compute_frame_std_dev(s, frame, c);
            const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * standard_deviation));

            const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
            double prev_actual_thresh, curr_actual_thresh;
            double *dst_ptr;
            s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);

            prev_actual_thresh = setup_compress_thresh(prev_value);
            curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);

            dst_ptr = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
            }
        }
    }
}
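
/* The optional compressor (compress_factor > 0) soft-limits every sample to
 * roughly compress_factor standard deviations of the current frame, smoothing
 * the limiting threshold across frames with update_value() and within the
 * frame with fade(), so the limiting level never jumps abruptly. */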

static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    if (s->dc_correction) {
        perform_dc_correction(s, frame);
    }

    if (s->compress_factor > DBL_EPSILON) {
        perform_compression(s, frame);
    }

    if (s->channels_coupled) {
        const local_gain gain = get_max_local_gain(s, frame, -1);
        int c;

        for (c = 0; c < s->channels; c++)
            update_gain_history(s, c, gain);
    } else {
        int c;

        for (c = 0; c < s->channels; c++)
            update_gain_history(s, c, get_max_local_gain(s, frame, c));
    }
}

static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *in,
                          AVFrame *frame, int enabled)
{
    int c, i;

    for (c = 0; c < s->channels; c++) {
        const double *src_ptr = (const double *)in->extended_data[c];
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_amplification_factor;

        cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);

        for (i = 0; i < frame->nb_samples && enabled; i++) {
            const double amplification_factor = fade(s->prev_amplification_factor[c],
                                                     current_amplification_factor, i,
                                                     frame->nb_samples);

            dst_ptr[i] = src_ptr[i] * amplification_factor;
        }

        s->prev_amplification_factor[c] = current_amplification_factor;
    }
}
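
/* Each output sample is scaled by a gain that fades linearly from the
 * previous frame's amplification factor to the newly dequeued smoothed one,
 * which avoids audible steps at frame boundaries. When the filter is
 * timeline-disabled the gain history is still consumed so it stays in sync,
 * but the samples are not rescaled. */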

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret = 1;

    while (((s->queue.available >= s->filter_size) ||
            (s->eof && s->queue.available)) &&
           !cqueue_empty(s->gain_history_smoothed[0])) {
        AVFrame *in = ff_bufqueue_get(&s->queue);
        AVFrame *out;
        double is_enabled;

        cqueue_dequeue(s->is_enabled, &is_enabled);

        if (av_frame_is_writable(in)) {
            out = in;
        } else {
            out = ff_get_audio_buffer(outlink, in->nb_samples);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
        }

        amplify_frame(s, in, out, is_enabled > 0.);
        s->pts = out->pts + av_rescale_q(out->nb_samples, av_make_q(1, outlink->sample_rate),
                                         outlink->time_base);
        if (out != in)
            av_frame_free(&in);
        ret = ff_filter_frame(outlink, out);
    }

    analyze_frame(s, in);
    if (!s->eof) {
        ff_bufqueue_add(ctx, &s->queue, in);
        cqueue_enqueue(s->is_enabled, !ctx->is_disabled);
    } else {
        av_frame_free(&in);
    }

    return ret;
}
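
/* filter_frame() first drains every queued frame for which a smoothed gain
 * factor is already available, then analyzes the incoming frame and queues it
 * for later output; output therefore lags input by the lookahead of the
 * smoothing window. */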

static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink,
                        AVFilterLink *outlink)
{
    AVFrame *out = ff_get_audio_buffer(outlink, s->frame_len);
    int c, i;

    if (!out)
        return AVERROR(ENOMEM);

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)out->extended_data[c];

        for (i = 0; i < out->nb_samples; i++) {
            dst_ptr[i] = s->alt_boundary_mode ? DBL_EPSILON : ((s->target_rms > DBL_EPSILON) ? FFMIN(s->peak_value, s->target_rms) : s->peak_value);
            if (s->dc_correction) {
                dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1;
                dst_ptr[i] += s->dc_correction_value[c];
            }
        }
    }

    return filter_frame(inlink, out);
}

static int flush(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    DynamicAudioNormalizerContext *s = ctx->priv;
    int ret = 0;

    if (!cqueue_empty(s->gain_history_smoothed[0])) {
        ret = flush_buffer(s, inlink, outlink);
    } else if (s->queue.available) {
        AVFrame *out = ff_bufqueue_get(&s->queue);

        s->pts = out->pts + av_rescale_q(out->nb_samples, av_make_q(1, outlink->sample_rate),
                                         outlink->time_base);
        ret = ff_filter_frame(outlink, out);
    }

    return ret;
}

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof) {
        ret = ff_inlink_consume_samples(inlink, s->frame_len, s->frame_len, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = filter_frame(inlink, in);
            if (ret <= 0)
                return ret;
        }

        if (ff_inlink_check_available_samples(inlink, s->frame_len) > 0) {
            ff_filter_set_ready(ctx, 10);
            return 0;
        }
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    if (s->eof && s->queue.available)
        return flush(outlink);

    if (s->eof && !s->queue.available) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int prev_filter_size = s->filter_size;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    s->filter_size |= 1;
    if (prev_filter_size != s->filter_size) {
        init_gaussian_filter(s);

        for (int c = 0; c < s->channels; c++) {
            cqueue_resize(s->gain_history_original[c], s->filter_size);
            cqueue_resize(s->gain_history_minimum[c], s->filter_size);
            cqueue_resize(s->threshold_history[c], s->filter_size);
        }
    }

    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);

    return 0;
}
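
/* Runtime example (editor's illustration, not from the original source):
 * changing the Gaussian window mid-stream re-runs init_gaussian_filter() and
 * resizes the per-channel histories, e.g. with the sendcmd filter:
 *   ffmpeg -i in.wav -af "sendcmd=c='10.0 dynaudnorm g 61',dynaudnorm" out.wav */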

static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

static const AVFilterPad avfilter_af_dynaudnorm_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

const AVFilter ff_af_dynaudnorm = {
    .name            = "dynaudnorm",
    .description     = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
    .priv_size       = sizeof(DynamicAudioNormalizerContext),
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(avfilter_af_dynaudnorm_inputs),
    FILTER_OUTPUTS(avfilter_af_dynaudnorm_outputs),
    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
    .priv_class      = &dynaudnorm_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = process_command,
};