/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lookahead limiter filter
 */

#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

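/*
 * Overview: this is a feed-forward ("lookahead") limiter.  Incoming samples
 * are delayed in a ring buffer covering the attack time while a gain envelope
 * is planned ahead of the audio, so attenuation is already in place when a
 * peak reaches the output.  With the "asc" option the release stage aims at
 * an average gain-reduction level instead of always returning to unity gain.
 */
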
typedef struct MetaItem {
    int64_t pts;
    int nb_samples;
} MetaItem;

typedef struct AudioLimiterContext {
    const AVClass *class;

    double limit;
    double attack;
    double release;
    double att;
    double level_in;
    double level_out;
    int auto_release;
    int auto_level;
    double asc;
    int asc_c;
    int asc_pos;
    double asc_coeff;

    double *buffer;
    int buffer_size;
    int pos;
    int *nextpos;
    double *nextdelta;

    int in_trim;
    int out_pad;
    int64_t next_in_pts;
    int64_t next_out_pts;
    int latency;

    AVFifo *fifo;

    double delta;
    int nextiter;
    int nextlen;
    int asc_changed;
} AudioLimiterContext;

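/* The "buffer" array holds the delayed, interleaved samples.  "nextpos" and
 * "nextdelta" form a queue of upcoming peak positions in that ring buffer and
 * the release slope to switch to once each peak has passed; "att" is the
 * current gain and "delta" its per-sample change. */
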
#define OFFSET(x) offsetof(AudioLimiterContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption alimiter_options[] = {
    { "level_in",  "set input level",  OFFSET(level_in),     AV_OPT_TYPE_DOUBLE, {.dbl=1},   .015625,   64, AF },
    { "level_out", "set output level", OFFSET(level_out),    AV_OPT_TYPE_DOUBLE, {.dbl=1},   .015625,   64, AF },
    { "limit",     "set limit",        OFFSET(limit),        AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.0625,     1, AF },
    { "attack",    "set attack",       OFFSET(attack),       AV_OPT_TYPE_DOUBLE, {.dbl=5},   0.1,       80, AF },
    { "release",   "set release",      OFFSET(release),      AV_OPT_TYPE_DOUBLE, {.dbl=50},  1,       8000, AF },
    { "asc",       "enable asc",       OFFSET(auto_release), AV_OPT_TYPE_BOOL,   {.i64=0},   0,          1, AF },
    { "asc_level", "set asc level",    OFFSET(asc_coeff),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0,          1, AF },
    { "level",     "auto level",       OFFSET(auto_level),   AV_OPT_TYPE_BOOL,   {.i64=1},   0,          1, AF },
    { "latency",   "compensate delay", OFFSET(latency),      AV_OPT_TYPE_BOOL,   {.i64=0},   0,          1, AF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(alimiter);

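/* Example of setting these options in a filtergraph description, e.g. on the
 * ffmpeg command line (illustrative values):
 *
 *     -af "alimiter=level_in=1:limit=0.9:attack=5:release=50:asc=1"
 *
 * "attack" and "release" are given in milliseconds and converted to seconds
 * in init() below. */
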
static av_cold int init(AVFilterContext *ctx)
{
    AudioLimiterContext *s = ctx->priv;

    s->attack   /= 1000.;
    s->release  /= 1000.;
    s->att       = 1.;
    s->asc_pos   = -1;
    s->asc_coeff = pow(0.5, s->asc_coeff - 0.5) * 2 * -1;

    return 0;
}

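/* Returns the per-sample gain increment used while releasing: the base slope
 * (1.0 - patt) / (sample_rate * release) restores unity gain over the release
 * time; with ASC enabled the slope may be lowered so the gain settles near
 * the recent average reduction instead of releasing fully. */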
static double get_rdelta(AudioLimiterContext *s, double release, int sample_rate,
                         double peak, double limit, double patt, int asc)
{
    double rdelta = (1.0 - patt) / (sample_rate * release);

    if (asc && s->auto_release && s->asc_c > 0) {
        double a_att = limit / (s->asc_coeff * s->asc) * (double)s->asc_c;

        if (a_att > patt) {
            double delta = FFMAX((a_att - patt) / (sample_rate * release), rdelta / 10);

            if (delta < rdelta)
                rdelta = delta;
        }
    }

    return rdelta;
}

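/* Per input sample: write the level-adjusted sample into the lookahead ring
 * buffer, record any peak above the limit together with the gain slope needed
 * to reach it in time, then take the sample that is about to leave the buffer,
 * apply the smoothed gain "att", clip as a safety net and apply the output
 * level. */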
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioLimiterContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *src = (const double *)in->data[0];
    const int channels = inlink->ch_layout.nb_channels;
    const int buffer_size = s->buffer_size;
    double *dst, *buffer = s->buffer;
    const double release = s->release;
    const double limit = s->limit;
    double *nextdelta = s->nextdelta;
    double level = s->auto_level ? 1 / limit : 1;
    const double level_out = s->level_out;
    const double level_in = s->level_in;
    int *nextpos = s->nextpos;
    AVFrame *out;
    double *buf;
    int n, c, i;
    int new_out_samples;
    int64_t out_duration;
    int64_t in_duration;
    int64_t in_pts;
    MetaItem meta;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        double peak = 0;

        for (c = 0; c < channels; c++) {
            double sample = src[c] * level_in;

            buffer[s->pos + c] = sample;
            peak = FFMAX(peak, fabs(sample));
        }

        if (s->auto_release && peak > limit) {
            s->asc += peak;
            s->asc_c++;
        }

        if (peak > limit) {
            double patt = FFMIN(limit / peak, 1.);
            double rdelta = get_rdelta(s, release, inlink->sample_rate,
                                       peak, limit, patt, 0);
            double delta = (limit / peak - s->att) / buffer_size * channels;
            int found = 0;

            if (delta < s->delta) {
                s->delta = delta;
                nextpos[0] = s->pos;
                nextpos[1] = -1;
                nextdelta[0] = rdelta;
                s->nextlen = 1;
                s->nextiter= 0;
            } else {
                for (i = s->nextiter; i < s->nextiter + s->nextlen; i++) {
                    int j = i % buffer_size;
                    double ppeak, pdelta;

                    ppeak = fabs(buffer[nextpos[j]]) > fabs(buffer[nextpos[j] + 1]) ?
                            fabs(buffer[nextpos[j]]) : fabs(buffer[nextpos[j] + 1]);
                    pdelta = (limit / peak - limit / ppeak) / (((buffer_size - nextpos[j] + s->pos) % buffer_size) / channels);
                    if (pdelta < nextdelta[j]) {
                        nextdelta[j] = pdelta;
                        found = 1;
                        break;
                    }
                }
                if (found) {
                    s->nextlen = i - s->nextiter + 1;
                    nextpos[(s->nextiter + s->nextlen) % buffer_size] = s->pos;
                    nextdelta[(s->nextiter + s->nextlen) % buffer_size] = rdelta;
                    nextpos[(s->nextiter + s->nextlen + 1) % buffer_size] = -1;
                    s->nextlen++;
                }
            }
        }

        buf = &s->buffer[(s->pos + channels) % buffer_size];
        peak = 0;
        for (c = 0; c < channels; c++) {
            double sample = buf[c];

            peak = FFMAX(peak, fabs(sample));
        }

        if (s->pos == s->asc_pos && !s->asc_changed)
            s->asc_pos = -1;

        if (s->auto_release && s->asc_pos == -1 && peak > limit) {
            s->asc -= peak;
            s->asc_c--;
        }

        s->att += s->delta;

        for (c = 0; c < channels; c++)
            dst[c] = buf[c] * s->att;

        if ((s->pos + channels) % buffer_size == nextpos[s->nextiter]) {
            if (s->auto_release) {
                s->delta = get_rdelta(s, release, inlink->sample_rate,
                                      peak, limit, s->att, 1);
                if (s->nextlen > 1) {
                    int pnextpos = nextpos[(s->nextiter + 1) % buffer_size];
                    double ppeak = fabs(buffer[pnextpos]) > fabs(buffer[pnextpos + 1]) ?
                                   fabs(buffer[pnextpos]) :
                                   fabs(buffer[pnextpos + 1]);
                    double pdelta = (limit / ppeak - s->att) /
                                    (((buffer_size + pnextpos -
                                    ((s->pos + channels) % buffer_size)) %
                                    buffer_size) / channels);
                    if (pdelta < s->delta)
                        s->delta = pdelta;
                }
            } else {
                s->delta = nextdelta[s->nextiter];
                s->att = limit / peak;
            }

            s->nextlen -= 1;
            nextpos[s->nextiter] = -1;
            s->nextiter = (s->nextiter + 1) % buffer_size;
        }

        if (s->att > 1.) {
            s->att = 1.;
            s->delta = 0.;
            s->nextiter = 0;
            s->nextlen = 0;
            nextpos[0] = -1;
        }

        if (s->att <= 0.) {
            s->att = 0.0000000000001;
            s->delta = (1.0 - s->att) / (inlink->sample_rate * release);
        }

        if (s->att != 1. && (1. - s->att) < 0.0000000000001)
            s->att = 1.;

        if (s->delta != 0. && fabs(s->delta) < 0.00000000000001)
            s->delta = 0.;

        for (c = 0; c < channels; c++)
            dst[c] = av_clipd(dst[c], -limit, limit) * level * level_out;

        s->pos = (s->pos + channels) % buffer_size;
        src += channels;
        dst += channels;
    }

    in_duration = av_rescale_q(in->nb_samples, inlink->time_base, av_make_q(1, in->sample_rate));
    in_pts = in->pts;
    meta = (MetaItem){ in->pts, in->nb_samples };
    av_fifo_write(s->fifo, &meta, 1);
    if (in != out)
        av_frame_free(&in);

    new_out_samples = out->nb_samples;
    if (s->in_trim > 0) {
        int trim = FFMIN(new_out_samples, s->in_trim);
        new_out_samples -= trim;
        s->in_trim -= trim;
    }

    if (new_out_samples <= 0) {
        av_frame_free(&out);
        return 0;
    } else if (new_out_samples < out->nb_samples) {
        int offset = out->nb_samples - new_out_samples;
        memmove(out->extended_data[0], out->extended_data[0] + sizeof(double) * offset * out->ch_layout.nb_channels,
                sizeof(double) * new_out_samples * out->ch_layout.nb_channels);
        out->nb_samples = new_out_samples;
        s->in_trim = 0;
    }

    av_fifo_read(s->fifo, &meta, 1);

    out_duration = av_rescale_q(out->nb_samples, inlink->time_base, av_make_q(1, out->sample_rate));
    in_duration  = av_rescale_q(meta.nb_samples, inlink->time_base, av_make_q(1, out->sample_rate));
    in_pts = meta.pts;

    if (s->next_out_pts != AV_NOPTS_VALUE && out->pts != s->next_out_pts &&
        s->next_in_pts  != AV_NOPTS_VALUE && in_pts   == s->next_in_pts) {
        out->pts = s->next_out_pts;
    } else {
        out->pts = in_pts;
    }
    s->next_in_pts  = in_pts + in_duration;
    s->next_out_pts = out->pts + out_duration;

    return ff_filter_frame(outlink, out);
}

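/* When the "latency" option is set, filter_frame() trims the first
 * buffer_size / channels - 1 output samples and request_frame() feeds the
 * same amount of silence through the filter at EOF so the delayed tail of
 * the signal is flushed. */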
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioLimiterContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->out_pad > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(1024, s->out_pad));
        if (!frame)
            return AVERROR(ENOMEM);

        s->out_pad -= frame->nb_samples;
        frame->pts = s->next_in_pts;
        return filter_frame(ctx->inputs[0], frame);
    }
    return ret;
}

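/* All three work arrays are allocated for a fixed 100 ms of audio (enough for
 * the maximum 80 ms attack); only buffer_size elements, i.e. sample_rate *
 * attack seconds of interleaved samples rounded down to whole audio frames,
 * are actually cycled through. */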
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioLimiterContext *s = ctx->priv;
    int obuffer_size;

    obuffer_size = inlink->sample_rate * inlink->ch_layout.nb_channels * 100 / 1000. + inlink->ch_layout.nb_channels;
    if (obuffer_size < inlink->ch_layout.nb_channels)
        return AVERROR(EINVAL);

    s->buffer = av_calloc(obuffer_size, sizeof(*s->buffer));
    s->nextdelta = av_calloc(obuffer_size, sizeof(*s->nextdelta));
    s->nextpos = av_malloc_array(obuffer_size, sizeof(*s->nextpos));
    if (!s->buffer || !s->nextdelta || !s->nextpos)
        return AVERROR(ENOMEM);

    memset(s->nextpos, -1, obuffer_size * sizeof(*s->nextpos));
    s->buffer_size = inlink->sample_rate * s->attack * inlink->ch_layout.nb_channels;
    s->buffer_size -= s->buffer_size % inlink->ch_layout.nb_channels;
    if (s->latency)
        s->in_trim = s->out_pad = s->buffer_size / inlink->ch_layout.nb_channels - 1;
    s->next_out_pts = AV_NOPTS_VALUE;
    s->next_in_pts  = AV_NOPTS_VALUE;

    s->fifo = av_fifo_alloc2(8, sizeof(MetaItem), AV_FIFO_FLAG_AUTO_GROW);
    if (!s->fifo) {
        return AVERROR(ENOMEM);
    }

    if (s->buffer_size <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Attack is too small.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioLimiterContext *s = ctx->priv;

    av_freep(&s->buffer);
    av_freep(&s->nextdelta);
    av_freep(&s->nextpos);

    av_fifo_freep2(&s->fifo);
}

static const AVFilterPad alimiter_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad alimiter_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
};

const AVFilter ff_af_alimiter = {
    .name            = "alimiter",
    .description     = NULL_IF_CONFIG_SMALL("Audio lookahead limiter."),
    .priv_size       = sizeof(AudioLimiterContext),
    .priv_class      = &alimiter_class,
    .init            = init,
    .uninit          = uninit,
    FILTER_INPUTS(alimiter_inputs),
    FILTER_OUTPUTS(alimiter_outputs),
    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBL),
    .process_command = ff_filter_process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};