FFmpeg
af_compand.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 1999 Chris Bagwell
3  * Copyright (c) 1999 Nick Bailey
4  * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
5  * Copyright (c) 2013 Paul B Mahol
6  * Copyright (c) 2014 Andrew Kelley
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * audio compand filter
28  */
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/ffmath.h"
33 #include "libavutil/mem.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/samplefmt.h"
36 #include "audio.h"
37 #include "avfilter.h"
38 #include "internal.h"
39 
/**
 * Per-channel envelope-follower state.
 * attack/decay are time constants in seconds when parsed, converted in
 * config_output() to per-sample smoothing coefficients in [0, 1].
 */
typedef struct ChanParam {
    double attack;  /* rise smoothing coefficient (applied when level increases) */
    double decay;   /* fall smoothing coefficient (applied when level decreases) */
    double volume;  /* currently tracked linear volume of the channel */
} ChanParam;
45 
/**
 * One piece of the transfer function, stored in the natural-log domain.
 * get_volume() evaluates it as y + t * (a*t + b) with t = in_log - x,
 * i.e. (a, b) are quadratic/linear interpolation coefficients.
 */
typedef struct CompandSegment {
    double x, y;  /* segment start point (log-domain input/output level) */
    double a, b;  /* quadratic and linear coefficients of the segment */
} CompandSegment;
51 typedef struct CompandContext {
52  const AVClass *class;
54  char *attacks, *decays, *points;
57  double in_min_lin;
58  double out_min_lin;
59  double curve_dB;
60  double gain_dB;
62  double delay;
68 
71 
72 #define OFFSET(x) offsetof(CompandContext, x)
73 #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
74 
75 static const AVOption compand_options[] = {
76  { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
77  { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
78  { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
79  { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
80  { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
81  { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
82  { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
83  { NULL }
84 };
85 
86 AVFILTER_DEFINE_CLASS(compand);
87 
89 {
90  CompandContext *s = ctx->priv;
91  s->pts = AV_NOPTS_VALUE;
92  return 0;
93 }
94 
96 {
97  CompandContext *s = ctx->priv;
98 
99  av_freep(&s->channels);
100  av_freep(&s->segments);
101  av_frame_free(&s->delay_frame);
102 }
103 
/**
 * Count the entries of a '|'- or space-separated list.
 * An empty string still counts as one item, matching the parsers below,
 * which treat a failed sscanf as "item present but not stored".
 */
static void count_items(char *item_str, int *nb_items)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == ' ' || *p == '|')
            (*nb_items)++;
    }
}
114 
115 static void update_volume(ChanParam *cp, double in)
116 {
117  double delta = in - cp->volume;
118 
119  if (delta > 0.0)
120  cp->volume += delta * cp->attack;
121  else
122  cp->volume += delta * cp->decay;
123 }
124 
125 static double get_volume(CompandContext *s, double in_lin)
126 {
127  CompandSegment *cs;
128  double in_log, out_log;
129  int i;
130 
131  if (in_lin < s->in_min_lin)
132  return s->out_min_lin;
133 
134  in_log = log(in_lin);
135 
136  for (i = 1; i < s->nb_segments; i++)
137  if (in_log <= s->segments[i].x)
138  break;
139  cs = &s->segments[i - 1];
140  in_log -= cs->x;
141  out_log = cs->y + in_log * (cs->a * in_log + cs->b);
142 
143  return exp(out_log);
144 }
145 
147 {
148  CompandContext *s = ctx->priv;
149  AVFilterLink *inlink = ctx->inputs[0];
150  const int channels = inlink->ch_layout.nb_channels;
151  const int nb_samples = frame->nb_samples;
152  AVFrame *out_frame;
153  int chan, i;
154  int err;
155 
157  out_frame = frame;
158  } else {
159  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
160  if (!out_frame) {
162  return AVERROR(ENOMEM);
163  }
164  err = av_frame_copy_props(out_frame, frame);
165  if (err < 0) {
166  av_frame_free(&out_frame);
168  return err;
169  }
170  }
171 
172  for (chan = 0; chan < channels; chan++) {
173  const double *src = (double *)frame->extended_data[chan];
174  double *dst = (double *)out_frame->extended_data[chan];
175  ChanParam *cp = &s->channels[chan];
176 
177  for (i = 0; i < nb_samples; i++) {
178  update_volume(cp, fabs(src[i]));
179 
180  dst[i] = src[i] * get_volume(s, cp->volume);
181  }
182  }
183 
184  if (frame != out_frame)
186 
187  return ff_filter_frame(ctx->outputs[0], out_frame);
188 }
189 
190 #define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
191 
193 {
194  CompandContext *s = ctx->priv;
195  AVFilterLink *inlink = ctx->inputs[0];
196  const int channels = inlink->ch_layout.nb_channels;
197  const int nb_samples = frame->nb_samples;
198  int chan, i, av_uninit(dindex), oindex, av_uninit(count);
199  AVFrame *out_frame = NULL;
200  int err;
201 
202  if (s->pts == AV_NOPTS_VALUE) {
203  s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
204  }
205 
206  av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
207 
208  for (chan = 0; chan < channels; chan++) {
209  AVFrame *delay_frame = s->delay_frame;
210  const double *src = (double *)frame->extended_data[chan];
211  double *dbuf = (double *)delay_frame->extended_data[chan];
212  ChanParam *cp = &s->channels[chan];
213  double *dst;
214 
215  count = s->delay_count;
216  dindex = s->delay_index;
217  for (i = 0, oindex = 0; i < nb_samples; i++) {
218  const double in = src[i];
219  update_volume(cp, fabs(in));
220 
221  if (count >= s->delay_samples) {
222  if (!out_frame) {
223  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
224  if (!out_frame) {
226  return AVERROR(ENOMEM);
227  }
228  err = av_frame_copy_props(out_frame, frame);
229  if (err < 0) {
230  av_frame_free(&out_frame);
232  return err;
233  }
234  out_frame->pts = s->pts;
235  s->pts += av_rescale_q(nb_samples - i,
236  (AVRational){ 1, inlink->sample_rate },
237  inlink->time_base);
238  }
239 
240  dst = (double *)out_frame->extended_data[chan];
241  dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
242  } else {
243  count++;
244  }
245 
246  dbuf[dindex] = in;
247  dindex = MOD(dindex + 1, s->delay_samples);
248  }
249  }
250 
251  s->delay_count = count;
252  s->delay_index = dindex;
253 
255 
256  if (out_frame) {
257  err = ff_filter_frame(ctx->outputs[0], out_frame);
258  return err;
259  }
260 
261  return 0;
262 }
263 
264 static int compand_drain(AVFilterLink *outlink)
265 {
266  AVFilterContext *ctx = outlink->src;
267  CompandContext *s = ctx->priv;
268  const int channels = outlink->ch_layout.nb_channels;
269  AVFrame *frame = NULL;
270  int chan, i, dindex;
271 
272  /* 2048 is to limit output frame size during drain */
273  frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
274  if (!frame)
275  return AVERROR(ENOMEM);
276  frame->pts = s->pts;
277  s->pts += av_rescale_q(frame->nb_samples,
278  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
279 
280  av_assert0(channels > 0);
281  for (chan = 0; chan < channels; chan++) {
282  AVFrame *delay_frame = s->delay_frame;
283  double *dbuf = (double *)delay_frame->extended_data[chan];
284  double *dst = (double *)frame->extended_data[chan];
285  ChanParam *cp = &s->channels[chan];
286 
287  dindex = s->delay_index;
288  for (i = 0; i < frame->nb_samples; i++) {
289  dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
290  dindex = MOD(dindex + 1, s->delay_samples);
291  }
292  }
293  s->delay_count -= frame->nb_samples;
294  s->delay_index = dindex;
295 
296  return ff_filter_frame(outlink, frame);
297 }
298 
299 static int config_output(AVFilterLink *outlink)
300 {
301  AVFilterContext *ctx = outlink->src;
302  CompandContext *s = ctx->priv;
303  const int sample_rate = outlink->sample_rate;
304  double radius = s->curve_dB * M_LN10 / 20.0;
305  char *p, *saveptr = NULL;
306  const int channels = outlink->ch_layout.nb_channels;
307  int nb_attacks, nb_decays, nb_points;
308  int new_nb_items, num;
309  int i;
310 
311  count_items(s->attacks, &nb_attacks);
312  count_items(s->decays, &nb_decays);
313  count_items(s->points, &nb_points);
314 
315  if (channels <= 0) {
316  av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
317  return AVERROR(EINVAL);
318  }
319 
320  if (nb_attacks > channels || nb_decays > channels) {
322  "Number of attacks/decays bigger than number of channels. Ignoring rest of entries.\n");
323  nb_attacks = FFMIN(nb_attacks, channels);
324  nb_decays = FFMIN(nb_decays, channels);
325  }
326 
327  uninit(ctx);
328 
329  s->channels = av_calloc(channels, sizeof(*s->channels));
330  s->nb_segments = (nb_points + 4) * 2;
331  s->segments = av_calloc(s->nb_segments, sizeof(*s->segments));
332 
333  if (!s->channels || !s->segments) {
334  uninit(ctx);
335  return AVERROR(ENOMEM);
336  }
337 
338  p = s->attacks;
339  for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
340  char *tstr = av_strtok(p, " |", &saveptr);
341  if (!tstr) {
342  uninit(ctx);
343  return AVERROR(EINVAL);
344  }
345  p = NULL;
346  new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
347  if (s->channels[i].attack < 0) {
348  uninit(ctx);
349  return AVERROR(EINVAL);
350  }
351  }
352  nb_attacks = new_nb_items;
353 
354  p = s->decays;
355  for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
356  char *tstr = av_strtok(p, " |", &saveptr);
357  if (!tstr) {
358  uninit(ctx);
359  return AVERROR(EINVAL);
360  }
361  p = NULL;
362  new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
363  if (s->channels[i].decay < 0) {
364  uninit(ctx);
365  return AVERROR(EINVAL);
366  }
367  }
368  nb_decays = new_nb_items;
369 
370  if (nb_attacks != nb_decays) {
372  "Number of attacks %d differs from number of decays %d.\n",
373  nb_attacks, nb_decays);
374  uninit(ctx);
375  return AVERROR(EINVAL);
376  }
377 
378  for (i = nb_decays; i < channels; i++) {
379  s->channels[i].attack = s->channels[nb_decays - 1].attack;
380  s->channels[i].decay = s->channels[nb_decays - 1].decay;
381  }
382 
383 #define S(x) s->segments[2 * ((x) + 1)]
384  p = s->points;
385  for (i = 0, new_nb_items = 0; i < nb_points; i++) {
386  char *tstr = av_strtok(p, " |", &saveptr);
387  p = NULL;
388  if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
390  "Invalid and/or missing input/output value.\n");
391  uninit(ctx);
392  return AVERROR(EINVAL);
393  }
394  if (i && S(i - 1).x > S(i).x) {
396  "Transfer function input values must be increasing.\n");
397  uninit(ctx);
398  return AVERROR(EINVAL);
399  }
400  S(i).y -= S(i).x;
401  av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
402  new_nb_items++;
403  }
404  num = new_nb_items;
405 
406  /* Add 0,0 if necessary */
407  if (num == 0 || S(num - 1).x)
408  num++;
409 
410 #undef S
411 #define S(x) s->segments[2 * (x)]
412  /* Add a tail off segment at the start */
413  S(0).x = S(1).x - 2 * s->curve_dB;
414  S(0).y = S(1).y;
415  num++;
416 
417  /* Join adjacent colinear segments */
418  for (i = 2; i < num; i++) {
419  double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
420  double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
421  int j;
422 
423  if (fabs(g1 - g2))
424  continue;
425  num--;
426  for (j = --i; j < num; j++)
427  S(j) = S(j + 1);
428  }
429 
430  for (i = 0; i < s->nb_segments; i += 2) {
431  s->segments[i].y += s->gain_dB;
432  s->segments[i].x *= M_LN10 / 20;
433  s->segments[i].y *= M_LN10 / 20;
434  }
435 
436 #define L(x) s->segments[i - (x)]
437  for (i = 4; i < s->nb_segments; i += 2) {
438  double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
439 
440  L(4).a = 0;
441  L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
442 
443  L(2).a = 0;
444  L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
445 
446  theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
447  len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
448  r = FFMIN(radius, len);
449  L(3).x = L(2).x - r * cos(theta);
450  L(3).y = L(2).y - r * sin(theta);
451 
452  theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
453  len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
454  r = FFMIN(radius, len / 2);
455  x = L(2).x + r * cos(theta);
456  y = L(2).y + r * sin(theta);
457 
458  cx = (L(3).x + L(2).x + x) / 3;
459  cy = (L(3).y + L(2).y + y) / 3;
460 
461  L(2).x = x;
462  L(2).y = y;
463 
464  in1 = cx - L(3).x;
465  out1 = cy - L(3).y;
466  in2 = L(2).x - L(3).x;
467  out2 = L(2).y - L(3).y;
468  L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
469  L(3).b = out1 / in1 - L(3).a * in1;
470  }
471  L(3).x = 0;
472  L(3).y = L(2).y;
473 
474  s->in_min_lin = exp(s->segments[1].x);
475  s->out_min_lin = exp(s->segments[1].y);
476 
477  for (i = 0; i < channels; i++) {
478  ChanParam *cp = &s->channels[i];
479 
480  if (cp->attack > 1.0 / sample_rate)
481  cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
482  else
483  cp->attack = 1.0;
484  if (cp->decay > 1.0 / sample_rate)
485  cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
486  else
487  cp->decay = 1.0;
488  cp->volume = ff_exp10(s->initial_volume / 20);
489  }
490 
491  s->delay_samples = s->delay * sample_rate;
492  if (s->delay_samples <= 0) {
493  s->compand = compand_nodelay;
494  return 0;
495  }
496 
497  s->delay_frame = ff_get_audio_buffer(outlink, s->delay_samples);
498  if (!s->delay_frame)
499  return AVERROR(ENOMEM);
500 
501  s->compand = compand_delay;
502  return 0;
503 }
504 
506 {
507  AVFilterContext *ctx = inlink->dst;
508  CompandContext *s = ctx->priv;
509 
510  return s->compand(ctx, frame);
511 }
512 
513 static int request_frame(AVFilterLink *outlink)
514 {
515  AVFilterContext *ctx = outlink->src;
516  CompandContext *s = ctx->priv;
517  int ret = 0;
518 
519  ret = ff_request_frame(ctx->inputs[0]);
520 
521  if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
522  ret = compand_drain(outlink);
523 
524  return ret;
525 }
526 
527 static const AVFilterPad compand_inputs[] = {
528  {
529  .name = "default",
530  .type = AVMEDIA_TYPE_AUDIO,
531  .filter_frame = filter_frame,
532  },
533 };
534 
535 static const AVFilterPad compand_outputs[] = {
536  {
537  .name = "default",
538  .request_frame = request_frame,
539  .config_props = config_output,
540  .type = AVMEDIA_TYPE_AUDIO,
541  },
542 };
543 
544 
546  .name = "compand",
547  .description = NULL_IF_CONFIG_SMALL(
548  "Compress or expand audio dynamic range."),
549  .priv_size = sizeof(CompandContext),
550  .priv_class = &compand_class,
551  .init = init,
552  .uninit = uninit,
556 };
CompandContext::delay
double delay
Definition: af_compand.c:62
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
ff_exp10
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
Definition: ffmath.h:42
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
compand_delay
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
Definition: af_compand.c:192
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
ff_af_compand
const AVFilter ff_af_compand
Definition: af_compand.c:545
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
FILTER_SINGLE_SAMPLEFMT
#define FILTER_SINGLE_SAMPLEFMT(sample_fmt_)
Definition: internal.h:175
CompandSegment::b
double b
Definition: af_compand.c:48
int64_t
long long int64_t
Definition: coverity.c:34
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
normalize.log
log
Definition: normalize.py:21
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:486
AVOption
AVOption.
Definition: opt.h:357
ff_request_frame
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:463
OFFSET
#define OFFSET(x)
Definition: af_compand.c:72
CompandContext::out_min_lin
double out_min_lin
Definition: af_compand.c:58
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_compand.c:88
CompandSegment::x
double x
Definition: af_compand.c:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
CompandSegment
Definition: af_compand.c:46
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(compand)
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
CompandContext
Definition: af_compand.c:51
request_frame
static int request_frame(AVFilterLink *outlink)
Definition: af_compand.c:513
samplefmt.h
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_compand.c:299
CompandContext::attacks
char * attacks
Definition: af_compand.c:54
ChanParam::attack
double attack
Definition: af_compand.c:41
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
compand_options
static const AVOption compand_options[]
Definition: af_compand.c:75
CompandContext::delay_frame
AVFrame * delay_frame
Definition: af_compand.c:63
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
CompandContext::curve_dB
double curve_dB
Definition: af_compand.c:59
av_cold
#define av_cold
Definition: attributes.h:90
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:247
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:49
channels
channels
Definition: aptx.h:31
CompandContext::delay_samples
int delay_samples
Definition: af_compand.c:64
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
compand_nodelay
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
Definition: af_compand.c:146
compand_inputs
static const AVFilterPad compand_inputs[]
Definition: af_compand.c:527
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
CompandContext::channels
ChanParam * channels
Definition: af_compand.c:56
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
get_volume
static double get_volume(CompandContext *s, double in_lin)
Definition: af_compand.c:125
CompandContext::decays
char * decays
Definition: af_compand.c:54
update_volume
static void update_volume(ChanParam *cp, double in)
Definition: af_compand.c:115
CompandContext::compand
int(* compand)(AVFilterContext *ctx, AVFrame *frame)
Definition: af_compand.c:69
exp
int8_t exp
Definition: eval.c:73
CompandContext::segments
CompandSegment * segments
Definition: af_compand.c:55
compand_drain
static int compand_drain(AVFilterLink *outlink)
Definition: af_compand.c:264
CompandContext::in_min_lin
double in_min_lin
Definition: af_compand.c:57
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
L
#define L(x)
MOD
#define MOD(a, b)
Definition: af_compand.c:190
CompandContext::nb_segments
int nb_segments
Definition: af_compand.c:53
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
CompandContext::delay_index
int delay_index
Definition: af_compand.c:66
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: af_compand.c:505
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:645
CompandContext::delay_count
int delay_count
Definition: af_compand.c:65
CompandContext::points
char * points
Definition: af_compand.c:54
A
#define A
Definition: af_compand.c:73
CompandContext::gain_dB
double gain_dB
Definition: af_compand.c:60
internal.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
CompandContext::pts
int64_t pts
Definition: af_compand.c:67
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:435
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
delta
float delta
Definition: vorbis_enc_data.h:430
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
len
int len
Definition: vorbis_enc_data.h:426
CompandSegment::y
double y
Definition: af_compand.c:47
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
AVFilter
Filter definition.
Definition: avfilter.h:166
av_uninit
#define av_uninit(x)
Definition: attributes.h:154
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
S
#define S(x)
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_compand.c:95
avfilter.h
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:67
ffmath.h
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
count_items
static void count_items(char *item_str, int *nb_items)
Definition: af_compand.c:104
mem.h
audio.h
M_LN10
#define M_LN10
Definition: mathematics.h:49
ChanParam
Definition: af_compand.c:40
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:249
int
int
Definition: ffmpeg_filter.c:424
ChanParam::decay
double decay
Definition: af_compand.c:42
CompandSegment::a
double a
Definition: af_compand.c:48
CompandContext::initial_volume
double initial_volume
Definition: af_compand.c:61
ChanParam::volume
double volume
Definition: af_compand.c:43
compand_outputs
static const AVFilterPad compand_outputs[]
Definition: af_compand.c:535