FFmpeg
af_compand.c
1 /*
2  * Copyright (c) 1999 Chris Bagwell
3  * Copyright (c) 1999 Nick Bailey
4  * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
5  * Copyright (c) 2013 Paul B Mahol
6  * Copyright (c) 2014 Andrew Kelley
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * audio compand filter
28  */
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/ffmath.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/samplefmt.h"
35 #include "audio.h"
36 #include "avfilter.h"
37 #include "internal.h"
38 
39 typedef struct ChanParam {
40  double attack;
41  double decay;
42  double volume;
43 } ChanParam;
44 
45 typedef struct CompandSegment {
46  double x, y;
47  double a, b;
48 } CompandSegment;
49 
50 typedef struct CompandContext {
51  const AVClass *class;
52  int nb_segments;
53  char *attacks, *decays, *points;
54  CompandSegment *segments;
55  ChanParam *channels;
56  double in_min_lin;
57  double out_min_lin;
58  double curve_dB;
59  double gain_dB;
60  double initial_volume;
61  double delay;
62  AVFrame *delay_frame;
63  int delay_samples;
64  int delay_count;
65  int delay_index;
66  int64_t pts;
67 
68  int (*compand)(AVFilterContext *ctx, AVFrame *frame);
69 } CompandContext;
70 
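/*
 * Per-instance state (summary, not part of the original comments): 'segments'
 * holds the transfer curve as segment descriptors built in config_output(),
 * 'channels' the per-channel envelope state, and delay_frame/delay_samples/
 * delay_count/delay_index implement the optional look-ahead ring buffer.
 * 'compand' points at compand_nodelay() or compand_delay() depending on the
 * delay option.
 */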
71 #define OFFSET(x) offsetof(CompandContext, x)
72 #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
73 
74 static const AVOption compand_options[] = {
75  { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
76  { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
77  { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
78  { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
79  { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
80  { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
81  { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
82  { NULL }
83 };
84 
85 AVFILTER_DEFINE_CLASS(compand);
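/*
 * Example (illustrative only): the filter is normally driven through the
 * option strings above, e.g. from the command line with something like
 *   ffmpeg -i in.wav -af "compand=attacks=0.3:decays=0.8:points=-70/-70|-60/-20|1/0" out.wav
 * With the default points, input above -60 dB is compressed into roughly the
 * -20..0 dB range, while input below -60 dB is expanded steeply down towards
 * -70 dB, which acts like a gentle noise gate.
 */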
86 
87 static av_cold int init(AVFilterContext *ctx)
88 {
89  CompandContext *s = ctx->priv;
90  s->pts = AV_NOPTS_VALUE;
91  return 0;
92 }
93 
94 static av_cold void uninit(AVFilterContext *ctx)
95 {
96  CompandContext *s = ctx->priv;
97 
98  av_freep(&s->channels);
99  av_freep(&s->segments);
100  av_frame_free(&s->delay_frame);
101 }
102 
103 static void count_items(char *item_str, int *nb_items)
104 {
105  char *p;
106 
107  *nb_items = 1;
108  for (p = item_str; *p; p++) {
109  if (*p == ' ' || *p == '|')
110  (*nb_items)++;
111  }
112 }
113 
114 static void update_volume(ChanParam *cp, double in)
115 {
116  double delta = in - cp->volume;
117 
118  if (delta > 0.0)
119  cp->volume += delta * cp->attack;
120  else
121  cp->volume += delta * cp->decay;
122 }
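/*
 * update_volume() is a one-pole envelope follower: when the rectified input
 * rises above the running estimate the attack coefficient is applied,
 * otherwise the decay coefficient.  Both are precomputed in config_output()
 * as 1 - exp(-1 / (sample_rate * time)); e.g. an attack of 0.3 s at 44100 Hz
 * yields a per-sample smoothing factor of roughly 7.6e-5.
 */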
123 
124 static double get_volume(CompandContext *s, double in_lin)
125 {
126  CompandSegment *cs;
127  double in_log, out_log;
128  int i;
129 
130  if (in_lin < s->in_min_lin)
131  return s->out_min_lin;
132 
133  in_log = log(in_lin);
134 
135  for (i = 1; i < s->nb_segments; i++)
136  if (in_log <= s->segments[i].x)
137  break;
138  cs = &s->segments[i - 1];
139  in_log -= cs->x;
140  out_log = cs->y + in_log * (cs->a * in_log + cs->b);
141 
142  return exp(out_log);
143 }
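/*
 * get_volume() maps the current envelope level (linear) to the gain to apply.
 * Levels below in_min_lin short-circuit to out_min_lin; otherwise the segment
 * covering log(in_lin) is found and the quadratic y + dx * (a * dx + b) is
 * evaluated in the natural-log domain.  Because config_output() stores each
 * point's y as (output - input), exp() of the result is a gain factor rather
 * than an absolute output level.
 */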
144 
145 static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
146 {
147  CompandContext *s = ctx->priv;
148  AVFilterLink *inlink = ctx->inputs[0];
149  const int channels = inlink->channels;
150  const int nb_samples = frame->nb_samples;
151  AVFrame *out_frame;
152  int chan, i;
153  int err;
154 
155  if (av_frame_is_writable(frame)) {
156  out_frame = frame;
157  } else {
158  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
159  if (!out_frame) {
160  av_frame_free(&frame);
161  return AVERROR(ENOMEM);
162  }
163  err = av_frame_copy_props(out_frame, frame);
164  if (err < 0) {
165  av_frame_free(&out_frame);
166  av_frame_free(&frame);
167  return err;
168  }
169  }
170 
171  for (chan = 0; chan < channels; chan++) {
172  const double *src = (double *)frame->extended_data[chan];
173  double *dst = (double *)out_frame->extended_data[chan];
174  ChanParam *cp = &s->channels[chan];
175 
176  for (i = 0; i < nb_samples; i++) {
177  update_volume(cp, fabs(src[i]));
178 
179  dst[i] = src[i] * get_volume(s, cp->volume);
180  }
181  }
182 
183  if (frame != out_frame)
184  av_frame_free(&frame);
185 
186  return ff_filter_frame(ctx->outputs[0], out_frame);
187 }
188 
189 #define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
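/*
 * MOD(a, b) is a single conditional subtraction rather than a full modulo;
 * that is enough here because the ring-buffer index is only ever advanced by
 * one, so the argument never reaches 2 * delay_samples.
 */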
190 
191 static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
192 {
193  CompandContext *s = ctx->priv;
194  AVFilterLink *inlink = ctx->inputs[0];
195  const int channels = inlink->channels;
196  const int nb_samples = frame->nb_samples;
197  int chan, i, av_uninit(dindex), oindex, av_uninit(count);
198  AVFrame *out_frame = NULL;
199  int err;
200 
201  if (s->pts == AV_NOPTS_VALUE) {
202  s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
203  }
204 
205  av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
206 
207  for (chan = 0; chan < channels; chan++) {
208  AVFrame *delay_frame = s->delay_frame;
209  const double *src = (double *)frame->extended_data[chan];
210  double *dbuf = (double *)delay_frame->extended_data[chan];
211  ChanParam *cp = &s->channels[chan];
212  double *dst;
213 
214  count = s->delay_count;
215  dindex = s->delay_index;
216  for (i = 0, oindex = 0; i < nb_samples; i++) {
217  const double in = src[i];
218  update_volume(cp, fabs(in));
219 
220  if (count >= s->delay_samples) {
221  if (!out_frame) {
222  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
223  if (!out_frame) {
224  av_frame_free(&frame);
225  return AVERROR(ENOMEM);
226  }
227  err = av_frame_copy_props(out_frame, frame);
228  if (err < 0) {
229  av_frame_free(&out_frame);
230  av_frame_free(&frame);
231  return err;
232  }
233  out_frame->pts = s->pts;
234  s->pts += av_rescale_q(nb_samples - i,
235  (AVRational){ 1, inlink->sample_rate },
236  inlink->time_base);
237  }
238 
239  dst = (double *)out_frame->extended_data[chan];
240  dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
241  } else {
242  count++;
243  }
244 
245  dbuf[dindex] = in;
246  dindex = MOD(dindex + 1, s->delay_samples);
247  }
248  }
249 
250  s->delay_count = count;
251  s->delay_index = dindex;
252 
253  av_frame_free(&frame);
254 
255  if (out_frame) {
256  err = ff_filter_frame(ctx->outputs[0], out_frame);
257  return err;
258  }
259 
260  return 0;
261 }
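/*
 * compand_delay() keeps a per-channel ring buffer of delay_samples entries.
 * Input is only stored until the buffer fills; from then on the oldest
 * buffered sample is emitted, scaled by the gain derived from the current
 * envelope, so the gain computation effectively looks ahead of the audio by
 * the configured delay.  Samples still buffered at EOF are flushed by
 * compand_drain().
 */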
262 
263 static int compand_drain(AVFilterLink *outlink)
264 {
265  AVFilterContext *ctx = outlink->src;
266  CompandContext *s = ctx->priv;
267  const int channels = outlink->channels;
268  AVFrame *frame = NULL;
269  int chan, i, dindex;
270 
271  /* 2048 is to limit output frame size during drain */
272  frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
273  if (!frame)
274  return AVERROR(ENOMEM);
275  frame->pts = s->pts;
276  s->pts += av_rescale_q(frame->nb_samples,
277  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
278 
279  av_assert0(channels > 0);
280  for (chan = 0; chan < channels; chan++) {
281  AVFrame *delay_frame = s->delay_frame;
282  double *dbuf = (double *)delay_frame->extended_data[chan];
283  double *dst = (double *)frame->extended_data[chan];
284  ChanParam *cp = &s->channels[chan];
285 
286  dindex = s->delay_index;
287  for (i = 0; i < frame->nb_samples; i++) {
288  dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
289  dindex = MOD(dindex + 1, s->delay_samples);
290  }
291  }
292  s->delay_count -= frame->nb_samples;
293  s->delay_index = dindex;
294 
295  return ff_filter_frame(outlink, frame);
296 }
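/*
 * compand_drain() is called from request_frame() once the input reports EOF:
 * each call emits up to 2048 of the remaining delayed samples, and it is
 * invoked again on subsequent requests until delay_count reaches zero.
 */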
297 
298 static int config_output(AVFilterLink *outlink)
299 {
300  AVFilterContext *ctx = outlink->src;
301  CompandContext *s = ctx->priv;
302  const int sample_rate = outlink->sample_rate;
303  double radius = s->curve_dB * M_LN10 / 20.0;
304  char *p, *saveptr = NULL;
305  const int channels = outlink->channels;
306  int nb_attacks, nb_decays, nb_points;
307  int new_nb_items, num;
308  int i;
309  int err;
310 
311 
312  count_items(s->attacks, &nb_attacks);
313  count_items(s->decays, &nb_decays);
314  count_items(s->points, &nb_points);
315 
316  if (channels <= 0) {
317  av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
318  return AVERROR(EINVAL);
319  }
320 
321  if (nb_attacks > channels || nb_decays > channels) {
322  av_log(ctx, AV_LOG_WARNING,
323  "Number of attacks/decays bigger than number of channels. Ignoring rest of entries.\n");
324  nb_attacks = FFMIN(nb_attacks, channels);
325  nb_decays = FFMIN(nb_decays, channels);
326  }
327 
328  uninit(ctx);
329 
330  s->channels = av_calloc(channels, sizeof(*s->channels));
331  s->nb_segments = (nb_points + 4) * 2;
332  s->segments = av_calloc(s->nb_segments, sizeof(*s->segments));
333 
334  if (!s->channels || !s->segments) {
335  uninit(ctx);
336  return AVERROR(ENOMEM);
337  }
338 
339  p = s->attacks;
340  for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
341  char *tstr = av_strtok(p, " |", &saveptr);
342  if (!tstr) {
343  uninit(ctx);
344  return AVERROR(EINVAL);
345  }
346  p = NULL;
347  new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
348  if (s->channels[i].attack < 0) {
349  uninit(ctx);
350  return AVERROR(EINVAL);
351  }
352  }
353  nb_attacks = new_nb_items;
354 
355  p = s->decays;
356  for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
357  char *tstr = av_strtok(p, " |", &saveptr);
358  if (!tstr) {
359  uninit(ctx);
360  return AVERROR(EINVAL);
361  }
362  p = NULL;
363  new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
364  if (s->channels[i].decay < 0) {
365  uninit(ctx);
366  return AVERROR(EINVAL);
367  }
368  }
369  nb_decays = new_nb_items;
370 
371  if (nb_attacks != nb_decays) {
372  av_log(ctx, AV_LOG_ERROR,
373  "Number of attacks %d differs from number of decays %d.\n",
374  nb_attacks, nb_decays);
375  uninit(ctx);
376  return AVERROR(EINVAL);
377  }
378 
379  for (i = nb_decays; i < channels; i++) {
380  s->channels[i].attack = s->channels[nb_decays - 1].attack;
381  s->channels[i].decay = s->channels[nb_decays - 1].decay;
382  }
383 
384 #define S(x) s->segments[2 * ((x) + 1)]
385  p = s->points;
386  for (i = 0, new_nb_items = 0; i < nb_points; i++) {
387  char *tstr = av_strtok(p, " |", &saveptr);
388  p = NULL;
389  if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
390  av_log(ctx, AV_LOG_ERROR,
391  "Invalid and/or missing input/output value.\n");
392  uninit(ctx);
393  return AVERROR(EINVAL);
394  }
395  if (i && S(i - 1).x > S(i).x) {
396  av_log(ctx, AV_LOG_ERROR,
397  "Transfer function input values must be increasing.\n");
398  uninit(ctx);
399  return AVERROR(EINVAL);
400  }
401  S(i).y -= S(i).x;
402  av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
403  new_nb_items++;
404  }
405  num = new_nb_items;
406 
407  /* Add 0,0 if necessary */
408  if (num == 0 || S(num - 1).x)
409  num++;
410 
411 #undef S
412 #define S(x) s->segments[2 * (x)]
413  /* Add a tail off segment at the start */
414  S(0).x = S(1).x - 2 * s->curve_dB;
415  S(0).y = S(1).y;
416  num++;
417 
418  /* Join adjacent colinear segments */
419  for (i = 2; i < num; i++) {
420  double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
421  double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
422  int j;
423 
424  if (fabs(g1 - g2))
425  continue;
426  num--;
427  for (j = --i; j < num; j++)
428  S(j) = S(j + 1);
429  }
430 
431  for (i = 0; i < s->nb_segments; i += 2) {
432  s->segments[i].y += s->gain_dB;
433  s->segments[i].x *= M_LN10 / 20;
434  s->segments[i].y *= M_LN10 / 20;
435  }
436 
437 #define L(x) s->segments[i - (x)]
438  for (i = 4; i < s->nb_segments; i += 2) {
439  double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
440 
441  L(4).a = 0;
442  L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
443 
444  L(2).a = 0;
445  L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
446 
447  theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
448  len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
449  r = FFMIN(radius, len);
450  L(3).x = L(2).x - r * cos(theta);
451  L(3).y = L(2).y - r * sin(theta);
452 
453  theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
454  len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
455  r = FFMIN(radius, len / 2);
456  x = L(2).x + r * cos(theta);
457  y = L(2).y + r * sin(theta);
458 
459  cx = (L(3).x + L(2).x + x) / 3;
460  cy = (L(3).y + L(2).y + y) / 3;
461 
462  L(2).x = x;
463  L(2).y = y;
464 
465  in1 = cx - L(3).x;
466  out1 = cy - L(3).y;
467  in2 = L(2).x - L(3).x;
468  out2 = L(2).y - L(3).y;
469  L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
470  L(3).b = out1 / in1 - L(3).a * in1;
471  }
472  L(3).x = 0;
473  L(3).y = L(2).y;
474 
475  s->in_min_lin = exp(s->segments[1].x);
476  s->out_min_lin = exp(s->segments[1].y);
477 
478  for (i = 0; i < channels; i++) {
479  ChanParam *cp = &s->channels[i];
480 
481  if (cp->attack > 1.0 / sample_rate)
482  cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
483  else
484  cp->attack = 1.0;
485  if (cp->decay > 1.0 / sample_rate)
486  cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
487  else
488  cp->decay = 1.0;
489  cp->volume = ff_exp10(s->initial_volume / 20);
490  }
491 
492  s->delay_samples = s->delay * sample_rate;
493  if (s->delay_samples <= 0) {
494  s->compand = compand_nodelay;
495  return 0;
496  }
497 
498  s->delay_frame = av_frame_alloc();
499  if (!s->delay_frame) {
500  uninit(ctx);
501  return AVERROR(ENOMEM);
502  }
503 
504  s->delay_frame->format = outlink->format;
505  s->delay_frame->nb_samples = s->delay_samples;
506  s->delay_frame->channel_layout = outlink->channel_layout;
507 
508  err = av_frame_get_buffer(s->delay_frame, 0);
509  if (err)
510  return err;
511 
512  s->compand = compand_delay;
513  return 0;
514 }
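/*
 * config_output() parses the attack/decay/point strings, reuses the last
 * given attack/decay pair for any remaining channels, and builds the segment
 * table: a soft knee derived from the soft-knee option (curve_dB) is inserted
 * at each joint, gain_dB is added to the output values, and both axes are
 * scaled by ln(10)/20 so get_volume() can work directly on log(|sample|).
 * It then selects compand_nodelay() or compand_delay() depending on whether
 * the delay option amounts to at least one sample at the output sample rate.
 */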
515 
516 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
517 {
518  AVFilterContext *ctx = inlink->dst;
519  CompandContext *s = ctx->priv;
520 
521  return s->compand(ctx, frame);
522 }
523 
524 static int request_frame(AVFilterLink *outlink)
525 {
526  AVFilterContext *ctx = outlink->src;
527  CompandContext *s = ctx->priv;
528  int ret = 0;
529 
530  ret = ff_request_frame(ctx->inputs[0]);
531 
532  if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
533  ret = compand_drain(outlink);
534 
535  return ret;
536 }
537 
538 static const AVFilterPad compand_inputs[] = {
539  {
540  .name = "default",
541  .type = AVMEDIA_TYPE_AUDIO,
542  .filter_frame = filter_frame,
543  },
544 };
545 
546 static const AVFilterPad compand_outputs[] = {
547  {
548  .name = "default",
549  .request_frame = request_frame,
550  .config_props = config_output,
551  .type = AVMEDIA_TYPE_AUDIO,
552  },
553 };
554 
555 
556 const AVFilter ff_af_compand = {
557  .name = "compand",
558  .description = NULL_IF_CONFIG_SMALL(
559  "Compress or expand audio dynamic range."),
560  .priv_size = sizeof(CompandContext),
561  .priv_class = &compand_class,
562  .init = init,
563  .uninit = uninit,
564  FILTER_INPUTS(compand_inputs),
565  FILTER_OUTPUTS(compand_outputs),
566  FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
567 };