FFmpeg
af_afade.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * fade audio filter
24  */
25 
26 #include "libavutil/opt.h"
27 #include "audio.h"
28 #include "avfilter.h"
29 #include "filters.h"
30 #include "internal.h"
31 
32 typedef struct AudioFadeContext {
33  const AVClass *class;
34  int type;
35  int curve, curve2;
36  int64_t nb_samples;
37  int64_t start_sample;
38  int64_t duration;
39  int64_t start_time;
40  int overlap;
41  int cf0_eof;
43  int64_t pts;
44 
45  void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
46  int nb_samples, int channels, int direction,
47  int64_t start, int64_t range, int curve);
48  void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
49  uint8_t * const *cf1,
50  int nb_samples, int channels,
51  int curve0, int curve1);
53 
55 
56 #define OFFSET(x) offsetof(AudioFadeContext, x)
57 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
58 #define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
59 
60  static const enum AVSampleFormat sample_fmts[] = {
66  };
67 
/**
 * Map a position inside a fade to a gain factor.
 *
 * @param curve one of the CurveType shapes
 * @param index position within the fade, in samples; callers may pass values
 *              outside [0, range] (they saturate via the clip below)
 * @param range total length of the fade, in samples
 * @return gain in [0, 1] (exactly 1.0 for NONE)
 */
static double fade_gain(int curve, int64_t index, int64_t range)
{
#define CUBE(a) ((a)*(a)*(a))
    double gain;

    /* linear ramp — this is the TRI result; other curves reshape it */
    gain = av_clipd(1.0 * index / range, 0, 1.0);

    switch (curve) {
    case QSIN:
        /* quarter of sine wave */
        gain = sin(gain * M_PI / 2.0);
        break;
    case IQSIN:
        /* 0.6... = 2 / M_PI */
        gain = 0.6366197723675814 * asin(gain);
        break;
    case ESIN:
        /* exponential sine wave */
        gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
        break;
    case HSIN:
        /* half of sine wave */
        gain = (1.0 - cos(gain * M_PI)) / 2.0;
        break;
    case IHSIN:
        /* 0.3... = 1 / M_PI */
        gain = 0.3183098861837907 * acos(1 - 2 * gain);
        break;
    case EXP:
        /* -11.5... = 5*ln(0.1) */
        gain = exp(-11.512925464970227 * (1 - gain));
        break;
    case LOG:
        /* logarithmic; clipped so gains below 1e-5 map to 0 */
        gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
        break;
    case PAR:
        /* parabola */
        gain = 1 - sqrt(1 - gain);
        break;
    case IPAR:
        /* inverted parabola */
        gain = (1 - (1 - gain) * (1 - gain));
        break;
    case QUA:
        /* quadratic */
        gain *= gain;
        break;
    case CUB:
        /* cubic */
        gain = CUBE(gain);
        break;
    case SQU:
        /* square root */
        gain = sqrt(gain);
        break;
    case CBR:
        /* cubic root */
        gain = cbrt(gain);
        break;
    case DESE:
        /* double-exponential seat */
        gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
        break;
    case DESI:
        /* double-exponential sigmoid */
        gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
        break;
    case LOSI: {
        /* logistic sigmoid, rescaled so the endpoints hit exactly 0 and 1 */
        const double a = 1. / (1. - 0.787) - 1;
        double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
        double B = 1. / (1.0 + exp(a));
        double C = 1. / (1.0 + exp(0-a));
        gain = (A - B) / (C - B);
        }
        break;
    case SINC:
        /* sine cardinal; guard gain==1 where sin(0)/0 would be NaN */
        gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
        break;
    case ISINC:
        /* inverted sine cardinal; guard gain==0 the same way */
        gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
        break;
    case NONE:
        gain = 1.0;
        break;
    }

    return gain;
}
145 
/*
 * Generate a fade routine for one planar sample format: each channel plane
 * is scaled sample-by-sample by the curve gain. "dir" is +1 for fade-in
 * (gain rises with i) and -1 for fade-out (gain falls as "start" counts
 * down), so the same routine serves both directions.
 */
#define FADE_PLANAR(name, type)                                             \
static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src,   \
                                     int nb_samples, int channels, int dir, \
                                     int64_t start, int64_t range, int curve) \
{                                                                           \
    int i, c;                                                               \
                                                                            \
    for (i = 0; i < nb_samples; i++) {                                      \
        double gain = fade_gain(curve, start + i * dir, range);             \
        for (c = 0; c < channels; c++) {                                    \
            type *d = (type *)dst[c];                                       \
            const type *s = (type *)src[c];                                 \
                                                                            \
            d[i] = s[i] * gain;                                             \
        }                                                                   \
    }                                                                       \
}
163 
/*
 * Generate a fade routine for one packed (interleaved) sample format:
 * channels are interleaved in a single buffer, so "k" walks all channels of
 * all frames while one gain per frame position i is applied.
 */
#define FADE(name, type)                                                    \
static void fade_samples_## name (uint8_t **dst, uint8_t * const *src,      \
                                  int nb_samples, int channels, int dir,    \
                                  int64_t start, int64_t range, int curve)  \
{                                                                           \
    type *d = (type *)dst[0];                                               \
    const type *s = (type *)src[0];                                         \
    int i, c, k = 0;                                                        \
                                                                            \
    for (i = 0; i < nb_samples; i++) {                                      \
        double gain = fade_gain(curve, start + i * dir, range);             \
        for (c = 0; c < channels; c++, k++)                                 \
            d[k] = s[k] * gain;                                             \
    }                                                                       \
}

/* Instantiate fade routines for every supported sample format. */
FADE_PLANAR(dbl, double)
FADE_PLANAR(flt, float)
FADE_PLANAR(s16, int16_t)
FADE_PLANAR(s32, int32_t)

FADE(dbl, double)
FADE(flt, float)
FADE(s16, int16_t)
FADE(s32, int32_t)
189 
/**
 * Install the per-format fade routine and convert the time-based options
 * (duration / start_time, in AV_TIME_BASE units) into sample counts now that
 * the output sample rate is known. The time options are zeroed after
 * conversion so a later re-run (via process_command()) does not re-apply a
 * stale value.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFadeContext *s = ctx->priv;

    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBL:  s->fade_samples = fade_samples_dbl;  break;
    case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
    case AV_SAMPLE_FMT_FLT:  s->fade_samples = fade_samples_flt;  break;
    case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
    case AV_SAMPLE_FMT_S16:  s->fade_samples = fade_samples_s16;  break;
    case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
    case AV_SAMPLE_FMT_S32:  s->fade_samples = fade_samples_s32;  break;
    case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
    }

    /* duration (us) overrides nb_samples when set */
    if (s->duration)
        s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
    s->duration = 0;
    /* start_time (us) overrides start_sample when set */
    if (s->start_time)
        s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
    s->start_time = 0;

    return 0;
}
215 
216 #if CONFIG_AFADE_FILTER
217 
/* afade options. All carry TFLAGS (runtime-changeable); time-based values
 * (start_time/duration) take precedence over their sample-based twins, see
 * config_output(). */
static const AVOption afade_options[] = {
    { "type",         "set the fade direction",                      OFFSET(type),         AV_OPT_TYPE_INT,    {.i64 = 0    }, 0, 1, TFLAGS, "type" },
    { "t",            "set the fade direction",                      OFFSET(type),         AV_OPT_TYPE_INT,    {.i64 = 0    }, 0, 1, TFLAGS, "type" },
    { "in",           "fade-in",                                     0,                    AV_OPT_TYPE_CONST,  {.i64 = 0    }, 0, 0, TFLAGS, "type" },
    { "out",          "fade-out",                                    0,                    AV_OPT_TYPE_CONST,  {.i64 = 1    }, 0, 0, TFLAGS, "type" },
    { "start_sample", "set number of first sample to start fading",  OFFSET(start_sample), AV_OPT_TYPE_INT64,  {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "ss",           "set number of first sample to start fading",  OFFSET(start_sample), AV_OPT_TYPE_INT64,  {.i64 = 0    }, 0, INT64_MAX, TFLAGS },
    { "nb_samples",   "set number of samples for fade duration",     OFFSET(nb_samples),   AV_OPT_TYPE_INT64,  {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
    { "ns",           "set number of samples for fade duration",     OFFSET(nb_samples),   AV_OPT_TYPE_INT64,  {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
    { "start_time",   "set time to start fading",                    OFFSET(start_time),   AV_OPT_TYPE_DURATION, {.i64 = 0  }, 0, INT64_MAX, TFLAGS },
    { "st",           "set time to start fading",                    OFFSET(start_time),   AV_OPT_TYPE_DURATION, {.i64 = 0  }, 0, INT64_MAX, TFLAGS },
    { "duration",     "set fade duration",                           OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0  }, 0, INT64_MAX, TFLAGS },
    { "d",            "set fade duration",                           OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0  }, 0, INT64_MAX, TFLAGS },
    { "curve",        "set fade curve type",                         OFFSET(curve),        AV_OPT_TYPE_INT,    {.i64 = TRI  }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
    { "c",            "set fade curve type",                         OFFSET(curve),        AV_OPT_TYPE_INT,    {.i64 = TRI  }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
    { "nofade",       "no fade; keep audio as-is",                   0,                    AV_OPT_TYPE_CONST,  {.i64 = NONE }, 0, 0, TFLAGS, "curve" },
    { "tri",          "linear slope",                                0,                    AV_OPT_TYPE_CONST,  {.i64 = TRI  }, 0, 0, TFLAGS, "curve" },
    { "qsin",         "quarter of sine wave",                        0,                    AV_OPT_TYPE_CONST,  {.i64 = QSIN }, 0, 0, TFLAGS, "curve" },
    { "esin",         "exponential sine wave",                       0,                    AV_OPT_TYPE_CONST,  {.i64 = ESIN }, 0, 0, TFLAGS, "curve" },
    { "hsin",         "half of sine wave",                           0,                    AV_OPT_TYPE_CONST,  {.i64 = HSIN }, 0, 0, TFLAGS, "curve" },
    { "log",          "logarithmic",                                 0,                    AV_OPT_TYPE_CONST,  {.i64 = LOG  }, 0, 0, TFLAGS, "curve" },
    { "ipar",         "inverted parabola",                           0,                    AV_OPT_TYPE_CONST,  {.i64 = IPAR }, 0, 0, TFLAGS, "curve" },
    { "qua",          "quadratic",                                   0,                    AV_OPT_TYPE_CONST,  {.i64 = QUA  }, 0, 0, TFLAGS, "curve" },
    { "cub",          "cubic",                                       0,                    AV_OPT_TYPE_CONST,  {.i64 = CUB  }, 0, 0, TFLAGS, "curve" },
    { "squ",          "square root",                                 0,                    AV_OPT_TYPE_CONST,  {.i64 = SQU  }, 0, 0, TFLAGS, "curve" },
    { "cbr",          "cubic root",                                  0,                    AV_OPT_TYPE_CONST,  {.i64 = CBR  }, 0, 0, TFLAGS, "curve" },
    { "par",          "parabola",                                    0,                    AV_OPT_TYPE_CONST,  {.i64 = PAR  }, 0, 0, TFLAGS, "curve" },
    { "exp",          "exponential",                                 0,                    AV_OPT_TYPE_CONST,  {.i64 = EXP  }, 0, 0, TFLAGS, "curve" },
    { "iqsin",        "inverted quarter of sine wave",               0,                    AV_OPT_TYPE_CONST,  {.i64 = IQSIN}, 0, 0, TFLAGS, "curve" },
    { "ihsin",        "inverted half of sine wave",                  0,                    AV_OPT_TYPE_CONST,  {.i64 = IHSIN}, 0, 0, TFLAGS, "curve" },
    { "dese",         "double-exponential seat",                     0,                    AV_OPT_TYPE_CONST,  {.i64 = DESE }, 0, 0, TFLAGS, "curve" },
    { "desi",         "double-exponential sigmoid",                  0,                    AV_OPT_TYPE_CONST,  {.i64 = DESI }, 0, 0, TFLAGS, "curve" },
    { "losi",         "logistic sigmoid",                            0,                    AV_OPT_TYPE_CONST,  {.i64 = LOSI }, 0, 0, TFLAGS, "curve" },
    { "sinc",         "sine cardinal function",                      0,                    AV_OPT_TYPE_CONST,  {.i64 = SINC }, 0, 0, TFLAGS, "curve" },
    { "isinc",        "inverted sine cardinal function",             0,                    AV_OPT_TYPE_CONST,  {.i64 = ISINC}, 0, 0, TFLAGS, "curve" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(afade);
257 
258 static av_cold int init(AVFilterContext *ctx)
259 {
260  AudioFadeContext *s = ctx->priv;
261 
262  if (INT64_MAX - s->nb_samples < s->start_sample)
263  return AVERROR(EINVAL);
264 
265  return 0;
266 }
267 
/**
 * afade: apply the fade to one input frame.
 *
 * The frame's position within the stream is derived from its pts rescaled
 * to a sample index at the input rate. Frames fully past a fade-in or fully
 * before a fade-out pass through untouched; frames fully before a fade-in
 * or fully past a fade-out are silenced; frames that intersect the fade get
 * the per-sample curve applied.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AudioFadeContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples = buf->nb_samples;
    AVFrame *out_buf;
    int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});

    /* pass-through: fade-in (type 0) already finished, or fade-out (type 1)
     * not yet reached */
    if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
        ( s->type && (cur_sample + nb_samples < s->start_sample)))
        return ff_filter_frame(outlink, buf);

    /* fade in place when possible, otherwise into a fresh buffer */
    if (av_frame_is_writable(buf)) {
        out_buf = buf;
    } else {
        out_buf = ff_get_audio_buffer(outlink, nb_samples);
        if (!out_buf)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_buf, buf);
    }

    /* silence: before a fade-in starts, or after a fade-out has ended */
    if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
        ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
        av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
                               out_buf->channels, out_buf->format);
    } else {
        int64_t start;

        /* offset into the fade; for fade-out the curve is walked backwards
         * (direction -1 below), so start counts down from the fade length */
        if (!s->type)
            start = cur_sample - s->start_sample;
        else
            start = s->start_sample + s->nb_samples - cur_sample;

        s->fade_samples(out_buf->extended_data, buf->extended_data,
                        nb_samples, buf->channels,
                        s->type ? -1 : 1, start,
                        s->nb_samples, s->curve);
    }

    if (buf != out_buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out_buf);
}
312 
313 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
314  char *res, int res_len, int flags)
315 {
316  int ret;
317 
318  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
319  if (ret < 0)
320  return ret;
321 
322  return config_output(ctx->outputs[0]);
323 }
324 
/* afade has a single audio input, faded in filter_frame()... */
static const AVFilterPad avfilter_af_afade_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
};

/* ...and a single audio output whose configuration resolves the
 * time-based options (see config_output()). */
static const AVFilterPad avfilter_af_afade_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
};
340 
341 const AVFilter ff_af_afade = {
342  .name = "afade",
343  .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
344  .priv_size = sizeof(AudioFadeContext),
345  .init = init,
346  FILTER_INPUTS(avfilter_af_afade_inputs),
347  FILTER_OUTPUTS(avfilter_af_afade_outputs),
349  .priv_class = &afade_class,
350  .process_command = process_command,
352 };
353 
354 #endif /* CONFIG_AFADE_FILTER */
355 
356 #if CONFIG_ACROSSFADE_FILTER
357 
/* acrossfade options. Unlike afade these are not runtime-changeable (FLAGS,
 * no RUNTIME flag); both curves share the "curve" unit so the same constant
 * names apply to curve1 and curve2. */
static const AVOption acrossfade_options[] = {
    { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT,      {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
    { "ns",         "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT,      {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
    { "duration",   "set cross fade duration",                       OFFSET(duration),   AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, 60000000, FLAGS },
    { "d",          "set cross fade duration",                       OFFSET(duration),   AV_OPT_TYPE_DURATION, {.i64 = 0    }, 0, 60000000, FLAGS },
    { "overlap",    "overlap 1st stream end with 2nd stream start",  OFFSET(overlap),    AV_OPT_TYPE_BOOL,     {.i64 = 1    }, 0, 1, FLAGS },
    { "o",          "overlap 1st stream end with 2nd stream start",  OFFSET(overlap),    AV_OPT_TYPE_BOOL,     {.i64 = 1    }, 0, 1, FLAGS },
    { "curve1",     "set fade curve type for 1st stream",            OFFSET(curve),      AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { "c1",         "set fade curve type for 1st stream",            OFFSET(curve),      AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { "nofade",     "no fade; keep audio as-is",                     0,                  AV_OPT_TYPE_CONST,    {.i64 = NONE }, 0, 0, FLAGS, "curve" },
    { "tri",        "linear slope",                                  0,                  AV_OPT_TYPE_CONST,    {.i64 = TRI  }, 0, 0, FLAGS, "curve" },
    { "qsin",       "quarter of sine wave",                          0,                  AV_OPT_TYPE_CONST,    {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
    { "esin",       "exponential sine wave",                         0,                  AV_OPT_TYPE_CONST,    {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
    { "hsin",       "half of sine wave",                             0,                  AV_OPT_TYPE_CONST,    {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
    { "log",        "logarithmic",                                   0,                  AV_OPT_TYPE_CONST,    {.i64 = LOG  }, 0, 0, FLAGS, "curve" },
    { "ipar",       "inverted parabola",                             0,                  AV_OPT_TYPE_CONST,    {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
    { "qua",        "quadratic",                                     0,                  AV_OPT_TYPE_CONST,    {.i64 = QUA  }, 0, 0, FLAGS, "curve" },
    { "cub",        "cubic",                                         0,                  AV_OPT_TYPE_CONST,    {.i64 = CUB  }, 0, 0, FLAGS, "curve" },
    { "squ",        "square root",                                   0,                  AV_OPT_TYPE_CONST,    {.i64 = SQU  }, 0, 0, FLAGS, "curve" },
    { "cbr",        "cubic root",                                    0,                  AV_OPT_TYPE_CONST,    {.i64 = CBR  }, 0, 0, FLAGS, "curve" },
    { "par",        "parabola",                                      0,                  AV_OPT_TYPE_CONST,    {.i64 = PAR  }, 0, 0, FLAGS, "curve" },
    { "exp",        "exponential",                                   0,                  AV_OPT_TYPE_CONST,    {.i64 = EXP  }, 0, 0, FLAGS, "curve" },
    { "iqsin",      "inverted quarter of sine wave",                 0,                  AV_OPT_TYPE_CONST,    {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
    { "ihsin",      "inverted half of sine wave",                    0,                  AV_OPT_TYPE_CONST,    {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
    { "dese",       "double-exponential seat",                       0,                  AV_OPT_TYPE_CONST,    {.i64 = DESE }, 0, 0, FLAGS, "curve" },
    { "desi",       "double-exponential sigmoid",                    0,                  AV_OPT_TYPE_CONST,    {.i64 = DESI }, 0, 0, FLAGS, "curve" },
    { "losi",       "logistic sigmoid",                              0,                  AV_OPT_TYPE_CONST,    {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
    { "sinc",       "sine cardinal function",                        0,                  AV_OPT_TYPE_CONST,    {.i64 = SINC }, 0, 0, FLAGS, "curve" },
    { "isinc",      "inverted sine cardinal function",               0,                  AV_OPT_TYPE_CONST,    {.i64 = ISINC}, 0, 0, FLAGS, "curve" },
    { "curve2",     "set fade curve type for 2nd stream",            OFFSET(curve2),     AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { "c2",         "set fade curve type for 2nd stream",            OFFSET(curve2),     AV_OPT_TYPE_INT,      {.i64 = TRI  }, NONE, NB_CURVES - 1, FLAGS, "curve" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(acrossfade);
393 
/*
 * Generate a crossfade routine for one planar sample format: on every
 * channel plane, sample i mixes the outgoing stream cf0 (faded out with
 * curve0, walked backwards) with the incoming stream cf1 (faded in with
 * curve1).
 */
#define CROSSFADE_PLANAR(name, type)                                        \
static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
                                          uint8_t * const *cf1,             \
                                          int nb_samples, int channels,     \
                                          int curve0, int curve1)           \
{                                                                           \
    int i, c;                                                               \
                                                                            \
    for (i = 0; i < nb_samples; i++) {                                      \
        double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples);   \
        double gain1 = fade_gain(curve1, i, nb_samples);                    \
        for (c = 0; c < channels; c++) {                                    \
            type *d = (type *)dst[c];                                       \
            const type *s0 = (type *)cf0[c];                                \
            const type *s1 = (type *)cf1[c];                                \
                                                                            \
            d[i] = s0[i] * gain0 + s1[i] * gain1;                           \
        }                                                                   \
    }                                                                       \
}
414 
/*
 * Generate a crossfade routine for one packed (interleaved) sample format:
 * same mix as CROSSFADE_PLANAR, but "k" walks the interleaved channels with
 * one gain pair per frame position i.
 */
#define CROSSFADE(name, type)                                               \
static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
                                       uint8_t * const *cf1,                \
                                       int nb_samples, int channels,        \
                                       int curve0, int curve1)              \
{                                                                           \
    type *d = (type *)dst[0];                                               \
    const type *s0 = (type *)cf0[0];                                        \
    const type *s1 = (type *)cf1[0];                                        \
    int i, c, k = 0;                                                        \
                                                                            \
    for (i = 0; i < nb_samples; i++) {                                      \
        double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples);   \
        double gain1 = fade_gain(curve1, i, nb_samples);                    \
        for (c = 0; c < channels; c++, k++)                                 \
            d[k] = s0[k] * gain0 + s1[k] * gain1;                           \
    }                                                                       \
}

/* Instantiate crossfade routines for every supported sample format. */
CROSSFADE_PLANAR(dbl, double)
CROSSFADE_PLANAR(flt, float)
CROSSFADE_PLANAR(s16, int16_t)
CROSSFADE_PLANAR(s32, int32_t)

CROSSFADE(dbl, double)
CROSSFADE(flt, float)
CROSSFADE(s16, int16_t)
CROSSFADE(s32, int32_t)
443 
444 static int activate(AVFilterContext *ctx)
445 {
446  AudioFadeContext *s = ctx->priv;
447  AVFilterLink *outlink = ctx->outputs[0];
448  AVFrame *in = NULL, *out, *cf[2] = { NULL };
449  int ret = 0, nb_samples, status;
450  int64_t pts;
451 
453 
454  if (s->crossfade_is_over) {
455  ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
456  if (ret > 0) {
457  in->pts = s->pts;
458  s->pts += av_rescale_q(in->nb_samples,
459  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
460  return ff_filter_frame(outlink, in);
461  } else if (ret < 0) {
462  return ret;
463  } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
464  ff_outlink_set_status(ctx->outputs[0], status, pts);
465  return 0;
466  } else if (!ret) {
467  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
468  ff_inlink_request_frame(ctx->inputs[1]);
469  return 0;
470  }
471  }
472  }
473 
474  nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
475  if (nb_samples > s->nb_samples) {
476  nb_samples -= s->nb_samples;
477  ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
478  if (ret < 0)
479  return ret;
480  in->pts = s->pts;
481  s->pts += av_rescale_q(in->nb_samples,
482  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
483  return ff_filter_frame(outlink, in);
484  } else if (s->cf0_eof && nb_samples >= s->nb_samples &&
485  ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples) {
486  if (s->overlap) {
487  out = ff_get_audio_buffer(outlink, s->nb_samples);
488  if (!out)
489  return AVERROR(ENOMEM);
490 
491  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
492  if (ret < 0) {
493  av_frame_free(&out);
494  return ret;
495  }
496 
497  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
498  if (ret < 0) {
499  av_frame_free(&out);
500  return ret;
501  }
502 
503  s->crossfade_samples(out->extended_data, cf[0]->extended_data,
504  cf[1]->extended_data,
505  s->nb_samples, out->channels,
506  s->curve, s->curve2);
507  out->pts = s->pts;
508  s->pts += av_rescale_q(s->nb_samples,
509  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
510  s->crossfade_is_over = 1;
511  av_frame_free(&cf[0]);
512  av_frame_free(&cf[1]);
513  return ff_filter_frame(outlink, out);
514  } else {
515  out = ff_get_audio_buffer(outlink, s->nb_samples);
516  if (!out)
517  return AVERROR(ENOMEM);
518 
519  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
520  if (ret < 0) {
521  av_frame_free(&out);
522  return ret;
523  }
524 
525  s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
526  outlink->channels, -1, s->nb_samples - 1, s->nb_samples, s->curve);
527  out->pts = s->pts;
528  s->pts += av_rescale_q(s->nb_samples,
529  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
530  av_frame_free(&cf[0]);
531  ret = ff_filter_frame(outlink, out);
532  if (ret < 0)
533  return ret;
534 
535  out = ff_get_audio_buffer(outlink, s->nb_samples);
536  if (!out)
537  return AVERROR(ENOMEM);
538 
539  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
540  if (ret < 0) {
541  av_frame_free(&out);
542  return ret;
543  }
544 
545  s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
546  outlink->channels, 1, 0, s->nb_samples, s->curve2);
547  out->pts = s->pts;
548  s->pts += av_rescale_q(s->nb_samples,
549  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
550  s->crossfade_is_over = 1;
551  av_frame_free(&cf[1]);
552  return ff_filter_frame(outlink, out);
553  }
554  } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
555  if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) {
556  s->cf0_eof = 1;
557  }
558  if (ff_outlink_get_status(ctx->inputs[1])) {
560  return 0;
561  }
562  if (!s->cf0_eof)
563  ff_inlink_request_frame(ctx->inputs[0]);
564  else
565  ff_inlink_request_frame(ctx->inputs[1]);
566  return 0;
567  }
568 
569  return ret;
570 }
571 
572 static int acrossfade_config_output(AVFilterLink *outlink)
573 {
574  AVFilterContext *ctx = outlink->src;
575  AudioFadeContext *s = ctx->priv;
576 
577  outlink->time_base = ctx->inputs[0]->time_base;
578 
579  switch (outlink->format) {
580  case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
581  case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
582  case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
583  case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
584  case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
585  case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
586  case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
587  case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
588  }
589 
590  config_output(outlink);
591 
592  return 0;
593 }
594 
/* acrossfade takes two audio inputs (no filter_frame callbacks — all
 * scheduling happens in activate())... */
static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
    {
        .name = "crossfade0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "crossfade1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

/* ...and produces one output configured by acrossfade_config_output(). */
static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = acrossfade_config_output,
    },
};
613 
614 const AVFilter ff_af_acrossfade = {
615  .name = "acrossfade",
616  .description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
617  .priv_size = sizeof(AudioFadeContext),
618  .activate = activate,
619  .priv_class = &acrossfade_class,
620  FILTER_INPUTS(avfilter_af_acrossfade_inputs),
621  FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
623 };
624 
625 #endif /* CONFIG_ACROSSFADE_FILTER */
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:88
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:69
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
AudioFadeContext::type
int type
Definition: af_afade.c:34
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AudioFadeContext::curve2
int curve2
Definition: af_afade.c:35
ff_af_afade
const AVFilter ff_af_afade
out
FILE * out
Definition: movenc.c:54
NONE
@ NONE
Definition: af_afade.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
QUA
@ QUA
Definition: af_afade.c:54
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:424
index
fg index
Definition: ffmpeg_filter.c:167
AVOption
AVOption.
Definition: opt.h:247
IPAR
@ IPAR
Definition: af_afade.c:54
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:68
AV_OPT_TYPE_DURATION
@ AV_OPT_TYPE_DURATION
Definition: opt.h:238
AudioFadeContext::fade_samples
void(* fade_samples)(uint8_t **dst, uint8_t *const *src, int nb_samples, int channels, int direction, int64_t start, int64_t range, int curve)
Definition: af_afade.c:45
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_afade.c:190
NB_CURVES
@ NB_CURVES
Definition: af_afade.c:54
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:169
DESE
@ DESE
Definition: af_afade.c:54
DESI
@ DESI
Definition: af_afade.c:54
init
static int init
Definition: av_tx.c:47
A
#define A(x)
Definition: vp56_arith.h:28
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1417
FADE
#define FADE(name, type)
Definition: af_afade.c:164
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
ISINC
@ ISINC
Definition: af_afade.c:54
CUBE
#define CUBE(a)
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:653
OFFSET
#define OFFSET(x)
Definition: af_afade.c:56
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
cbrt
#define cbrt
Definition: tablegen.h:35
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
ff_af_acrossfade
const AVFilter ff_af_acrossfade
AudioFadeContext::cf0_eof
int cf0_eof
Definition: af_afade.c:41
av_cold
#define av_cold
Definition: attributes.h:90
CUB
@ CUB
Definition: af_afade.c:54
QSIN
@ QSIN
Definition: af_afade.c:54
duration
int64_t duration
Definition: movenc.c:64
IHSIN
@ IHSIN
Definition: af_afade.c:54
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1534
s
#define s(width, name)
Definition: cbs_vp9.c:257
TRI
@ TRI
Definition: af_afade.c:54
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:628
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
HSIN
@ HSIN
Definition: af_afade.c:54
AV_OPT_TYPE_INT64
@ AV_OPT_TYPE_INT64
Definition: opt.h:225
filters.h
ctx
AVFormatContext * ctx
Definition: movenc.c:48
FLAGS
#define FLAGS
Definition: af_afade.c:57
channels
channels
Definition: aptx.h:33
IQSIN
@ IQSIN
Definition: af_afade.c:54
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
fade_gain
static double fade_gain(int curve, int64_t index, int64_t range)
Definition: af_afade.c:68
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:191
AudioFadeContext::crossfade_is_over
int crossfade_is_over
Definition: af_afade.c:42
AudioFadeContext::crossfade_samples
void(* crossfade_samples)(uint8_t **dst, uint8_t *const *cf0, uint8_t *const *cf1, int nb_samples, int channels, int curve0, int curve1)
Definition: af_afade.c:48
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1436
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AudioFadeContext::start_sample
int64_t start_sample
Definition: af_afade.c:37
activate
filter_frame For filters that do not use the activate() callback
src
#define src
Definition: vp8dsp.c:255
filter_frame
static int filter_frame(DBEDecodeContext *s, AVFrame *frame)
Definition: dolby_e.c:1050
exp
int8_t exp
Definition: eval.c:72
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1371
SQU
@ SQU
Definition: af_afade.c:54
CurveType
CurveType
Definition: af_afade.c:54
TFLAGS
#define TFLAGS
Definition: af_afade.c:58
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: af_acrusher.c:306
av_clipd
#define av_clipd
Definition: common.h:147
start_time
static int64_t start_time
Definition: ffplay.c:330
AudioFadeContext::curve
int curve
Definition: af_afade.c:35
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
FILTER_SAMPLEFMTS_ARRAY
#define FILTER_SAMPLEFMTS_ARRAY(array)
Definition: internal.h:174
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:473
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:404
AudioFadeContext::duration
int64_t duration
Definition: af_afade.c:38
EXP
@ EXP
Definition: af_afade.c:54
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:882
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
M_PI
#define M_PI
Definition: mathematics.h:52
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:146
AVFILTER_DEFINE_CLASS
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:326
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:397
SINC
@ SINC
Definition: af_afade.c:54
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:378
AudioFadeContext::pts
int64_t pts
Definition: af_afade.c:43
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
AudioFadeContext::overlap
int overlap
Definition: af_afade.c:40
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
CBR
@ CBR
Definition: af_afade.c:54
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1396
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:128
AudioFadeContext::start_time
int64_t start_time
Definition: af_afade.c:39
av_samples_set_silence
int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Fill an audio buffer with silence.
Definition: samplefmt.c:244
AVFilter
Filter definition.
Definition: avfilter.h:165
LOSI
@ LOSI
Definition: af_afade.c:54
ret
ret
Definition: filter_design.txt:187
B
#define B
Definition: huffyuvdsp.h:32
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:70
FADE_PLANAR
#define FADE_PLANAR(name, type)
Definition: af_afade.c:146
ff_outlink_get_status
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1557
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
audio.h
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:192
int32_t
int32_t
Definition: audioconvert.c:56
ESIN
@ ESIN
Definition: af_afade.c:54
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: af_afade.c:60
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
AV_SAMPLE_FMT_DBL
@ AV_SAMPLE_FMT_DBL
double
Definition: samplefmt.h:64
PAR
@ PAR
Definition: af_afade.c:54
AV_SAMPLE_FMT_S32
@ AV_SAMPLE_FMT_S32
signed 32 bits
Definition: samplefmt.h:62
AudioFadeContext::nb_samples
int64_t nb_samples
Definition: af_afade.c:36
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
AudioFadeContext
Definition: af_afade.c:32
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
LOG
@ LOG
Definition: af_afade.c:54