af_amix.c
/*
 * Audio Mix Filter
 * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio Mix Filter
 *
 * Mixes audio from multiple sources into a single output. The channel layout,
 * sample rate, and sample format will be the same for all inputs and the
 * output.
 */

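/*
 * Example usage (for illustration only; the options are those defined in the
 * AVOptions table below):
 *
 *   ffmpeg -i first.wav -i second.wav \
 *          -filter_complex "amix=inputs=2:duration=longest:dropout_transition=3" \
 *          mixed.wav
 *
 * mixes two inputs for the duration of the longer one, renormalizing the
 * volume over 3 seconds once the shorter input ends.
 */
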
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

#define INPUT_OFF      0    /**< input has reached EOF */
#define INPUT_ON       1    /**< input is active */
#define INPUT_INACTIVE 2    /**< input is on, but is currently inactive */

#define DURATION_LONGEST  0 /**< mix until the longest input ends */
#define DURATION_SHORTEST 1 /**< mix until the shortest input ends */
#define DURATION_FIRST    2 /**< mix until the first input ends */

typedef struct FrameInfo {
    int nb_samples;
    int64_t pts;
    struct FrameInfo *next;
} FrameInfo;

/**
 * Linked list used to store timestamps and frame sizes of all frames in the
 * FIFO for the first input.
 *
 * This is needed to keep timestamps synchronized for the case where multiple
 * input frames are pushed to the filter for processing before a frame is
 * requested by the output link.
 */
typedef struct FrameList {
    int nb_frames;
    int nb_samples;
    FrameInfo *list;
    FrameInfo *end;
} FrameList;
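/*
 * Note: the pts values stored in this list are in the output time base of
 * 1/sample_rate (frames are rescaled in filter_frame() and the output time
 * base is set in config_output()), so a stored pts can be advanced by a raw
 * sample count when only part of a frame is consumed.
 */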

static void frame_list_clear(FrameList *frame_list)
{
    if (frame_list) {
        while (frame_list->list) {
            FrameInfo *info = frame_list->list;
            frame_list->list = info->next;
            av_free(info);
        }
        frame_list->nb_frames  = 0;
        frame_list->nb_samples = 0;
        frame_list->end        = NULL;
    }
}

static int frame_list_next_frame_size(FrameList *frame_list)
{
    if (!frame_list->list)
        return 0;
    return frame_list->list->nb_samples;
}

static int64_t frame_list_next_pts(FrameList *frame_list)
{
    if (!frame_list->list)
        return AV_NOPTS_VALUE;
    return frame_list->list->pts;
}

static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
    if (nb_samples >= frame_list->nb_samples) {
        frame_list_clear(frame_list);
    } else {
        int samples = nb_samples;
        while (samples > 0) {
            FrameInfo *info = frame_list->list;
            av_assert0(info != NULL);
            if (info->nb_samples <= samples) {
                samples -= info->nb_samples;
                frame_list->list = info->next;
                if (!frame_list->list)
                    frame_list->end = NULL;
                frame_list->nb_frames--;
                frame_list->nb_samples -= info->nb_samples;
                av_free(info);
            } else {
                info->nb_samples       -= samples;
                info->pts              += samples;
                frame_list->nb_samples -= samples;
                samples = 0;
            }
        }
    }
}

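/*
 * For example, if the list holds frames of 1024 and 512 samples and 1200
 * samples are removed, the 1024-sample entry is freed outright, while the
 * 512-sample entry keeps 336 samples and has its pts advanced by the 176
 * samples consumed from it.
 */
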
static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
    FrameInfo *info = av_malloc(sizeof(*info));
    if (!info)
        return AVERROR(ENOMEM);
    info->nb_samples = nb_samples;
    info->pts        = pts;
    info->next       = NULL;

    if (!frame_list->list) {
        frame_list->list = info;
        frame_list->end  = info;
    } else {
        av_assert0(frame_list->end != NULL);
        frame_list->end->next = info;
        frame_list->end       = info;
    }
    frame_list->nb_frames++;
    frame_list->nb_samples += nb_samples;

    return 0;
}

typedef struct MixContext {
    const AVClass *class;       /**< class for AVOptions */
    AVFloatDSPContext fdsp;     /**< float DSP context used for mixing */

    int nb_inputs;              /**< number of inputs */
    int active_inputs;          /**< number of inputs currently active */
    int duration_mode;          /**< mode for determining duration */
    float dropout_transition;   /**< transition time when an input drops out */

    int nb_channels;            /**< number of channels */
    int sample_rate;            /**< sample rate */
    int planar;
    AVAudioFifo **fifos;        /**< audio fifo for each input */
    uint8_t *input_state;       /**< current state of each input */
    float *input_scale;         /**< mixing scale factor for each input */
    float scale_norm;           /**< normalization factor for all inputs */
    int64_t next_pts;           /**< calculated pts for next output frame */
    FrameList *frame_list;      /**< list of frame info for the first input */
} MixContext;

#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption amix_options[] = {
    { "inputs", "Number of inputs.",
            OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F },
    { "duration", "How to determine the end-of-stream.",
            OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" },
        { "longest",  "Duration of longest input.",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, INT_MIN, INT_MAX, A|F, "duration" },
        { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" },
        { "first",    "Duration of first input.",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, INT_MIN, INT_MAX, A|F, "duration" },
    { "dropout_transition", "Transition time, in seconds, for volume "
                            "renormalization when an input stream ends.",
            OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amix);

/**
 * Update the scaling factors to apply to each input during mixing.
 *
 * This balances the full volume range between active inputs and handles
 * volume transitions when EOF is encountered on an input but mixing continues
 * with the remaining inputs.
 */
static void calculate_scales(MixContext *s, int nb_samples)
{
    int i;

    if (s->scale_norm > s->active_inputs) {
        s->scale_norm -= nb_samples / (s->dropout_transition * s->sample_rate);
        s->scale_norm  = FFMAX(s->scale_norm, s->active_inputs);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON)
            s->input_scale[i] = 1.0f / s->scale_norm;
        else
            s->input_scale[i] = 0.0f;
    }
}
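/*
 * For example, with three active inputs each input is scaled by 1/3. If one
 * of them reaches EOF, active_inputs drops to 2 while scale_norm is still 3;
 * each call then lowers scale_norm by nb_samples / (dropout_transition *
 * sample_rate) until it is clamped at 2, so the surviving inputs ramp
 * smoothly from a gain of 1/3 up to 1/2 over dropout_transition seconds.
 */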

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s        = ctx->priv;
    int i;
    char buf[64];

    s->planar          = av_sample_fmt_is_planar(outlink->format);
    s->sample_rate     = outlink->sample_rate;
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts        = AV_NOPTS_VALUE;

    s->frame_list = av_mallocz(sizeof(*s->frame_list));
    if (!s->frame_list)
        return AVERROR(ENOMEM);

    s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos));
    if (!s->fifos)
        return AVERROR(ENOMEM);

    s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
    for (i = 0; i < s->nb_inputs; i++) {
        s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
        if (!s->fifos[i])
            return AVERROR(ENOMEM);
    }

    s->input_state = av_malloc(s->nb_inputs);
    if (!s->input_state)
        return AVERROR(ENOMEM);
    memset(s->input_state, INPUT_ON, s->nb_inputs);
    s->active_inputs = s->nb_inputs;

    s->input_scale = av_mallocz_array(s->nb_inputs, sizeof(*s->input_scale));
    if (!s->input_scale)
        return AVERROR(ENOMEM);
    s->scale_norm = s->active_inputs;
    calculate_scales(s, 0);

    av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);

    av_log(ctx, AV_LOG_VERBOSE,
           "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);

    return 0;
}

/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s        = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int i;

    calculate_scales(s, nb_samples);

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    /* temporary buffer reused for reading each input's FIFO */
    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            plane_size = FFALIGN(plane_size, 16);

            /* scale this input and accumulate it into the output buffer */
            for (p = 0; p < planes; p++) {
                s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p],
                                           (float *) in_buf->extended_data[p],
                                           s->input_scale[i], plane_size);
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}

/**
 * Returns the smallest number of samples available in the input FIFOs other
 * than that of the first input.
 */
static int get_available_samples(MixContext *s)
{
    int i;
    int available_samples = INT_MAX;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        int nb_samples;
        if (s->input_state[i] == INPUT_OFF)
            continue;
        nb_samples        = av_audio_fifo_size(s->fifos[i]);
        available_samples = FFMIN(available_samples, nb_samples);
    }
    if (available_samples == INT_MAX)
        return 0;
    return available_samples;
}

/**
 * Requests a frame, if needed, from each input link other than the first.
 */
static int request_samples(AVFilterContext *ctx, int min_samples)
{
    MixContext *s = ctx->priv;
    int i, ret;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        ret = 0;
        if (s->input_state[i] == INPUT_OFF)
            continue;
        while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
            ret = ff_request_frame(ctx->inputs[i]);
        if (ret == AVERROR_EOF) {
            if (av_audio_fifo_size(s->fifos[i]) == 0) {
                s->input_state[i] = INPUT_OFF;
                continue;
            }
        } else if (ret < 0)
            return ret;
    }
    return 0;
}

/**
 * Calculates the number of active inputs and determines EOF based on the
 * duration option.
 *
 * @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
 */
static int calc_active_inputs(MixContext *s)
{
    int i;
    int active_inputs = 0;
    for (i = 0; i < s->nb_inputs; i++)
        active_inputs += !!(s->input_state[i] != INPUT_OFF);
    s->active_inputs = active_inputs;

    if (!active_inputs ||
        (s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) ||
        (s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
        return AVERROR_EOF;
    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s        = ctx->priv;
    int ret;
    int wanted_samples, available_samples;

    ret = calc_active_inputs(s);
    if (ret < 0)
        return ret;

    if (s->input_state[0] == INPUT_OFF) {
        ret = request_samples(ctx, 1);
        if (ret < 0)
            return ret;

        ret = calc_active_inputs(s);
        if (ret < 0)
            return ret;

        available_samples = get_available_samples(s);
        if (!available_samples)
            return AVERROR(EAGAIN);

        return output_frame(outlink, available_samples);
    }

    if (s->frame_list->nb_frames == 0) {
        ret = ff_request_frame(ctx->inputs[0]);
        if (ret == AVERROR_EOF) {
            s->input_state[0] = INPUT_OFF;
            if (s->nb_inputs == 1)
                return AVERROR_EOF;
            else
                return AVERROR(EAGAIN);
        } else if (ret < 0)
            return ret;
    }
    av_assert0(s->frame_list->nb_frames > 0);

    wanted_samples = frame_list_next_frame_size(s->frame_list);

    if (s->active_inputs > 1) {
        ret = request_samples(ctx, wanted_samples);
        if (ret < 0)
            return ret;

        ret = calc_active_inputs(s);
        if (ret < 0)
            return ret;
    }

    if (s->active_inputs > 1) {
        available_samples = get_available_samples(s);
        if (!available_samples)
            return AVERROR(EAGAIN);
        available_samples = FFMIN(available_samples, wanted_samples);
    } else {
        available_samples = wanted_samples;
    }

    s->next_pts = frame_list_next_pts(s->frame_list);
    frame_list_remove_samples(s->frame_list, available_samples);

    return output_frame(outlink, available_samples);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx  = inlink->dst;
    MixContext *s         = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret = 0;

    for (i = 0; i < ctx->nb_inputs; i++)
        if (ctx->inputs[i] == inlink)
            break;
    if (i >= ctx->nb_inputs) {
        av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (i == 0) {
        int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                   outlink->time_base);
        ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
        if (ret < 0)
            goto fail;
    }

    ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                              buf->nb_samples);

fail:
    av_frame_free(&buf);

    return ret;
}

static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type         = AVMEDIA_TYPE_AUDIO;
        pad.name         = av_strdup(name);
        pad.filter_frame = filter_frame;

        ff_insert_inpad(ctx, i, &pad);
    }

    avpriv_float_dsp_init(&s->fdsp, 0);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int i;
    MixContext *s = ctx->priv;

    if (s->fifos) {
        for (i = 0; i < s->nb_inputs; i++)
            av_audio_fifo_free(s->fifos[i]);
        av_freep(&s->fifos);
    }
    frame_list_clear(s->frame_list);
    av_freep(&s->frame_list);
    av_freep(&s->input_state);
    av_freep(&s->input_scale);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
    ff_add_format(&formats, AV_SAMPLE_FMT_FLTP);
    ff_set_common_formats(ctx, formats);
    ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
    ff_set_common_samplerates(ctx, ff_all_samplerates());
    return 0;
}

static const AVFilterPad avfilter_af_amix_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame
    },
    { NULL }
};

AVFilter avfilter_af_amix = {
    .name          = "amix",
    .description   = NULL_IF_CONFIG_SMALL("Audio mixing."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &amix_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = avfilter_af_amix_outputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};