af_amerge.c
/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */
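
/*
 * Minimal usage sketch (illustrative command line, not taken from this file):
 * merge the audio of two inputs into one multi-channel stream, assuming both
 * inputs share the same sample rate as required by config_output() below:
 *
 *     ffmpeg -i front.wav -i rear.wav \
 *            -filter_complex "[0:a][1:a]amerge=inputs=2" out.wav
 */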

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "bufferqueue.h"
#include "internal.h"

#define SWR_CH_MAX 32

typedef struct {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        struct FFBufQueue queue;
        int nb_ch;         /**< number of channels for the input */
        int nb_samples;    /**< samples queued and not yet merged */
        int pos;           /**< read position in the first queued frame */
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

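/* Release everything allocated in init(): the queued frames of each input
 * and the dynamically allocated input pad names. */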
static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int i;

    for (i = 0; i < am->nb_inputs; i++) {
        if (am->in)
            ff_bufqueue_discard_all(&am->in[i].queue);
        if (ctx->input_pads)
            av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&am->in);
}

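/*
 * Negotiate formats and channel layouts: each input keeps its own layout,
 * the output gets the union of the input channels, and the route[] table
 * maps every input channel to its slot in the output frame.
 *
 * Illustrative example (not from the original comments): merging a stereo
 * input (FL|FR) with a mono input (FC) gives in[0].nb_ch = 2, in[1].nb_ch = 1,
 * route = {0, 1, 2} and an output layout of FL|FR|FC, so each output sample
 * period is written as FL, FR, FC.
 */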
static int query_formats(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, overlap = 0, nb_ch = 0;

    for (i = 0; i < am->nb_inputs; i++) {
        if (!ctx->inputs[i]->in_channel_layouts ||
            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        am->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
        if (outlayout & inlayout[i])
            overlap++;
        outlayout |= inlayout[i];
        nb_ch += am->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            am->route[i] = i;
        outlayout = av_get_default_channel_layout(nb_ch);
        if (!outlayout)
            outlayout = ((int64_t)1 << nb_ch) - 1;
    } else {
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = am->route;
        for (i = 1; i < am->nb_inputs; i++)
            route[i] = route[i - 1] + am->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < am->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(ff_packed_sample_fmts_array);
    ff_set_common_formats(ctx, formats);
    for (i = 0; i < am->nb_inputs; i++) {
        layouts = NULL;
        ff_add_channel_layout(&layouts, inlayout[i]);
        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
    }
    layouts = NULL;
    ff_add_channel_layout(&layouts, outlayout);
    ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
    ff_set_common_samplerates(ctx, ff_all_samplerates());
    return 0;
}

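/* Check that all inputs share one sample rate, propagate it and the time base
 * to the output, and log the resulting channel mapping. */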
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *am = ctx->priv;
    AVBPrint bp;
    int i;

    for (i = 1; i < am->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, 1);
    for (i = 0; i < am->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

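/* Request a frame from every input that has no queued samples left, so that
 * filter_frame() always finds data to merge on all inputs. */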
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *am = ctx->priv;
    int i, ret;

    for (i = 0; i < am->nb_inputs; i++)
        if (!am->in[i].nb_samples)
            if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
                return ret;
    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field;
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the following inputs
 * @param ins       pointer to the samples of each input, in packed format;
 *                  will be left at the end of the copied samples
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  will be left at the end of the copied samples
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}

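/*
 * Queue the incoming frame for its input, then, once every input has samples
 * available, interleave the common amount into one output frame with
 * copy_samples(). Fully consumed input frames are dropped from their queue;
 * partially consumed ones keep their read offset in am->in[i].pos.
 */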
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AMergeContext *am = ctx->priv;
    AVFilterLink *const outlink = ctx->outputs[0];
    int input_number;
    int nb_samples, ns, i;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX];
    uint8_t *ins[SWR_CH_MAX], *outs;

    for (input_number = 0; input_number < am->nb_inputs; input_number++)
        if (inlink == ctx->inputs[input_number])
            break;
    av_assert1(input_number < am->nb_inputs);
    if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples));
    am->in[input_number].nb_samples += insamples->nb_samples;
    av_frame_free(&insamples);
    nb_samples = am->in[0].nb_samples;
    for (i = 1; i < am->nb_inputs; i++)
        nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
    if (!nb_samples)
        return 0;

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf)
        return AVERROR(ENOMEM);
    outs = outbuf->data[0];
    for (i = 0; i < am->nb_inputs; i++) {
        inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
        ins[i] = inbuf[i]->data[0] +
                 am->in[i].pos * am->in[i].nb_ch * am->bps;
    }
    av_frame_copy_props(outbuf, inbuf[0]);
    outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                  inbuf[0]->pts +
                  av_rescale_q(am->in[0].pos,
                               av_make_q(1, ctx->inputs[0]->sample_rate),
                               ctx->outputs[0]->time_base);

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    av_frame_set_channels(outbuf, outlink->channels);

    while (nb_samples) {
        ns = nb_samples;
        for (i = 0; i < am->nb_inputs; i++)
            ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos);
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (am->bps) {
            case 1:
                copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 1);
                break;
            case 2:
                copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 2);
                break;
            case 4:
                copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 4);
                break;
            default:
                copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, am->bps);
                break;
        }

        nb_samples -= ns;
        for (i = 0; i < am->nb_inputs; i++) {
            am->in[i].nb_samples -= ns;
            am->in[i].pos += ns;
            if (am->in[i].pos == inbuf[i]->nb_samples) {
                am->in[i].pos = 0;
                av_frame_free(&inbuf[i]);
                ff_bufqueue_get(&am->in[i].queue);
                inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
                ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
            }
        }
    }
    return ff_filter_frame(ctx->outputs[0], outbuf);
}

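/* Allocate the per-input state and create one input pad per requested input,
 * named "in0", "in1", ... */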
static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int i;

    am->in = av_calloc(am->nb_inputs, sizeof(*am->in));
    if (!am->in)
        return AVERROR(ENOMEM);
    for (i = 0; i < am->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name         = name,
            .type         = AVMEDIA_TYPE_AUDIO,
            .filter_frame = filter_frame,
        };
        if (!name)
            return AVERROR(ENOMEM);
        ff_insert_inpad(ctx, i, &pad);
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};