FFmpeg
af_axcorrelate.c
/*
 * Copyright (c) 2019 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/audio_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"

typedef struct AudioXCorrelateContext {
    const AVClass *class;

    int size;
    int algo;
    int64_t pts;

    AVAudioFifo *fifo[2];
    AVFrame *cache[2];
    AVFrame *mean_sum[2];
    AVFrame *num_sum;
    AVFrame *den_sum[2];
    int used;
    int eof;

    int (*xcorrelate)(AVFilterContext *ctx, AVFrame *out, int available);
} AudioXCorrelateContext;

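/* MEAN_SUM() expands to mean_sum_f()/mean_sum_d(): the plain sum of the
 * first `size` samples. Callers divide by the window size to obtain the
 * mean used by the slow correlator. */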
#define MEAN_SUM(suffix, type, zero)            \
static type mean_sum_##suffix(const type *in,   \
                              int size)         \
{                                               \
    type mean_sum = zero;                       \
                                                \
    for (int i = 0; i < size; i++)              \
        mean_sum += in[i];                      \
                                                \
    return mean_sum;                            \
}

MEAN_SUM(f, float, 0.f)
MEAN_SUM(d, double, 0.0)

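/* SQUARE_SUM() expands to square_sum_f()/square_sum_d(): the sum of the
 * element-wise products x[i] * y[i]. With x == y this is the energy of the
 * window, which the fast correlator uses for its denominator terms. */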
#define SQUARE_SUM(suffix, type, zero)          \
static type square_sum_##suffix(const type *x,  \
                                const type *y,  \
                                int size)       \
{                                               \
    type square_sum = zero;                     \
                                                \
    for (int i = 0; i < size; i++)              \
        square_sum += x[i] * y[i];              \
                                                \
    return square_sum;                          \
}

SQUARE_SUM(f, float, 0.f)
SQUARE_SUM(d, double, 0.0)

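/* XCORRELATE() expands to xcorrelate_f()/xcorrelate_d(): the Pearson
 * correlation coefficient of one window,
 *   r = sum((x - xm) * (y - ym)) / sqrt(sum((x - xm)^2) * sum((y - ym)^2)),
 * computed from the precomputed running sums sumx and sumy. A near-zero
 * denominator (at or below `small`) yields 0 instead of dividing by it. */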
#define XCORRELATE(suffix, type, zero, small, sqrtfun) \
static type xcorrelate_##suffix(const type *x,  \
                                const type *y,  \
                                type sumx,      \
                                type sumy, int size) \
{                                               \
    const type xm = sumx / size, ym = sumy / size; \
    type num = zero, den, den0 = zero, den1 = zero; \
                                                \
    for (int i = 0; i < size; i++) {            \
        type xd = x[i] - xm;                    \
        type yd = y[i] - ym;                    \
                                                \
        num  += xd * yd;                        \
        den0 += xd * xd;                        \
        den1 += yd * yd;                        \
    }                                           \
                                                \
    num /= size;                                \
    den  = sqrtfun((den0 * den1) / size / size); \
                                                \
    return den <= small ? zero : num / den;     \
}

XCORRELATE(f, float, 0.f, 1e-6f, sqrtf)
XCORRELATE(d, double, 0.0, 1e-9, sqrt)

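/* Slow algorithm: for every output sample the full mean-corrected Pearson
 * correlation over the next `size` samples is recomputed; only the running
 * sums of x and y slide along with the window. The `idx` term keeps the
 * sliding sums within the cached samples when fewer than `size` samples
 * remain at the EOF flush. */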
#define XCORRELATE_SLOW(suffix, type)           \
static int xcorrelate_slow_##suffix(AVFilterContext *ctx, \
                                    AVFrame *out, int available) \
{                                               \
    AudioXCorrelateContext *s = ctx->priv;      \
    const int size = FFMIN(available, s->size); \
    int used;                                   \
                                                \
    for (int ch = 0; ch < out->channels; ch++) { \
        const type *x = (const type *)s->cache[0]->extended_data[ch]; \
        const type *y = (const type *)s->cache[1]->extended_data[ch]; \
        type *sumx = (type *)s->mean_sum[0]->extended_data[ch]; \
        type *sumy = (type *)s->mean_sum[1]->extended_data[ch]; \
        type *dst = (type *)out->extended_data[ch]; \
                                                \
        used = s->used;                         \
        if (!used) {                            \
            sumx[0] = mean_sum_##suffix(x, size); \
            sumy[0] = mean_sum_##suffix(y, size); \
            used = 1;                           \
        }                                       \
                                                \
        for (int n = 0; n < out->nb_samples; n++) { \
            const int idx = available <= s->size ? out->nb_samples - n - 1 : n + size; \
                                                \
            dst[n] = xcorrelate_##suffix(x + n, y + n, \
                                         sumx[0], sumy[0], \
                                         size); \
                                                \
            sumx[0] -= x[n];                    \
            sumx[0] += x[idx];                  \
            sumy[0] -= y[n];                    \
            sumy[0] += y[idx];                  \
        }                                       \
    }                                           \
                                                \
    return used;                                \
}

XCORRELATE_SLOW(f, float)
XCORRELATE_SLOW(d, double)

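/* Fast algorithm: instead of subtracting the window means it maintains the
 * running sums of x*y, x*x and y*y and evaluates
 *   r = sum(x * y) / sqrt(sum(x * x) * sum(y * y))
 * directly, so each output sample costs O(1) per channel. The FFMAX()
 * clamps keep the energy sums from drifting negative through floating-point
 * cancellation as samples are subtracted and added. */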
#define XCORRELATE_FAST(suffix, type, zero, small, sqrtfun) \
static int xcorrelate_fast_##suffix(AVFilterContext *ctx, AVFrame *out, \
                                    int available) \
{                                               \
    AudioXCorrelateContext *s = ctx->priv;      \
    const int size = FFMIN(available, s->size); \
    int used;                                   \
                                                \
    for (int ch = 0; ch < out->channels; ch++) { \
        const type *x = (const type *)s->cache[0]->extended_data[ch]; \
        const type *y = (const type *)s->cache[1]->extended_data[ch]; \
        type *num_sum = (type *)s->num_sum->extended_data[ch]; \
        type *den_sumx = (type *)s->den_sum[0]->extended_data[ch]; \
        type *den_sumy = (type *)s->den_sum[1]->extended_data[ch]; \
        type *dst = (type *)out->extended_data[ch]; \
                                                \
        used = s->used;                         \
        if (!used) {                            \
            num_sum[0]  = square_sum_##suffix(x, y, size); \
            den_sumx[0] = square_sum_##suffix(x, x, size); \
            den_sumy[0] = square_sum_##suffix(y, y, size); \
            used = 1;                           \
        }                                       \
                                                \
        for (int n = 0; n < out->nb_samples; n++) { \
            const int idx = available <= s->size ? out->nb_samples - n - 1 : n + size; \
            type num, den;                      \
                                                \
            num = num_sum[0] / size;            \
            den = sqrtfun((den_sumx[0] * den_sumy[0]) / size / size); \
                                                \
            dst[n] = den <= small ? zero : num / den; \
                                                \
            num_sum[0]  -= x[n] * y[n];         \
            num_sum[0]  += x[idx] * y[idx];     \
            den_sumx[0] -= x[n] * x[n];         \
            den_sumx[0] += x[idx] * x[idx];     \
            den_sumx[0]  = FFMAX(den_sumx[0], zero); \
            den_sumy[0] -= y[n] * y[n];         \
            den_sumy[0] += y[idx] * y[idx];     \
            den_sumy[0]  = FFMAX(den_sumy[0], zero); \
        }                                       \
    }                                           \
                                                \
    return used;                                \
}

XCORRELATE_FAST(f, float, 0.f, 1e-6f, sqrtf)
XCORRELATE_FAST(d, double, 0.0, 1e-9, sqrt)

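/* activate(): consume one frame per input and queue it in the matching
 * FIFO (remembering the first pts); once both FIFOs hold more than `size`
 * samples (or anything remains at EOF), peek the available samples into the
 * cache frames, run the selected correlator, drain the emitted samples from
 * both FIFOs and forward the output frame. Otherwise propagate EOF/status
 * or request more input. */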
static int activate(AVFilterContext *ctx)
{
    AudioXCorrelateContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int available;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    for (int i = 0; i < 2; i++) {
        ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
        if (ret > 0) {
            if (s->pts == AV_NOPTS_VALUE)
                s->pts = frame->pts;
            ret = av_audio_fifo_write(s->fifo[i], (void **)frame->extended_data,
                                      frame->nb_samples);
            av_frame_free(&frame);
            if (ret < 0)
                return ret;
        }
    }

    available = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
    if (available > s->size || (s->eof && available > 0)) {
        const int out_samples = s->eof ? available : available - s->size;
        AVFrame *out;

        if (!s->cache[0] || s->cache[0]->nb_samples < available) {
            av_frame_free(&s->cache[0]);
            s->cache[0] = ff_get_audio_buffer(ctx->outputs[0], available);
            if (!s->cache[0])
                return AVERROR(ENOMEM);
        }

        if (!s->cache[1] || s->cache[1]->nb_samples < available) {
            av_frame_free(&s->cache[1]);
            s->cache[1] = ff_get_audio_buffer(ctx->outputs[0], available);
            if (!s->cache[1])
                return AVERROR(ENOMEM);
        }

        ret = av_audio_fifo_peek(s->fifo[0], (void **)s->cache[0]->extended_data, available);
        if (ret < 0)
            return ret;

        ret = av_audio_fifo_peek(s->fifo[1], (void **)s->cache[1]->extended_data, available);
        if (ret < 0)
            return ret;

        out = ff_get_audio_buffer(ctx->outputs[0], out_samples);
        if (!out)
            return AVERROR(ENOMEM);

        s->used = s->xcorrelate(ctx, out, available);

        out->pts = s->pts;
        s->pts += out_samples;

        av_audio_fifo_drain(s->fifo[0], out_samples);
        av_audio_fifo_drain(s->fifo[1], out_samples);

        return ff_filter_frame(ctx->outputs[0], out);
    }

    for (int i = 0; i < 2 && !s->eof; i++) {
        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts))
            s->eof = 1;
    }

    if (s->eof &&
        (av_audio_fifo_size(s->fifo[0]) <= 0 ||
         av_audio_fifo_size(s->fifo[1]) <= 0)) {
        ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, s->pts);
        return 0;
    }

    if ((av_audio_fifo_size(s->fifo[0]) > s->size &&
         av_audio_fifo_size(s->fifo[1]) > s->size) || s->eof) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) && !s->eof) {
        for (int i = 0; i < 2; i++) {
            if (av_audio_fifo_size(s->fifo[i]) > s->size)
                continue;
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return FFERROR_NOT_READY;
}

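/* config_output(): allocate the two input FIFOs sized to the analysis
 * window plus one-sample-per-channel buffers for the running sums, and pick
 * the float or double, slow or fast worker according to the negotiated
 * sample format and the `algo` option. */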
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioXCorrelateContext *s = ctx->priv;

    s->pts = AV_NOPTS_VALUE;

    s->fifo[0] = av_audio_fifo_alloc(outlink->format, outlink->channels, s->size);
    s->fifo[1] = av_audio_fifo_alloc(outlink->format, outlink->channels, s->size);
    if (!s->fifo[0] || !s->fifo[1])
        return AVERROR(ENOMEM);

    s->mean_sum[0] = ff_get_audio_buffer(outlink, 1);
    s->mean_sum[1] = ff_get_audio_buffer(outlink, 1);
    s->num_sum     = ff_get_audio_buffer(outlink, 1);
    s->den_sum[0]  = ff_get_audio_buffer(outlink, 1);
    s->den_sum[1]  = ff_get_audio_buffer(outlink, 1);
    if (!s->mean_sum[0] || !s->mean_sum[1] || !s->num_sum ||
        !s->den_sum[0] || !s->den_sum[1])
        return AVERROR(ENOMEM);

    switch (s->algo) {
    case 0: s->xcorrelate = xcorrelate_slow_f; break;
    case 1: s->xcorrelate = xcorrelate_fast_f; break;
    }

    if (outlink->format == AV_SAMPLE_FMT_DBLP) {
        switch (s->algo) {
        case 0: s->xcorrelate = xcorrelate_slow_d; break;
        case 1: s->xcorrelate = xcorrelate_fast_d; break;
        }
    }

    return 0;
}

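/* uninit(): release the FIFOs, the cached input frames and the running-sum
 * buffers. */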
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioXCorrelateContext *s = ctx->priv;

    av_audio_fifo_free(s->fifo[0]);
    av_audio_fifo_free(s->fifo[1]);
    av_frame_free(&s->cache[0]);
    av_frame_free(&s->cache[1]);
    av_frame_free(&s->mean_sum[0]);
    av_frame_free(&s->mean_sum[1]);
    av_frame_free(&s->num_sum);
    av_frame_free(&s->den_sum[0]);
    av_frame_free(&s->den_sum[1]);
}

static const AVFilterPad inputs[] = {
    {
        .name = "axcorrelate0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "axcorrelate1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
};

#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioXCorrelateContext, x)

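/* Filter options. Example use (file names are illustrative only):
 *   ffmpeg -i left.wav -i right.wav \
 *          -filter_complex axcorrelate=size=256:algo=fast out.wav
 * The output stream carries one correlation value per sample, per channel. */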
static const AVOption axcorrelate_options[] = {
    { "size", "set segment size", OFFSET(size), AV_OPT_TYPE_INT,   {.i64=256}, 2, 131072, AF },
    { "algo", "set algorithm",    OFFSET(algo), AV_OPT_TYPE_INT,   {.i64=0},   0,      1, AF, "algo" },
    { "slow", "slow algorithm",   0,            AV_OPT_TYPE_CONST, {.i64=0},   0,      0, AF, "algo" },
    { "fast", "fast algorithm",   0,            AV_OPT_TYPE_CONST, {.i64=1},   0,      0, AF, "algo" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(axcorrelate);

const AVFilter ff_af_axcorrelate = {
    .name          = "axcorrelate",
    .description   = NULL_IF_CONFIG_SMALL("Cross-correlate two audio streams."),
    .priv_size     = sizeof(AudioXCorrelateContext),
    .priv_class    = &axcorrelate_class,
    .activate      = activate,
    .uninit        = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_SAMPLEFMTS(AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP),
};