FFmpeg
vaf_spectrumsynth.c
/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * SpectrumSynth filter
 * @todo support float pixel format
 */

#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "filters.h"
#include "internal.h"
#include "window_func.h"

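/*
 * Typical use (illustrative only, along the lines of the example in the
 * filter documentation; file names and option values are placeholders):
 * feed this filter the magnitude and phase videos produced by showspectrum
 * and get audio back, e.g.
 *
 *   ffmpeg -i in.flac -lavfi showspectrum=mode=separate:scale=log:overlap=0.875:slide=fullframe:data=magnitude -an -c:v rawvideo magnitude.nut
 *   ffmpeg -i in.flac -lavfi showspectrum=mode=separate:scale=lin:overlap=0.875:slide=fullframe:data=phase -an -c:v rawvideo phase.nut
 *   ffmpeg -i magnitude.nut -i phase.nut -lavfi spectrumsynth=channels=2:sample_rate=44100:win_func=hann:overlap=0.875:slide=fullframe out.flac
 *
 * scale, slide, overlap and the window function should match the settings
 * used for analysis.
 */
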
enum MagnitudeScale { LINEAR, LOG, NB_SCALES };
enum SlideMode      { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
enum Orientation    { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };

typedef struct SpectrumSynthContext {
    const AVClass *class;
    int sample_rate;
    int channels;
    int scale;
    int sliding;
    int win_func;
    float overlap;
    int orientation;

    AVFrame *magnitude, *phase;
    FFTContext *fft;            ///< Fast Fourier Transform context
    int fft_bits;               ///< number of bits (FFT window size = 1<<fft_bits)
    FFTComplex **fft_data;      ///< bins holder for each (displayed) channels
    int win_size;
    int size;
    int nb_freq;
    int hop_size;
    int start, end;
    int xpos;
    int xend;
    int64_t pts;
    float factor;
    AVFrame *buffer;
    float *window_func_lut;     ///< Window function LUT
} SpectrumSynthContext;

#define OFFSET(x) offsetof(SpectrumSynthContext, x)
#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption spectrumsynth_options[] = {
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 44100}, 15, INT_MAX, A },
    { "channels", "set channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 1}, 1, 8, A },
    { "scale", "set input amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = LOG}, 0, NB_SCALES-1, V, "scale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, V, "scale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, V, "scale" },
    { "slide", "set input sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = FULLFRAME}, 0, NB_SLIDES-1, V, "slide" },
    { "replace", "consume old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, V, "slide" },
    { "scroll", "consume only most right column", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, V, "slide" },
    { "fullframe", "consume full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, V, "slide" },
    { "rscroll", "consume only most left column", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, V, "slide" },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_WFUNC-1, A, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, A, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, A, "win_func" },
    { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, A, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, A, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, A, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, A, "win_func" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, A },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, V, "orientation" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, V, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, V, "orientation" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(spectrumsynth);

static int query_formats(AVFilterContext *ctx)
{
    SpectrumSynthContext *s = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    AVFilterLink *magnitude = ctx->inputs[0];
    AVFilterLink *phase = ctx->inputs[1];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
                                                   AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV444P,
                                                   AV_PIX_FMT_YUV444P16, AV_PIX_FMT_NONE };
    int ret, sample_rates[] = { 48000, -1 };

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref         (formats, &outlink->incfg.formats        )) < 0 ||
        (ret = ff_add_channel_layout  (&layout, FF_COUNT2LAYOUT(s->channels)   )) < 0 ||
        (ret = ff_channel_layouts_ref (layout,  &outlink->incfg.channel_layouts)) < 0)
        return ret;

    sample_rates[0] = s->sample_rate;
    formats = ff_make_format_list(sample_rates);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
        return ret;

    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_formats_ref(formats, &magnitude->outcfg.formats)) < 0)
        return ret;

    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_formats_ref(formats, &phase->outcfg.formats)) < 0)
        return ret;

    return 0;
}

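/* Output configuration: the spectrum picture holds s->channels bands of
 * s->size rows (columns when horizontal). The FFT window is the smallest
 * power of two not smaller than 2 * size, giving nb_freq = win_size / 2
 * positive-frequency bins, of which the first "size" are read back from the
 * picture. hop_size and the overlap-add normalization factor follow from
 * the selected window function and overlap. */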
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SpectrumSynthContext *s = ctx->priv;
    int width = ctx->inputs[0]->w;
    int height = ctx->inputs[0]->h;
    AVRational time_base  = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    int i, ch, fft_bits;
    float factor, overlap;

    outlink->sample_rate = s->sample_rate;
    outlink->time_base = (AVRational){1, s->sample_rate};

    if (width  != ctx->inputs[1]->w ||
        height != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[1]->w, ctx->inputs[1]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[1]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[1]->time_base.num,
               ctx->inputs[1]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[1]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[1]->frame_rate.num,
               ctx->inputs[1]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->size = s->orientation == VERTICAL ? height / s->channels : width / s->channels;
    s->xend = s->orientation == VERTICAL ? width : height;

    for (fft_bits = 1; 1 << fft_bits < 2 * s->size; fft_bits++);

    s->win_size = 1 << fft_bits;
    s->nb_freq = 1 << (fft_bits - 1);

    s->fft = av_fft_init(fft_bits, 1);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(EINVAL);
    }
    s->fft_data = av_calloc(s->channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    for (ch = 0; ch < s->channels; ch++) {
        s->fft_data[ch] = av_calloc(s->win_size, sizeof(**s->fft_data));
        if (!s->fft_data[ch])
            return AVERROR(ENOMEM);
    }

    s->buffer = ff_get_audio_buffer(outlink, s->win_size * 2);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1)
        s->overlap = overlap;
    s->hop_size = (1 - s->overlap) * s->win_size;
    for (factor = 0, i = 0; i < s->win_size; i++) {
        factor += s->window_func_lut[i] * s->window_func_lut[i];
    }
    s->factor = (factor / s->win_size) / FFMAX(1 / (1 - s->overlap) - 1, 1);

    return 0;
}

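/* Decode one spectrum pixel into a complex FFT bin. The magnitude plane is
 * either linear (pixel / max) or logarithmic, where ff_exp10((v - 1.) * 6.)
 * maps the normalized value back across a 120 dB range; the phase plane maps
 * [0..max] to [-pi..pi]. The pair is converted to re/im for the inverse FFT. */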
static void read16_fft_bin(SpectrumSynthContext *s,
                           int x, int y, int f, int ch)
{
    const int m_linesize = s->magnitude->linesize[0];
    const int p_linesize = s->phase->linesize[0];
    const uint16_t *m = (uint16_t *)(s->magnitude->data[0] + y * m_linesize);
    const uint16_t *p = (uint16_t *)(s->phase->data[0] + y * p_linesize);
    float magnitude, phase;

    switch (s->scale) {
    case LINEAR:
        magnitude = m[x] / (double)UINT16_MAX;
        break;
    case LOG:
        magnitude = ff_exp10(((m[x] / (double)UINT16_MAX) - 1.) * 6.);
        break;
    default:
        av_assert0(0);
    }
    phase = ((p[x] / (double)UINT16_MAX) * 2. - 1.) * M_PI;

    s->fft_data[ch][f].re = magnitude * cos(phase);
    s->fft_data[ch][f].im = magnitude * sin(phase);
}

static void read8_fft_bin(SpectrumSynthContext *s,
                          int x, int y, int f, int ch)
{
    const int m_linesize = s->magnitude->linesize[0];
    const int p_linesize = s->phase->linesize[0];
    const uint8_t *m = (uint8_t *)(s->magnitude->data[0] + y * m_linesize);
    const uint8_t *p = (uint8_t *)(s->phase->data[0] + y * p_linesize);
    float magnitude, phase;

    switch (s->scale) {
    case LINEAR:
        magnitude = m[x] / (double)UINT8_MAX;
        break;
    case LOG:
        magnitude = ff_exp10(((m[x] / (double)UINT8_MAX) - 1.) * 6.);
        break;
    default:
        av_assert0(0);
    }
    phase = ((p[x] / (double)UINT8_MAX) * 2. - 1.) * M_PI;

    s->fft_data[ch][f].re = magnitude * cos(phase);
    s->fft_data[ch][f].im = magnitude * sin(phase);
}

static void read_fft_data(AVFilterContext *ctx, int x, int h, int ch)
{
    SpectrumSynthContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int start = h * (s->channels - ch) - 1;
    int end = h * (s->channels - ch - 1);
    int y, f;

    switch (s->orientation) {
    case VERTICAL:
        switch (inlink->format) {
        case AV_PIX_FMT_YUV444P16:
        case AV_PIX_FMT_GRAY16:
            for (y = start, f = 0; y >= end; y--, f++) {
                read16_fft_bin(s, x, y, f, ch);
            }
            break;
        case AV_PIX_FMT_YUVJ444P:
        case AV_PIX_FMT_YUV444P:
        case AV_PIX_FMT_GRAY8:
            for (y = start, f = 0; y >= end; y--, f++) {
                read8_fft_bin(s, x, y, f, ch);
            }
            break;
        }
        break;
    case HORIZONTAL:
        switch (inlink->format) {
        case AV_PIX_FMT_YUV444P16:
        case AV_PIX_FMT_GRAY16:
            for (y = end, f = 0; y <= start; y++, f++) {
                read16_fft_bin(s, y, x, f, ch);
            }
            break;
        case AV_PIX_FMT_YUVJ444P:
        case AV_PIX_FMT_YUV444P:
        case AV_PIX_FMT_GRAY8:
            for (y = end, f = 0; y <= start; y++, f++) {
                read8_fft_bin(s, y, x, f, ch);
            }
            break;
        }
        break;
    }
}

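/* Rebuild a full complex spectrum for each channel: read the displayed bins,
 * zero the remaining positive frequencies, mirror the lower half into the
 * upper half as complex conjugates (Hermitian symmetry, so the transform of
 * the spectrum is real), then run the inverse FFT in place. */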
static void synth_window(AVFilterContext *ctx, int x)
{
    SpectrumSynthContext *s = ctx->priv;
    const int h = s->size;
    int nb = s->win_size;
    int y, f, ch;

    for (ch = 0; ch < s->channels; ch++) {
        read_fft_data(ctx, x, h, ch);

        for (y = h; y <= s->nb_freq; y++) {
            s->fft_data[ch][y].re = 0;
            s->fft_data[ch][y].im = 0;
        }

        for (y = s->nb_freq + 1, f = s->nb_freq - 1; y < nb; y++, f--) {
            s->fft_data[ch][y].re =  s->fft_data[ch][f].re;
            s->fft_data[ch][y].im = -s->fft_data[ch][f].im;
        }

        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }
}

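/* Overlap-add synthesis for one spectrum column: accumulate the inverse-FFT
 * output into the per-channel buffer between s->start and s->end, advance
 * the start position by hop_size, and once the start has passed win_size
 * emit win_size samples scaled by the normalization factor and shift the
 * remainder of the buffer down. */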
static int try_push_frame(AVFilterContext *ctx, int x)
{
    SpectrumSynthContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const float factor = s->factor;
    int ch, n, i, ret;
    int start, end;
    AVFrame *out;

    synth_window(ctx, x);

    for (ch = 0; ch < s->channels; ch++) {
        float *buf = (float *)s->buffer->extended_data[ch];
        int j, k;

        start = s->start;
        end = s->end;
        k = end;
        for (i = 0, j = start; j < k && i < s->win_size; i++, j++) {
            buf[j] += s->fft_data[ch][i].re;
        }

        for (; i < s->win_size; i++, j++) {
            buf[j] = s->fft_data[ch][i].re;
        }

        start += s->hop_size;
        end = j;

        if (start >= s->win_size) {
            start -= s->win_size;
            end -= s->win_size;

            if (ch == s->channels - 1) {
                float *dst;
                int c;

                out = ff_get_audio_buffer(outlink, s->win_size);
                if (!out) {
                    av_frame_free(&s->magnitude);
                    av_frame_free(&s->phase);
                    return AVERROR(ENOMEM);
                }

                out->pts = s->pts;
                s->pts += s->win_size;
                for (c = 0; c < s->channels; c++) {
                    dst = (float *)out->extended_data[c];
                    buf = (float *)s->buffer->extended_data[c];

                    for (n = 0; n < s->win_size; n++) {
                        dst[n] = buf[n] * factor;
                    }
                    memmove(buf, buf + s->win_size, s->win_size * 4); /* 4 == sizeof(float) */
                }

                ret = ff_filter_frame(outlink, out);
                if (ret < 0)
                    return ret;
            }
        }
    }

    s->start = start;
    s->end = end;

    return 0;
}

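/* Pick the column(s) to synthesize from the current magnitude/phase pair
 * according to the slide mode: "replace" walks one column per frame pair,
 * "scroll"/"rscroll" always read the rightmost/leftmost column, and
 * "fullframe" converts every column of the frame at once. */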
static int try_push_frames(AVFilterContext *ctx)
{
    SpectrumSynthContext *s = ctx->priv;
    int ret, x;

    if (!(s->magnitude && s->phase))
        return 0;

    switch (s->sliding) {
    case REPLACE:
        ret = try_push_frame(ctx, s->xpos);
        s->xpos++;
        if (s->xpos >= s->xend)
            s->xpos = 0;
        break;
    case SCROLL:
        s->xpos = s->xend - 1;
        ret = try_push_frame(ctx, s->xpos);
        break;
    case RSCROLL:
        s->xpos = 0;
        ret = try_push_frame(ctx, s->xpos);
        break;
    case FULLFRAME:
        for (x = 0; x < s->xend; x++) {
            ret = try_push_frame(ctx, x);
            if (ret < 0)
                break;
        }
        break;
    default:
        av_assert0(0);
    }

    av_frame_free(&s->magnitude);
    av_frame_free(&s->phase);
    return ret;
}

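/* activate(): consume one frame from each input into the staging pointers,
 * synthesize audio as soon as a new frame arrives, forward status (EOF)
 * between the links, and request more input when the output wants a frame. */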
static int activate(AVFilterContext *ctx)
{
    SpectrumSynthContext *s = ctx->priv;
    AVFrame **staging[2] = { &s->magnitude, &s->phase };
    int64_t pts;
    int i, ret;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    for (i = 0; i < 2; i++) {
        if (*staging[i])
            continue;
        ret = ff_inlink_consume_frame(ctx->inputs[i], staging[i]);
        if (ret < 0)
            return ret;
        if (ret) {
            ff_filter_set_ready(ctx, 10);
            return try_push_frames(ctx);
        }
    }

    for (i = 0; i < 2; i++) {
        if (ff_inlink_acknowledge_status(ctx->inputs[i], &ret, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], ret, pts);
            ff_inlink_set_status(ctx->inputs[1 - i], ret);
            return 0;
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        for (i = 0; i < 2; i++) {
            if (!*staging[i])
                ff_inlink_request_frame(ctx->inputs[i]);
        }
    }

    return FFERROR_NOT_READY;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SpectrumSynthContext *s = ctx->priv;
    int i;

    av_frame_free(&s->magnitude);
    av_frame_free(&s->phase);
    av_frame_free(&s->buffer);
    av_fft_end(s->fft);
    if (s->fft_data) {
        for (i = 0; i < s->channels; i++)
            av_freep(&s->fft_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->window_func_lut);
}

static const AVFilterPad spectrumsynth_inputs[] = {
    {
        .name = "magnitude",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name = "phase",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad spectrumsynth_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vaf_spectrumsynth = {
    .name          = "spectrumsynth",
    .description   = NULL_IF_CONFIG_SMALL("Convert input spectrum videos to audio output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(SpectrumSynthContext),
    .inputs        = spectrumsynth_inputs,
    .outputs       = spectrumsynth_outputs,
    .priv_class    = &spectrumsynth_class,
};