af_afir.c
1 /*
2  * Copyright (c) 2017 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * An arbitrary audio FIR filter
24  */
25 
26 #include <float.h>
27 
28 #include "libavutil/common.h"
29 #include "libavutil/float_dsp.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/xga_font_data.h"
33 #include "libavcodec/avfft.h"
34 
35 #include "audio.h"
36 #include "avfilter.h"
37 #include "filters.h"
38 #include "formats.h"
39 #include "internal.h"
40 #include "af_afir.h"
41 
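/*
 * Complex multiply-accumulate over one partition's packed real-FFT spectrum:
 * for each of the 'len' bins, sum += t * c in complex arithmetic. The extra
 * statement after the loop handles the purely real Nyquist coefficient,
 * which fir_quantum() relocates from block[1] to block[2 * part_size] before
 * calling this.
 */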
42 static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
43 {
44  int n;
45 
46  for (n = 0; n < len; n++) {
47  const float cre = c[2 * n ];
48  const float cim = c[2 * n + 1];
49  const float tre = t[2 * n ];
50  const float tim = t[2 * n + 1];
51 
52  sum[2 * n ] += tre * cre - tim * cim;
53  sum[2 * n + 1] += tre * cim + tim * cre;
54  }
55 
56  sum[2 * n] += t[2 * n] * c[2 * n];
57 }
58 
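/*
 * Process one quantum of min_part_size samples for channel 'ch': the
 * dry-scaled input is appended to each segment's sliding input buffer; once a
 * segment has collected a full partition, its packed real FFT is taken, the
 * current and previous input-block spectra kept in seg->block are
 * multiply-accumulated against the IR partition spectra in seg->coeff, the sum
 * is inverse-transformed, overlap-added with the half saved in seg->buffer,
 * and the segment output is mixed into the wet output.
 */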
59 static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset)
60 {
61  AudioFIRContext *s = ctx->priv;
62  const float *in = (const float *)s->in[0]->extended_data[ch] + offset;
63  float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset;
64  const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
65  int n, i, j;
66 
67  for (int segment = 0; segment < s->nb_segments; segment++) {
68  AudioFIRSegment *seg = &s->seg[segment];
69  float *src = (float *)seg->input->extended_data[ch];
70  float *dst = (float *)seg->output->extended_data[ch];
71  float *sum = (float *)seg->sum->extended_data[ch];
72 
73  s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(nb_samples, 4));
74  emms_c();
75 
76  seg->output_offset[ch] += s->min_part_size;
77  if (seg->output_offset[ch] == seg->part_size) {
78  seg->output_offset[ch] = 0;
79  } else {
80  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
81 
82  dst += seg->output_offset[ch];
83  for (n = 0; n < nb_samples; n++) {
84  ptr[n] += dst[n];
85  }
86  continue;
87  }
88 
89  memset(sum, 0, sizeof(*sum) * seg->fft_length);
90  block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
91  memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size));
92 
93  memcpy(block, src, sizeof(*src) * seg->part_size);
94 
95  av_rdft_calc(seg->rdft[ch], block);
96  block[2 * seg->part_size] = block[1];
97  block[1] = 0;
98 
99  j = seg->part_index[ch];
100 
101  for (i = 0; i < seg->nb_partitions; i++) {
102  const int coffset = j * seg->coeff_size;
103  const float *block = (const float *)seg->block->extended_data[ch] + i * seg->block_size;
104  const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;
105 
106  s->fcmul_add(sum, block, (const float *)coeff, seg->part_size);
107 
108  if (j == 0)
109  j = seg->nb_partitions;
110  j--;
111  }
112 
113  sum[1] = sum[2 * seg->part_size];
114  av_rdft_calc(seg->irdft[ch], sum);
115 
116  buf = (float *)seg->buffer->extended_data[ch];
117  for (n = 0; n < seg->part_size; n++) {
118  buf[n] += sum[n];
119  }
120 
121  memcpy(dst, buf, seg->part_size * sizeof(*dst));
122 
123  buf = (float *)seg->buffer->extended_data[ch];
124  memcpy(buf, sum + seg->part_size, seg->part_size * sizeof(*buf));
125 
126  seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;
127 
128  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
129 
130  for (n = 0; n < nb_samples; n++) {
131  ptr[n] += dst[n];
132  }
133  }
134 
135  s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
136  emms_c();
137 
138  return 0;
139 }
140 
141 static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
142 {
143  AudioFIRContext *s = ctx->priv;
144 
145  for (int offset = 0; offset < out->nb_samples; offset += s->min_part_size) {
146  fir_quantum(ctx, out, ch, offset);
147  }
148 
149  return 0;
150 }
151 
152 static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
153 {
154  AVFrame *out = arg;
155  const int start = (out->channels * jobnr) / nb_jobs;
156  const int end = (out->channels * (jobnr+1)) / nb_jobs;
157 
158  for (int ch = start; ch < end; ch++) {
159  fir_channel(ctx, out, ch);
160  }
161 
162  return 0;
163 }
164 
165 static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
166 {
167  AVFilterContext *ctx = outlink->src;
168  AVFrame *out = NULL;
169 
170  out = ff_get_audio_buffer(outlink, in->nb_samples);
171  if (!out) {
172  av_frame_free(&in);
173  return AVERROR(ENOMEM);
174  }
175 
176  if (s->pts == AV_NOPTS_VALUE)
177  s->pts = in->pts;
178  s->in[0] = in;
179  ctx->internal->execute(ctx, fir_channels, out, NULL, FFMIN(outlink->channels,
180  ff_filter_get_nb_threads(ctx)));
181 
182  out->pts = s->pts;
183  if (s->pts != AV_NOPTS_VALUE)
184  s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
185 
186  av_frame_free(&in);
187  s->in[0] = NULL;
188 
189  return ff_filter_frame(outlink, out);
190 }
191 
192 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
193 {
194  const uint8_t *font;
195  int font_height;
196  int i;
197 
198  font = avpriv_cga_font, font_height = 8;
199 
200  for (i = 0; txt[i]; i++) {
201  int char_y, mask;
202 
203  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
204  for (char_y = 0; char_y < font_height; char_y++) {
205  for (mask = 0x80; mask; mask >>= 1) {
206  if (font[txt[i] * font_height + char_y] & mask)
207  AV_WL32(p, color);
208  p += 4;
209  }
210  p += pic->linesize[0] - 8 * 4;
211  }
212  }
213 }
214 
215 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
216 {
217  int dx = FFABS(x1-x0);
218  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
219  int err = (dx>dy ? dx : -dy) / 2, e2;
220 
221  for (;;) {
222  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
223 
224  if (x0 == x1 && y0 == y1)
225  break;
226 
227  e2 = err;
228 
229  if (e2 >-dx) {
230  err -= dy;
231  x0--;
232  }
233 
234  if (e2 < dy) {
235  err += dx;
236  y0 += sy;
237  }
238  }
239 }
240 
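/*
 * Render the impulse response's frequency response into the RGB0 video frame:
 * evaluate the DTFT of the selected IR channel at s->w frequencies, then plot
 * the magnitude, phase and delay curves and, if the frame is large enough,
 * print their min/max values.
 */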
241 static void draw_response(AVFilterContext *ctx, AVFrame *out)
242 {
243  AudioFIRContext *s = ctx->priv;
244  float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
245  float min_delay = FLT_MAX, max_delay = FLT_MIN;
246  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
247  char text[32];
248  int channel, i, x;
249 
250  memset(out->data[0], 0, s->h * out->linesize[0]);
251 
252  phase = av_malloc_array(s->w, sizeof(*phase));
253  mag = av_malloc_array(s->w, sizeof(*mag));
254  delay = av_malloc_array(s->w, sizeof(*delay));
255  if (!mag || !phase || !delay)
256  goto end;
257 
258  channel = av_clip(s->ir_channel, 0, s->in[1]->channels - 1);
259  for (i = 0; i < s->w; i++) {
260  const float *src = (const float *)s->in[1]->extended_data[channel];
261  double w = i * M_PI / (s->w - 1);
262  double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;
263 
264  for (x = 0; x < s->nb_taps; x++) {
265  real += cos(-x * w) * src[x];
266  imag += sin(-x * w) * src[x];
267  real_num += cos(-x * w) * src[x] * x;
268  imag_num += sin(-x * w) * src[x] * x;
269  }
270 
271  mag[i] = hypot(real, imag);
272  phase[i] = atan2(imag, real);
273  div = real * real + imag * imag;
274  delay[i] = (real_num * real + imag_num * imag) / div;
275  min = fminf(min, mag[i]);
276  max = fmaxf(max, mag[i]);
277  min_delay = fminf(min_delay, delay[i]);
278  max_delay = fmaxf(max_delay, delay[i]);
279  }
280 
281  for (i = 0; i < s->w; i++) {
282  int ymag = mag[i] / max * (s->h - 1);
283  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
284  int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);
285 
286  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
287  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
288  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
289 
290  if (prev_ymag < 0)
291  prev_ymag = ymag;
292  if (prev_yphase < 0)
293  prev_yphase = yphase;
294  if (prev_ydelay < 0)
295  prev_ydelay = ydelay;
296 
297  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
298  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
299  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
300 
301  prev_ymag = ymag;
302  prev_yphase = yphase;
303  prev_ydelay = ydelay;
304  }
305 
306  if (s->w > 400 && s->h > 100) {
307  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
308  snprintf(text, sizeof(text), "%.2f", max);
309  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
310 
311  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
312  snprintf(text, sizeof(text), "%.2f", min);
313  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
314 
315  drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
316  snprintf(text, sizeof(text), "%.2f", max_delay);
317  drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);
318 
319  drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
320  snprintf(text, sizeof(text), "%.2f", min_delay);
321  drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
322  }
323 
324 end:
325  av_free(delay);
326  av_free(phase);
327  av_free(mag);
328 }
329 
330 static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
331  int offset, int nb_partitions, int part_size)
332 {
333  AudioFIRContext *s = ctx->priv;
334 
335  seg->rdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
336  seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
337  if (!seg->rdft || !seg->irdft)
338  return AVERROR(ENOMEM);
339 
340  seg->fft_length = part_size * 2 + 1;
341  seg->part_size = part_size;
342  seg->block_size = FFALIGN(seg->fft_length, 32);
343  seg->coeff_size = FFALIGN(seg->part_size + 1, 32);
344  seg->nb_partitions = nb_partitions;
345  seg->input_size = offset + s->min_part_size;
346  seg->input_offset = offset;
347 
348  seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
349  seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
350  if (!seg->part_index || !seg->output_offset)
351  return AVERROR(ENOMEM);
352 
353  for (int ch = 0; ch < ctx->inputs[0]->channels; ch++) {
354  seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C);
355  seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R);
356  if (!seg->rdft[ch] || !seg->irdft[ch])
357  return AVERROR(ENOMEM);
358  }
359 
360  seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
361  seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
362  seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
363  seg->coeff = ff_get_audio_buffer(ctx->inputs[1], seg->nb_partitions * seg->coeff_size * 2);
364  seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
365  seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
366  if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
367  return AVERROR(ENOMEM);
368 
369  return 0;
370 }
371 
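/*
 * Consume the complete impulse response queued on the second input, apply the
 * selected auto-gain (gtype) and irgain scaling, split it into the
 * non-uniform partitions set up by init_segment(), and store the packed
 * real-FFT spectrum of every partition in seg->coeff for fir_quantum().
 */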
372 static int convert_coeffs(AVFilterContext *ctx)
373 {
374  AudioFIRContext *s = ctx->priv;
375  int left, offset = 0, part_size, max_part_size;
376  int ret, i, ch, n;
377  float power = 0;
378 
379  s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1]);
380  if (s->nb_taps <= 0)
381  return AVERROR(EINVAL);
382 
383  if (s->minp > s->maxp) {
384  s->maxp = s->minp;
385  }
386 
387  left = s->nb_taps;
388  part_size = 1 << av_log2(s->minp);
389  max_part_size = 1 << av_log2(s->maxp);
390 
391  s->min_part_size = part_size;
392 
393  for (i = 0; left > 0; i++) {
394  int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
395  int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
396 
397  s->nb_segments = i + 1;
398  ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size);
399  if (ret < 0)
400  return ret;
401  offset += nb_partitions * part_size;
402  left -= nb_partitions * part_size;
403  part_size *= 2;
404  part_size = FFMIN(part_size, max_part_size);
405  }
406 
407  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_taps, s->nb_taps, &s->in[1]);
408  if (ret < 0)
409  return ret;
410  if (ret == 0)
411  return AVERROR_BUG;
412 
413  if (s->response)
414  draw_response(ctx, s->video);
415 
416  s->gain = 1;
417 
418  switch (s->gtype) {
419  case -1:
420  /* nothing to do */
421  break;
422  case 0:
423  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
424  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
425 
426  for (i = 0; i < s->nb_taps; i++)
427  power += FFABS(time[i]);
428  }
429  s->gain = ctx->inputs[1]->channels / power;
430  break;
431  case 1:
432  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
433  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
434 
435  for (i = 0; i < s->nb_taps; i++)
436  power += time[i];
437  }
438  s->gain = ctx->inputs[1]->channels / power;
439  break;
440  case 2:
441  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
442  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
443 
444  for (i = 0; i < s->nb_taps; i++)
445  power += time[i] * time[i];
446  }
447  s->gain = sqrtf(ch / power);
448  break;
449  default:
450  return AVERROR_BUG;
451  }
452 
453  s->gain = FFMIN(s->gain * s->ir_gain, 1.f);
454  av_log(ctx, AV_LOG_DEBUG, "power %f, gain %f\n", power, s->gain);
455  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
456  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
457 
458  s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(s->nb_taps, 4));
459  }
460 
461  av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", s->nb_taps);
462  av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments);
463 
464  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
465  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
466  int toffset = 0;
467 
468  for (i = FFMAX(1, s->length * s->nb_taps); i < s->nb_taps; i++)
469  time[i] = 0;
470 
471  av_log(ctx, AV_LOG_DEBUG, "channel: %d\n", ch);
472 
473  for (int segment = 0; segment < s->nb_segments; segment++) {
474  AudioFIRSegment *seg = &s->seg[segment];
475  float *block = (float *)seg->block->extended_data[ch];
476  FFTComplex *coeff = (FFTComplex *)seg->coeff->extended_data[ch];
477 
478  av_log(ctx, AV_LOG_DEBUG, "segment: %d\n", segment);
479 
480  for (i = 0; i < seg->nb_partitions; i++) {
481  const float scale = 1.f / seg->part_size;
482  const int coffset = i * seg->coeff_size;
483  const int remaining = s->nb_taps - toffset;
484  const int size = remaining >= seg->part_size ? seg->part_size : remaining;
485 
486  memset(block, 0, sizeof(*block) * seg->fft_length);
487  memcpy(block, time + toffset, size * sizeof(*block));
488 
489  av_rdft_calc(seg->rdft[0], block);
490 
491  coeff[coffset].re = block[0] * scale;
492  coeff[coffset].im = 0;
493  for (n = 1; n < seg->part_size; n++) {
494  coeff[coffset + n].re = block[2 * n] * scale;
495  coeff[coffset + n].im = block[2 * n + 1] * scale;
496  }
497  coeff[coffset + seg->part_size].re = block[1] * scale;
498  coeff[coffset + seg->part_size].im = 0;
499 
500  toffset += size;
501  }
502 
503  av_log(ctx, AV_LOG_DEBUG, "nb_partitions: %d\n", seg->nb_partitions);
504  av_log(ctx, AV_LOG_DEBUG, "partition size: %d\n", seg->part_size);
505  av_log(ctx, AV_LOG_DEBUG, "block size: %d\n", seg->block_size);
506  av_log(ctx, AV_LOG_DEBUG, "fft_length: %d\n", seg->fft_length);
507  av_log(ctx, AV_LOG_DEBUG, "coeff_size: %d\n", seg->coeff_size);
508  av_log(ctx, AV_LOG_DEBUG, "input_size: %d\n", seg->input_size);
509  av_log(ctx, AV_LOG_DEBUG, "input_offset: %d\n", seg->input_offset);
510  }
511  }
512 
513  av_frame_free(&s->in[1]);
514  s->have_coeffs = 1;
515 
516  return 0;
517 }
518 
519 static int check_ir(AVFilterLink *link, AVFrame *frame)
520 {
521  AVFilterContext *ctx = link->dst;
522  AudioFIRContext *s = ctx->priv;
523  int nb_taps, max_nb_taps;
524 
525  nb_taps = ff_inlink_queued_samples(link);
526  max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
527  if (nb_taps > max_nb_taps) {
528  av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
529  return AVERROR(EINVAL);
530  }
531 
532  return 0;
533 }
534 
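/*
 * Filter activation callback: forward status, wait until the whole IR is
 * available and convert it once, then consume the main input in multiples of
 * min_part_size samples, emit filtered frames (and, with response=1, the
 * frequency-response video), and propagate EOF or request more input.
 */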
535 static int activate(AVFilterContext *ctx)
536 {
537  AudioFIRContext *s = ctx->priv;
538  AVFilterLink *outlink = ctx->outputs[0];
539  int ret, status, available, wanted;
540  AVFrame *in = NULL;
541  int64_t pts;
542 
543  FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
544  if (s->response)
545  FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx);
546  if (!s->eof_coeffs) {
547  AVFrame *ir = NULL;
548 
549  ret = check_ir(ctx->inputs[1], ir);
550  if (ret < 0)
551  return ret;
552 
553  if (ff_outlink_get_status(ctx->inputs[1]) == AVERROR_EOF)
554  s->eof_coeffs = 1;
555 
556  if (!s->eof_coeffs) {
557  if (ff_outlink_frame_wanted(ctx->outputs[0]))
558  ff_inlink_request_frame(ctx->inputs[1]);
559  else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
560  ff_inlink_request_frame(ctx->inputs[1]);
561  return 0;
562  }
563  }
564 
565  if (!s->have_coeffs && s->eof_coeffs) {
566  ret = convert_coeffs(ctx);
567  if (ret < 0)
568  return ret;
569  }
570 
571  available = ff_inlink_queued_samples(ctx->inputs[0]);
572  wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
573  ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
574  if (ret > 0)
575  ret = fir_frame(s, in, outlink);
576 
577  if (ret < 0)
578  return ret;
579 
580  if (s->response && s->have_coeffs) {
581  int64_t old_pts = s->video->pts;
582  int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);
583 
584  if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
585  s->video->pts = new_pts;
586  return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
587  }
588  }
589 
590  if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
591  ff_filter_set_ready(ctx, 10);
592  return 0;
593  }
594 
595  if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
596  if (status == AVERROR_EOF) {
597  ff_outlink_set_status(ctx->outputs[0], status, pts);
598  if (s->response)
599  ff_outlink_set_status(ctx->outputs[1], status, pts);
600  return 0;
601  }
602  }
603 
603 
604  if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
605  !ff_outlink_get_status(ctx->inputs[0])) {
606  ff_inlink_request_frame(ctx->inputs[0]);
607  return 0;
608  }
609 
610  if (s->response &&
611  ff_outlink_frame_wanted(ctx->outputs[1]) &&
612  !ff_outlink_get_status(ctx->inputs[0])) {
613  ff_inlink_request_frame(ctx->inputs[0]);
614  return 0;
615  }
616 
617  return FFERROR_NOT_READY;
618 }
619 
620 static int query_formats(AVFilterContext *ctx)
621 {
622  AudioFIRContext *s = ctx->priv;
623  AVFilterFormats *formats;
624  AVFilterChannelLayouts *layouts;
625  static const enum AVSampleFormat sample_fmts[] = {
626  AV_SAMPLE_FMT_FLTP,
627  AV_SAMPLE_FMT_NONE
628  };
629  static const enum AVPixelFormat pix_fmts[] = {
630  AV_PIX_FMT_RGB0,
631  AV_PIX_FMT_NONE
632  };
633  int ret;
634 
635  if (s->response) {
636  AVFilterLink *videolink = ctx->outputs[1];
637  formats = ff_make_format_list(pix_fmts);
638  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
639  return ret;
640  }
641 
642  layouts = ff_all_channel_counts();
643  if (!layouts)
644  return AVERROR(ENOMEM);
645 
646  if (s->ir_format) {
647  ret = ff_set_common_channel_layouts(ctx, layouts);
648  if (ret < 0)
649  return ret;
650  } else {
651  AVFilterChannelLayouts *mono = NULL;
652 
653  ret = ff_add_channel_layout(&mono, AV_CH_LAYOUT_MONO);
654  if (ret)
655  return ret;
656 
657  if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts)) < 0)
658  return ret;
659  if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
660  return ret;
661  if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[1]->out_channel_layouts)) < 0)
662  return ret;
663  }
664 
665  formats = ff_make_format_list(sample_fmts);
666  if ((ret = ff_set_common_formats(ctx, formats)) < 0)
667  return ret;
668 
669  formats = ff_all_samplerates();
670  return ff_set_common_samplerates(ctx, formats);
671 }
672 
673 static int config_output(AVFilterLink *outlink)
674 {
675  AVFilterContext *ctx = outlink->src;
676  AudioFIRContext *s = ctx->priv;
677 
678  s->one2many = ctx->inputs[1]->channels == 1;
679  outlink->sample_rate = ctx->inputs[0]->sample_rate;
680  outlink->time_base = ctx->inputs[0]->time_base;
681  outlink->channel_layout = ctx->inputs[0]->channel_layout;
682  outlink->channels = ctx->inputs[0]->channels;
683 
684  s->nb_channels = outlink->channels;
685  s->nb_coef_channels = ctx->inputs[1]->channels;
686  s->pts = AV_NOPTS_VALUE;
687 
688  return 0;
689 }
690 
691 static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
692 {
693  AudioFIRContext *s = ctx->priv;
694 
695  if (seg->rdft) {
696  for (int ch = 0; ch < s->nb_channels; ch++) {
697  av_rdft_end(seg->rdft[ch]);
698  }
699  }
700  av_freep(&seg->rdft);
701 
702  if (seg->irdft) {
703  for (int ch = 0; ch < s->nb_channels; ch++) {
704  av_rdft_end(seg->irdft[ch]);
705  }
706  }
707  av_freep(&seg->irdft);
708 
709  av_freep(&seg->output_offset);
710  av_freep(&seg->part_index);
711 
712  av_frame_free(&seg->block);
713  av_frame_free(&seg->sum);
714  av_frame_free(&seg->buffer);
715  av_frame_free(&seg->coeff);
716  av_frame_free(&seg->input);
717  av_frame_free(&seg->output);
718  seg->input_size = 0;
719 }
720 
721 static av_cold void uninit(AVFilterContext *ctx)
722 {
723  AudioFIRContext *s = ctx->priv;
724 
725  for (int i = 0; i < s->nb_segments; i++) {
726  uninit_segment(ctx, &s->seg[i]);
727  }
728 
729  av_freep(&s->fdsp);
730  av_frame_free(&s->in[1]);
731 
732  for (int i = 0; i < ctx->nb_outputs; i++)
733  av_freep(&ctx->output_pads[i].name);
734  av_frame_free(&s->video);
735 }
736 
737 static int config_video(AVFilterLink *outlink)
738 {
739  AVFilterContext *ctx = outlink->src;
740  AudioFIRContext *s = ctx->priv;
741 
742  outlink->sample_aspect_ratio = (AVRational){1,1};
743  outlink->w = s->w;
744  outlink->h = s->h;
745  outlink->frame_rate = s->frame_rate;
746  outlink->time_base = av_inv_q(outlink->frame_rate);
747 
748  av_frame_free(&s->video);
749  s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
750  if (!s->video)
751  return AVERROR(ENOMEM);
752 
753  return 0;
754 }
755 
756 static av_cold int init(AVFilterContext *ctx)
757 {
758  AudioFIRContext *s = ctx->priv;
759  AVFilterPad pad, vpad;
760  int ret;
761 
762  pad = (AVFilterPad){
763  .name = av_strdup("default"),
764  .type = AVMEDIA_TYPE_AUDIO,
765  .config_props = config_output,
766  };
767 
768  if (!pad.name)
769  return AVERROR(ENOMEM);
770 
771  if (s->response) {
772  vpad = (AVFilterPad){
773  .name = av_strdup("filter_response"),
774  .type = AVMEDIA_TYPE_VIDEO,
775  .config_props = config_video,
776  };
777  if (!vpad.name)
778  return AVERROR(ENOMEM);
779  }
780 
781  ret = ff_insert_outpad(ctx, 0, &pad);
782  if (ret < 0) {
783  av_freep(&pad.name);
784  return ret;
785  }
786 
787  if (s->response) {
788  ret = ff_insert_outpad(ctx, 1, &vpad);
789  if (ret < 0) {
790  av_freep(&vpad.name);
791  return ret;
792  }
793  }
794 
795  s->fcmul_add = fcmul_add_c;
796 
797  s->fdsp = avpriv_float_dsp_alloc(0);
798  if (!s->fdsp)
799  return AVERROR(ENOMEM);
800 
801  if (ARCH_X86)
802  ff_afir_init_x86(s);
803 
804  return 0;
805 }
806 
807 static const AVFilterPad afir_inputs[] = {
808  {
809  .name = "main",
810  .type = AVMEDIA_TYPE_AUDIO,
811  },{
812  .name = "ir",
813  .type = AVMEDIA_TYPE_AUDIO,
814  },
815  { NULL }
816 };
817 
818 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
819 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
820 #define OFFSET(x) offsetof(AudioFIRContext, x)
821 
822 static const AVOption afir_options[] = {
823  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AF },
824  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AF },
825  { "length", "set IR length", OFFSET(length), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
826  { "gtype", "set IR auto gain type",OFFSET(gtype), AV_OPT_TYPE_INT, {.i64=0}, -1, 2, AF, "gtype" },
827  { "none", "without auto gain", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "gtype" },
828  { "peak", "peak gain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "gtype" },
829  { "dc", "DC gain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "gtype" },
830  { "gn", "gain to noise", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "gtype" },
831  { "irgain", "set IR gain", OFFSET(ir_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
832  { "irfmt", "set IR format", OFFSET(ir_format), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "irfmt" },
833  { "mono", "single channel", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "irfmt" },
834  { "input", "same as input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "irfmt" },
835  { "maxir", "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
836  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
837  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
838  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
839  { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
840  { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 16, 32768, AF },
841  { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 16, 32768, AF },
842  { NULL }
843 };
844 
845 AVFILTER_DEFINE_CLASS(afir);
846 
847 AVFilter ff_af_afir = {
848  .name = "afir",
849  .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in 2nd stream."),
850  .priv_size = sizeof(AudioFIRContext),
851  .priv_class = &afir_class,
852  .query_formats = query_formats,
853  .init = init,
854  .activate = activate,
855  .uninit = uninit,
856  .inputs = afir_inputs,
857  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
858  AVFILTER_FLAG_SLICE_THREADS,
859 };
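
The filter above implements partitioned, frequency-domain convolution: the impulse response from the second input is cut into partitions of growing size (bounded by the minp and maxp options), each partition is stored as a packed real-FFT spectrum, and every min_part_size input samples are transformed, multiply-accumulated against those spectra and overlap-added back into the time domain. Up to rounding and the dry/wet and auto-gain scaling, this matches direct time-domain FIR filtering. The standalone sketch below shows that reference computation; it is illustrative only, not part of af_afir.c, and the helper fir_direct and its test vectors are hypothetical.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical reference helper, not part of FFmpeg: direct-form FIR,
 * y[n] = sum_k taps[k] * x[n - k] over the available past samples. */
static void fir_direct(float *dst, const float *src, size_t nb_samples,
                       const float *taps, size_t nb_taps)
{
    for (size_t n = 0; n < nb_samples; n++) {
        float acc = 0.f;
        for (size_t k = 0; k < nb_taps && k <= n; k++)
            acc += taps[k] * src[n - k];
        dst[n] = acc;
    }
}

int main(void)
{
    /* A 3-tap moving-average IR applied to a short test signal. */
    static const float taps[3] = { 1.f / 3.f, 1.f / 3.f, 1.f / 3.f };
    static const float x[8]    = { 3, 3, 3, 0, 0, 0, 3, 3 };
    float y[8];

    fir_direct(y, x, 8, taps, 3);
    for (int n = 0; n < 8; n++)
        printf("%.3f ", y[n]);   /* 1.000 2.000 3.000 2.000 1.000 0.000 1.000 2.000 */
    printf("\n");
    return 0;
}

Compared with this direct form, the FFT-based implementation reduces the per-sample cost from O(nb_taps) to roughly logarithmic work per segment, while the non-uniform partitioning keeps the processing latency at the smallest partition size.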