FFmpeg
af_headphone.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2017 Paul B Mahol
3  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 
23 #include "libavutil/avstring.h"
25 #include "libavutil/float_dsp.h"
26 #include "libavutil/intmath.h"
27 #include "libavutil/mem.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/tx.h"
30 
31 #include "avfilter.h"
32 #include "filters.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "audio.h"
36 
/* Processing mode for the `type` option: direct time-domain convolution
 * or FFT-based frequency-domain convolution. */
37 #define TIME_DOMAIN 0
38 #define FREQUENCY_DOMAIN 1
39 
/* HRIR layout for the `hrir` option: one stereo stream per mapped channel,
 * or a single multichannel stream carrying all left/right IR pairs. */
40 #define HRIR_STEREO 0
41 #define HRIR_MULTI 1
42 
43 typedef struct HeadphoneContext {
44  const AVClass *class;
45 
46  char *map;
47  int type;
48 
50 
52  int eof_hrirs;
53 
54  int ir_len;
55  int air_len;
56 
58 
59  int nb_irs;
60 
61  float gain;
63 
64  float *ringbuffer[2];
65  int write[2];
66 
68  int n_fft;
69  int size;
70  int hrir_fmt;
71 
72  float *data_ir[2];
73  float *temp_src[2];
77 
78  AVTXContext *fft[2], *ifft[2];
81 
82  float (*scalarproduct_float)(const float *v1, const float *v2, int len);
83  struct hrir_inputs {
84  int ir_len;
85  int eof;
86  } hrir_in[64];
88  enum AVChannel mapping[64];
89  uint8_t hrir_map[64];
91 
92 static int parse_channel_name(const char *arg, enum AVChannel *rchannel)
93 {
95 
97  return AVERROR(EINVAL);
98  *rchannel = channel;
99  return 0;
100 }
101 
103 {
104  HeadphoneContext *s = ctx->priv;
105  char *arg, *tokenizer, *p;
106  uint64_t used_channels = 0;
107 
108  p = s->map;
109  while ((arg = av_strtok(p, "|", &tokenizer))) {
110  enum AVChannel out_channel;
111 
112  p = NULL;
113  if (parse_channel_name(arg, &out_channel)) {
114  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", arg);
115  continue;
116  }
117  if (used_channels & (1ULL << out_channel)) {
118  av_log(ctx, AV_LOG_WARNING, "Ignoring duplicate channel '%s'.\n", arg);
119  continue;
120  }
121  used_channels |= (1ULL << out_channel);
122  s->mapping[s->nb_irs] = out_channel;
123  s->nb_irs++;
124  }
125  av_channel_layout_from_mask(&s->map_channel_layout, used_channels);
126 
127  if (s->hrir_fmt == HRIR_MULTI)
128  s->nb_hrir_inputs = 1;
129  else
130  s->nb_hrir_inputs = s->nb_irs;
131 }
132 
133 typedef struct ThreadData {
134  AVFrame *in, *out;
135  int *write;
136  float **ir;
138  float **ringbuffer;
139  float **temp_src;
143 } ThreadData;
144 
/**
 * Time-domain worker: direct convolution of every input channel with its
 * HRIR, accumulated into one ear of the interleaved stereo output.
 *
 * Slice-threaded job: jobnr selects the ear (0 = left, 1 = right) and
 * indexes all per-ear state in ThreadData / HeadphoneContext.
 * Always returns 0 (execute() callback contract).
 */
145 static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
146 {
147  HeadphoneContext *s = ctx->priv;
148  ThreadData *td = arg;
149  AVFrame *in = td->in, *out = td->out;
    /* sample offset of this ear within the interleaved stereo dst */
150  int offset = jobnr;
151  int *write = &td->write[jobnr];
    /* concatenated HRIRs for this ear: one air_len-sized slot per channel */
152  const float *const ir = td->ir[jobnr];
153  int *n_clippings = &td->n_clippings[jobnr];
154  float *ringbuffer = td->ringbuffer[jobnr];
155  float *temp_src = td->temp_src[jobnr];
156  const int ir_len = s->ir_len;
157  const int air_len = s->air_len;
158  const float *src = (const float *)in->data[0];
159  float *dst = (float *)out->data[0];
160  const int in_channels = in->ch_layout.nb_channels;
    /* buffer_length is a power of two, so `& modulo` implements the wrap */
161  const int buffer_length = s->buffer_length;
162  const uint32_t modulo = (uint32_t)buffer_length - 1;
163  float *buffer[64];
164  int wr = *write;
165  int read;
166  int i, l;
167 
168  dst += offset;
    /* one history ring buffer per input channel, carved out of one block */
169  for (l = 0; l < in_channels; l++) {
170  buffer[l] = ringbuffer + l * buffer_length;
171  }
172 
173  for (i = 0; i < in->nb_samples; i++) {
174  const float *cur_ir = ir;
175 
176  *dst = 0;
    /* push the newest sample of every channel into its ring buffer */
177  for (l = 0; l < in_channels; l++) {
178  *(buffer[l] + wr) = src[l];
179  }
180 
181  for (l = 0; l < in_channels; cur_ir += air_len, l++) {
182  const float *const bptr = buffer[l];
183 
    /* LFE bypasses HRIR filtering entirely: mixed in with its own gain */
184  if (l == s->lfe_channel) {
185  *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
186  continue;
187  }
188 
    /* index of the oldest sample of the ir_len-long window ending at wr */
189  read = (wr - (ir_len - 1)) & modulo;
190 
191  if (read + ir_len < buffer_length) {
192  memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
193  } else {
    /* window wraps around the end of the ring buffer: copy in two pieces */
194  int len = FFMIN(air_len - (read % ir_len), buffer_length - read);
195 
196  memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
197  memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
198  }
199 
    /* dot product over a length padded to a multiple of 32 for the SIMD
     * kernel; the tail beyond ir_len is presumably zero (buffers appear
     * to come from av_calloc) -- confirm against the allocations in
     * convert_coeffs */
200  dst[0] += s->scalarproduct_float(cur_ir, temp_src, FFALIGN(ir_len, 32));
201  }
202 
    /* count clipped samples so the caller can warn the user once */
203  if (fabsf(dst[0]) > 1)
204  n_clippings[0]++;
205 
    /* interleaved stereo output: advance by 2; input by its channel count */
206  dst += 2;
207  src += in_channels;
208  wr = (wr + 1) & modulo;
209  }
210 
211  *write = wr;
212 
213  return 0;
214 }
215 
/**
 * Frequency-domain worker: overlap-add FFT convolution of all input
 * channels with precomputed HRTFs, for one ear (jobnr 0 = left, 1 = right).
 *
 * The per-ear ring buffer holds the convolution tail (the ir_len - 1
 * samples that extend past the current block); it is drained into the
 * output first and refilled at the end. Always returns 0.
 */
216 static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
217 {
218  HeadphoneContext *s = ctx->priv;
219  ThreadData *td = arg;
220  AVFrame *in = td->in, *out = td->out;
    /* sample offset of this ear within the interleaved stereo dst */
221  int offset = jobnr;
222  int *write = &td->write[jobnr];
    /* per-ear HRTF table: one n_fft-sized spectrum per HRIR slot */
223  AVComplexFloat *hrtf = s->data_hrtf[jobnr];
224  int *n_clippings = &td->n_clippings[jobnr];
225  float *ringbuffer = td->ringbuffer[jobnr];
226  const int ir_len = s->ir_len;
227  const float *src = (const float *)in->data[0];
228  float *dst = (float *)out->data[0];
229  const int in_channels = in->ch_layout.nb_channels;
    /* buffer_length is a power of two, so `& modulo` implements the wrap */
230  const int buffer_length = s->buffer_length;
231  const uint32_t modulo = (uint32_t)buffer_length - 1;
232  AVComplexFloat *fft_out = s->out_fft[jobnr];
233  AVComplexFloat *fft_in = s->in_fft[jobnr];
234  AVComplexFloat *fft_acc = s->temp_afft[jobnr];
235  AVTXContext *ifft = s->ifft[jobnr];
236  AVTXContext *fft = s->fft[jobnr];
237  av_tx_fn tx_fn = s->tx_fn[jobnr];
238  av_tx_fn itx_fn = s->itx_fn[jobnr];
239  const int n_fft = s->n_fft;
    /* av_tx FFT/IFFT round trip scales by n_fft; undo it on output */
240  const float fft_scale = 1.0f / s->n_fft;
241  AVComplexFloat *hrtf_offset;
242  int wr = *write;
243  int n_read;
244  int i, j;
245 
246  dst += offset;
247 
    /* seed the output with the overlap tail left by the previous block,
     * clearing the ring buffer slots as they are consumed */
248  n_read = FFMIN(ir_len, in->nb_samples);
249  for (j = 0; j < n_read; j++) {
250  dst[2 * j] = ringbuffer[wr];
251  ringbuffer[wr] = 0.0;
252  wr = (wr + 1) & modulo;
253  }
254 
255  for (j = n_read; j < in->nb_samples; j++) {
256  dst[2 * j] = 0;
257  }
258 
    /* spectrum accumulator: sum of (channel spectrum x HRTF) over channels */
259  memset(fft_acc, 0, sizeof(AVComplexFloat) * n_fft);
260 
261  for (i = 0; i < in_channels; i++) {
    /* LFE bypasses HRTF filtering entirely: mixed in with its own gain */
262  if (i == s->lfe_channel) {
263  for (j = 0; j < in->nb_samples; j++) {
264  dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
265  }
266  continue;
267  }
268 
    /* NOTE(review): this store to `offset` is never read afterwards --
     * looks like a leftover; confirm against upstream before removing */
269  offset = i * n_fft;
270  hrtf_offset = hrtf + s->hrir_map[i] * n_fft;
271 
    /* deinterleave channel i into the (zero-padded) FFT input */
272  memset(fft_in, 0, sizeof(AVComplexFloat) * n_fft);
273 
274  for (j = 0; j < in->nb_samples; j++) {
275  fft_in[j].re = src[j * in_channels + i];
276  }
277 
278  tx_fn(fft, fft_out, fft_in, sizeof(*fft_in));
279 
    /* complex multiply-accumulate: fft_acc += fft_out * hrtf_offset */
280  for (j = 0; j < n_fft; j++) {
281  const AVComplexFloat *hcomplex = hrtf_offset + j;
282  const float re = fft_out[j].re;
283  const float im = fft_out[j].im;
284 
285  fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
286  fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
287  }
288  }
289 
290  itx_fn(ifft, fft_out, fft_acc, sizeof(*fft_acc));
291 
    /* first nb_samples of the inverse transform belong to this block */
292  for (j = 0; j < in->nb_samples; j++) {
293  dst[2 * j] += fft_out[j].re * fft_scale;
294  if (fabsf(dst[2 * j]) > 1)
295  n_clippings[0]++;
296  }
297 
    /* remaining ir_len - 1 samples are the tail for the next block(s) */
298  for (j = 0; j < ir_len - 1; j++) {
299  int write_pos = (wr + j) & modulo;
300 
301  *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
302  }
303 
304  *write = wr;
305 
306  return 0;
307 }
308 
/**
 * Inspect one queued HRIR input stream and record its impulse-response
 * length, enforcing a hard upper bound of 65536 samples.
 *
 * Returns 1 when the stream has hit EOF (IR fully buffered), 0 when more
 * data is still expected, or AVERROR(EINVAL) if the IR is too long.
 */
309 static int check_ir(AVFilterLink *inlink, int input_number)
310 {
311  AVFilterContext *ctx = inlink->dst;
312  HeadphoneContext *s = ctx->priv;
313  int ir_len, max_ir_len;
314 
    /* NOTE(review): extraction artifact -- the statement initializing
     * ir_len (presumably from ff_inlink_queued_samples(inlink)) is
     * missing here; confirm against the upstream source */
316  max_ir_len = 65536;
317  if (ir_len > max_ir_len) {
318  av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
319  return AVERROR(EINVAL);
320  }
    /* remember this stream's IR length; overall ir_len is the maximum */
321  s->hrir_in[input_number].ir_len = ir_len;
322  s->ir_len = FFMAX(ir_len, s->ir_len);
323 
324  if (ff_inlink_check_available_samples(inlink, ir_len + 1) == 1) {
325  s->hrir_in[input_number].eof = 1;
326  return 1;
327  }
328 
329  if (!s->hrir_in[input_number].eof) {
    /* NOTE(review): extraction artifact -- a statement is missing here
     * (presumably ff_inlink_request_frame(inlink)); confirm upstream */
331  return 0;
332  }
333 
334  return 0;
335 }
336 
338 {
339  AVFilterContext *ctx = outlink->src;
340  int n_clippings[2] = { 0 };
341  ThreadData td;
342  AVFrame *out;
343 
344  out = ff_get_audio_buffer(outlink, in->nb_samples);
345  if (!out) {
346  av_frame_free(&in);
347  return AVERROR(ENOMEM);
348  }
349  out->pts = in->pts;
350 
351  td.in = in; td.out = out; td.write = s->write;
352  td.ir = s->data_ir; td.n_clippings = n_clippings;
353  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
354  td.out_fft = s->out_fft;
355  td.in_fft = s->in_fft;
356  td.temp_afft = s->temp_afft;
357 
358  if (s->type == TIME_DOMAIN) {
360  } else {
362  }
363 
364  if (n_clippings[0] + n_clippings[1] > 0) {
365  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
366  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
367  }
368 
369  av_frame_free(&in);
370  return ff_filter_frame(outlink, out);
371 }
372 
374 {
375  struct HeadphoneContext *s = ctx->priv;
376  const int ir_len = s->ir_len;
377  int nb_input_channels = ctx->inputs[0]->ch_layout.nb_channels;
378  const int nb_hrir_channels = s->nb_hrir_inputs == 1 ? ctx->inputs[1]->ch_layout.nb_channels : s->nb_hrir_inputs * 2;
379  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
380  AVFrame *frame;
381  int ret = 0;
382  int n_fft;
383  int i, j, k;
384 
385  s->air_len = 1 << (32 - ff_clz(ir_len));
386  if (s->type == TIME_DOMAIN) {
387  s->air_len = FFALIGN(s->air_len, 32);
388  }
389  s->buffer_length = 1 << (32 - ff_clz(s->air_len));
390  s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));
391 
392  if (s->type == FREQUENCY_DOMAIN) {
393  float scale = 1.f;
394 
395  ret = av_tx_init(&s->fft[0], &s->tx_fn[0], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
396  if (ret < 0)
397  goto fail;
398  ret = av_tx_init(&s->fft[1], &s->tx_fn[1], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
399  if (ret < 0)
400  goto fail;
401  ret = av_tx_init(&s->ifft[0], &s->itx_fn[0], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
402  if (ret < 0)
403  goto fail;
404  ret = av_tx_init(&s->ifft[1], &s->itx_fn[1], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
405  if (ret < 0)
406  goto fail;
407 
408  if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
409  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
410  ret = AVERROR(ENOMEM);
411  goto fail;
412  }
413  }
414 
415  if (s->type == TIME_DOMAIN) {
416  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
417  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
418  } else {
419  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
420  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
421  s->out_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
422  s->out_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
423  s->in_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
424  s->in_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
425  s->temp_afft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
426  s->temp_afft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
427  if (!s->in_fft[0] || !s->in_fft[1] ||
428  !s->out_fft[0] || !s->out_fft[1] ||
429  !s->temp_afft[0] || !s->temp_afft[1]) {
430  ret = AVERROR(ENOMEM);
431  goto fail;
432  }
433  }
434 
435  if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
436  ret = AVERROR(ENOMEM);
437  goto fail;
438  }
439 
440  if (s->type == TIME_DOMAIN) {
441  s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
442  s->temp_src[1] = av_calloc(s->air_len, sizeof(float));
443 
444  s->data_ir[0] = av_calloc(nb_hrir_channels * s->air_len, sizeof(*s->data_ir[0]));
445  s->data_ir[1] = av_calloc(nb_hrir_channels * s->air_len, sizeof(*s->data_ir[1]));
446  if (!s->data_ir[0] || !s->data_ir[1] || !s->temp_src[0] || !s->temp_src[1]) {
447  ret = AVERROR(ENOMEM);
448  goto fail;
449  }
450  } else {
451  s->data_hrtf[0] = av_calloc(n_fft, sizeof(*s->data_hrtf[0]) * nb_hrir_channels);
452  s->data_hrtf[1] = av_calloc(n_fft, sizeof(*s->data_hrtf[1]) * nb_hrir_channels);
453  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
454  ret = AVERROR(ENOMEM);
455  goto fail;
456  }
457  }
458 
459  for (i = 0; i < s->nb_hrir_inputs; av_frame_free(&frame), i++) {
460  int len = s->hrir_in[i].ir_len;
461  float *ptr;
462 
463  ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &frame);
464  if (ret < 0)
465  goto fail;
466  ptr = (float *)frame->extended_data[0];
467 
468  if (s->hrir_fmt == HRIR_STEREO) {
469  int idx = av_channel_layout_index_from_channel(&s->map_channel_layout,
470  s->mapping[i]);
471  if (idx < 0)
472  continue;
473 
474  s->hrir_map[i] = idx;
475  if (s->type == TIME_DOMAIN) {
476  float *data_ir_l = s->data_ir[0] + idx * s->air_len;
477  float *data_ir_r = s->data_ir[1] + idx * s->air_len;
478 
479  for (j = 0; j < len; j++) {
480  data_ir_l[j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
481  data_ir_r[j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
482  }
483  } else {
484  AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
485  AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
486  AVComplexFloat *fft_in_l = s->in_fft[0];
487  AVComplexFloat *fft_in_r = s->in_fft[1];
488 
489  for (j = 0; j < len; j++) {
490  fft_in_l[j].re = ptr[j * 2 ] * gain_lin;
491  fft_in_r[j].re = ptr[j * 2 + 1] * gain_lin;
492  }
493 
494  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
495  s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(*fft_in_r));
496  }
497  } else {
498  int I, N = ctx->inputs[1]->ch_layout.nb_channels;
499 
500  for (k = 0; k < N / 2; k++) {
501  int idx = av_channel_layout_index_from_channel(&inlink->ch_layout,
502  s->mapping[k]);
503  if (idx < 0)
504  continue;
505 
506  s->hrir_map[k] = idx;
507  I = k * 2;
508  if (s->type == TIME_DOMAIN) {
509  float *data_ir_l = s->data_ir[0] + idx * s->air_len;
510  float *data_ir_r = s->data_ir[1] + idx * s->air_len;
511 
512  for (j = 0; j < len; j++) {
513  data_ir_l[j] = ptr[len * N - j * N - N + I ] * gain_lin;
514  data_ir_r[j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
515  }
516  } else {
517  AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
518  AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
519  AVComplexFloat *fft_in_l = s->in_fft[0];
520  AVComplexFloat *fft_in_r = s->in_fft[1];
521 
522  for (j = 0; j < len; j++) {
523  fft_in_l[j].re = ptr[j * N + I ] * gain_lin;
524  fft_in_r[j].re = ptr[j * N + I + 1] * gain_lin;
525  }
526 
527  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
528  s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(*fft_in_r));
529  }
530  }
531  }
532  }
533 
534  s->have_hrirs = 1;
535 
536 fail:
537  return ret;
538 }
539 
541 {
542  HeadphoneContext *s = ctx->priv;
543  AVFilterLink *inlink = ctx->inputs[0];
544  AVFilterLink *outlink = ctx->outputs[0];
545  AVFrame *in = NULL;
546  int i, ret;
547 
549  if (!s->eof_hrirs) {
550  int eof = 1;
551  for (i = 0; i < s->nb_hrir_inputs; i++) {
552  AVFilterLink *input = ctx->inputs[i + 1];
553 
554  if (s->hrir_in[i].eof)
555  continue;
556 
557  if ((ret = check_ir(input, i)) <= 0)
558  return ret;
559 
560  if (s->hrir_in[i].eof) {
562  av_log(ctx, AV_LOG_ERROR, "No samples provided for "
563  "HRIR stream %d.\n", i);
564  return AVERROR_INVALIDDATA;
565  }
566  } else {
567  eof = 0;
568  }
569  }
570  if (!eof) {
571  ff_filter_set_ready(ctx, 100);
572  return 0;
573  }
574  s->eof_hrirs = 1;
575 
577  if (ret < 0)
578  return ret;
579  } else if (!s->have_hrirs)
580  return AVERROR_EOF;
581 
582  if ((ret = ff_inlink_consume_samples(inlink, s->size, s->size, &in)) > 0) {
583  ret = headphone_frame(s, in, outlink);
584  if (ret < 0)
585  return ret;
586  }
587 
588  if (ret < 0)
589  return ret;
590 
592  if (ff_outlink_frame_wanted(outlink))
594 
595  return 0;
596 }
597 
599 {
600  struct HeadphoneContext *s = ctx->priv;
603  AVFilterChannelLayouts *stereo_layout = NULL;
604  AVFilterChannelLayouts *hrir_layouts = NULL;
605  int ret, i;
606 
608  if (ret)
609  return ret;
611  if (ret)
612  return ret;
613 
615  if (!layouts)
616  return AVERROR(ENOMEM);
617 
618  ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts);
619  if (ret)
620  return ret;
621 
623  if (ret)
624  return ret;
625  ret = ff_channel_layouts_ref(stereo_layout, &ctx->outputs[0]->incfg.channel_layouts);
626  if (ret)
627  return ret;
628 
629  if (s->hrir_fmt == HRIR_MULTI) {
630  hrir_layouts = ff_all_channel_counts();
631  if (!hrir_layouts)
632  return AVERROR(ENOMEM);
633  ret = ff_channel_layouts_ref(hrir_layouts, &ctx->inputs[1]->outcfg.channel_layouts);
634  if (ret)
635  return ret;
636  } else {
637  for (i = 1; i <= s->nb_hrir_inputs; i++) {
638  ret = ff_channel_layouts_ref(stereo_layout, &ctx->inputs[i]->outcfg.channel_layouts);
639  if (ret)
640  return ret;
641  }
642  }
643 
645 }
646 
648 {
649  AVFilterContext *ctx = inlink->dst;
650  HeadphoneContext *s = ctx->priv;
651 
652  if (s->nb_irs < inlink->ch_layout.nb_channels) {
653  av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->ch_layout.nb_channels);
654  return AVERROR(EINVAL);
655  }
656 
657  s->lfe_channel = av_channel_layout_index_from_channel(&inlink->ch_layout,
659  return 0;
660 }
661 
663 {
664  HeadphoneContext *s = ctx->priv;
665  int i, ret;
666 
667  AVFilterPad pad = {
668  .name = "in0",
669  .type = AVMEDIA_TYPE_AUDIO,
670  .config_props = config_input,
671  };
672  if ((ret = ff_append_inpad(ctx, &pad)) < 0)
673  return ret;
674 
675  if (!s->map) {
676  av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
677  return AVERROR(EINVAL);
678  }
679 
680  parse_map(ctx);
681 
682  for (i = 0; i < s->nb_hrir_inputs; i++) {
683  char *name = av_asprintf("hrir%d", i);
684  AVFilterPad pad = {
685  .name = name,
686  .type = AVMEDIA_TYPE_AUDIO,
687  };
688  if (!name)
689  return AVERROR(ENOMEM);
690  if ((ret = ff_append_inpad_free_name(ctx, &pad)) < 0)
691  return ret;
692  }
693 
694  if (s->type == TIME_DOMAIN) {
696  if (!fdsp)
697  return AVERROR(ENOMEM);
698  s->scalarproduct_float = fdsp->scalarproduct_float;
699  av_free(fdsp);
700  }
701 
702  return 0;
703 }
704 
705 static int config_output(AVFilterLink *outlink)
706 {
707  AVFilterContext *ctx = outlink->src;
708  HeadphoneContext *s = ctx->priv;
709  AVFilterLink *inlink = ctx->inputs[0];
710 
711  if (s->hrir_fmt == HRIR_MULTI) {
712  AVFilterLink *hrir_link = ctx->inputs[1];
713 
714  if (hrir_link->ch_layout.nb_channels < inlink->ch_layout.nb_channels * 2) {
715  av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->ch_layout.nb_channels * 2);
716  return AVERROR(EINVAL);
717  }
718  }
719 
720  s->gain_lfe = expf((s->gain - 3 * inlink->ch_layout.nb_channels + s->lfe_gain) / 20 * M_LN10);
721 
722  return 0;
723 }
724 
726 {
727  HeadphoneContext *s = ctx->priv;
728 
729  av_tx_uninit(&s->ifft[0]);
730  av_tx_uninit(&s->ifft[1]);
731  av_tx_uninit(&s->fft[0]);
732  av_tx_uninit(&s->fft[1]);
733  av_freep(&s->data_ir[0]);
734  av_freep(&s->data_ir[1]);
735  av_freep(&s->ringbuffer[0]);
736  av_freep(&s->ringbuffer[1]);
737  av_freep(&s->temp_src[0]);
738  av_freep(&s->temp_src[1]);
739  av_freep(&s->out_fft[0]);
740  av_freep(&s->out_fft[1]);
741  av_freep(&s->in_fft[0]);
742  av_freep(&s->in_fft[1]);
743  av_freep(&s->temp_afft[0]);
744  av_freep(&s->temp_afft[1]);
745  av_freep(&s->data_hrtf[0]);
746  av_freep(&s->data_hrtf[1]);
747 }
748 
/* Option-table helpers: field offset into the private context, and the
 * flag set shared by every option below. */
749 #define OFFSET(x) offsetof(HeadphoneContext, x)
750 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
751 
/* User-visible options: channel mapping string, gains in dB, processing
 * type (time/frequency domain), frame size, and HRIR stream format. */
752 static const AVOption headphone_options[] = {
753  { "map", "set channels convolution mappings", OFFSET(map), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
754  { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
755  { "lfe", "set lfe gain in dB", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
756  { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, .unit = "type" },
757  { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, .unit = "type" },
758  { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, .unit = "type" },
759  { "size", "set frame size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
760  { "hrir", "set hrir format", OFFSET(hrir_fmt), AV_OPT_TYPE_INT, {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, .unit = "hrir" },
761  { "stereo", "hrir files have exactly 2 channels", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, .unit = "hrir" },
762  { "multich", "single multichannel hrir file", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_MULTI}, 0, 0, .flags = FLAGS, .unit = "hrir" },
763  { NULL }
764 };
765 
766 AVFILTER_DEFINE_CLASS(headphone);
767 
/* Single stereo audio output pad; config_output validates HRIR channel
 * counts and derives the LFE gain when the link is configured. */
768 static const AVFilterPad outputs[] = {
769  {
770  .name = "default",
771  .type = AVMEDIA_TYPE_AUDIO,
772  .config_props = config_output,
773  },
774 };
775 
777  .name = "headphone",
778  .description = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
779  .priv_size = sizeof(HeadphoneContext),
780  .priv_class = &headphone_class,
781  .init = init,
782  .uninit = uninit,
783  .activate = activate,
784  .inputs = NULL,
788 };
formats
formats
Definition: signature.h:48
HeadphoneContext::hrir_inputs
Definition: af_headphone.c:83
convert_coeffs
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
Definition: af_headphone.c:373
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_headphone.c:38
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
HeadphoneContext::gain_lfe
float gain_lfe
Definition: af_headphone.c:62
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
HeadphoneContext::data_ir
float * data_ir[2]
Definition: af_headphone.c:72
ThreadData::out_fft
AVComplexFloat ** out_fft
Definition: af_headphone.c:140
out
FILE * out
Definition: movenc.c:55
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:387
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:674
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:335
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
HeadphoneContext::size
int size
Definition: af_headphone.c:69
HeadphoneContext::temp_afft
AVComplexFloat * temp_afft[2]
Definition: af_headphone.c:76
AVTXContext
Definition: tx_priv.h:235
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
ff_clz
#define ff_clz
Definition: intmath.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:622
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:486
TIME_DOMAIN
#define TIME_DOMAIN
Definition: af_headphone.c:37
AVOption
AVOption.
Definition: opt.h:357
HeadphoneContext::ringbuffer
float * ringbuffer[2]
Definition: af_headphone.c:64
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:159
expf
#define expf(x)
Definition: libm.h:283
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:822
HeadphoneContext::eof_hrirs
int eof_hrirs
Definition: af_headphone.c:52
AVComplexFloat
Definition: tx.h:27
HeadphoneContext::fft
AVTXContext * fft[2]
Definition: af_headphone.c:78
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:527
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:139
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
HeadphoneContext::data_hrtf
AVComplexFloat * data_hrtf[2]
Definition: af_headphone.c:80
HeadphoneContext::hrir_inputs::ir_len
int ir_len
Definition: af_headphone.c:84
AVComplexFloat::im
float im
Definition: tx.h:28
HeadphoneContext::air_len
int air_len
Definition: af_headphone.c:55
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
ff_append_inpad
int ff_append_inpad(AVFilterContext *f, AVFilterPad *p)
Append a new input/output pad to the filter's list of such pads.
Definition: avfilter.c:127
HeadphoneContext::ir_len
int ir_len
Definition: af_headphone.c:54
fail
#define fail()
Definition: checkasm.h:185
HeadphoneContext::scalarproduct_float
float(* scalarproduct_float)(const float *v1, const float *v2, int len)
Definition: af_headphone.c:82
activate
static int activate(AVFilterContext *ctx)
Definition: af_headphone.c:540
HeadphoneContext::hrir_inputs::eof
int eof
Definition: af_headphone.c:85
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:138
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:775
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_headphone.c:647
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
ff_inlink_check_available_samples
int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
Test if enough samples are available on the link.
Definition: avfilter.c:1423
av_cold
#define av_cold
Definition: attributes.h:90
OFFSET
#define OFFSET(x)
Definition: af_headphone.c:749
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:868
HRIR_MULTI
#define HRIR_MULTI
Definition: af_headphone.c:41
HRIR_STEREO
#define HRIR_STEREO
Definition: af_headphone.c:40
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
float
float
Definition: af_crystalizer.c:121
AVFloatDSPContext::scalarproduct_float
float(* scalarproduct_float)(const float *v1, const float *v2, int len)
Calculate the scalar product of two vectors of floats.
Definition: float_dsp.h:175
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1568
s
#define s(width, name)
Definition: cbs_vp9.c:198
HeadphoneContext::buffer_length
int buffer_length
Definition: af_headphone.c:67
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_channel_layout_from_mask
int av_channel_layout_from_mask(AVChannelLayout *channel_layout, uint64_t mask)
Initialize a native channel layout from a bitmask indicating which channels are present.
Definition: channel_layout.c:247
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
filters.h
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplex...
Definition: tx.h:47
ctx
AVFormatContext * ctx
Definition: movenc.c:49
HeadphoneContext::lfe_gain
float lfe_gain
Definition: af_headphone.c:62
parse_channel_name
static int parse_channel_name(const char *arg, enum AVChannel *rchannel)
Definition: af_headphone.c:92
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_headphone.c:725
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
headphone_convolute
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:145
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1462
NULL
#define NULL
Definition: coverity.c:32
FLAGS
#define FLAGS
Definition: af_headphone.c:750
parse_map
static void parse_map(AVFilterContext *ctx)
Definition: af_headphone.c:102
ff_append_inpad_free_name
int ff_append_inpad_free_name(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:132
outputs
static const AVFilterPad outputs[]
Definition: af_headphone.c:768
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:505
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
ThreadData::in_fft
AVComplexFloat ** in_fft
Definition: af_headphone.c:141
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_headphone.c:598
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, const AVChannelLayout *channel_layout)
Definition: formats.c:522
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(headphone)
float_dsp.h
headphone_frame
static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_headphone.c:337
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:311
HeadphoneContext::write
int write[2]
Definition: af_headphone.c:65
AV_CHAN_LOW_FREQUENCY
@ AV_CHAN_LOW_FREQUENCY
Definition: channel_layout.h:53
size
int size
Definition: twinvq_data.h:10344
AVComplexFloat::re
float re
Definition: tx.h:28
HeadphoneContext::gain
float gain
Definition: af_headphone.c:61
AVFloatDSPContext
Definition: float_dsp.h:24
HeadphoneContext
Definition: af_headphone.c:43
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
N
#define N
Definition: af_mcompand.c:54
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:613
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
headphone_options
static const AVOption headphone_options[]
Definition: af_headphone.c:752
HeadphoneContext::have_hrirs
int have_hrirs
Definition: af_headphone.c:51
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:248
AVChannel
AVChannel
Definition: channel_layout.h:47
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:454
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
HeadphoneContext::map
char * map
Definition: af_headphone.c:46
HeadphoneContext::tx_fn
av_tx_fn tx_fn[2]
Definition: af_headphone.c:79
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_headphone.c:662
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
HeadphoneContext::hrir_fmt
int hrir_fmt
Definition: af_headphone.c:70
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:137
HeadphoneContext::mapping
enum AVChannel mapping[64]
Definition: af_headphone.c:88
len
int len
Definition: vorbis_enc_data.h:426
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1417
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
HeadphoneContext::in_fft
AVComplexFloat * in_fft[2]
Definition: af_headphone.c:75
AVFilter
Filter definition.
Definition: avfilter.h:166
HeadphoneContext::hrir_map
uint8_t hrir_map[64]
Definition: af_headphone.c:89
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ThreadData::write
int * write
Definition: af_headphone.c:135
av_channel_from_string
enum AVChannel av_channel_from_string(const char *str)
This is the inverse function of av_channel_name().
Definition: channel_layout.c:150
ThreadData::temp_afft
AVComplexFloat ** temp_afft
Definition: af_headphone.c:142
HeadphoneContext::map_channel_layout
AVChannelLayout map_channel_layout
Definition: af_headphone.c:87
channel_layout.h
HeadphoneContext::nb_hrir_inputs
int nb_hrir_inputs
Definition: af_headphone.c:57
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
av_channel_layout_index_from_channel
int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout, enum AVChannel channel)
Get the index of a given channel in a channel layout.
Definition: channel_layout.c:708
HeadphoneContext::ifft
AVTXContext * ifft[2]
Definition: af_headphone.c:78
HeadphoneContext::itx_fn
av_tx_fn itx_fn[2]
Definition: af_headphone.c:79
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
avfilter.h
headphone_fast_convolute
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:216
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
mem.h
audio.h
M_LN10
#define M_LN10
Definition: mathematics.h:49
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
ff_af_headphone
const AVFilter ff_af_headphone
Definition: af_headphone.c:776
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:146
ThreadData::ir
float ** ir
Definition: af_headphone.c:136
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HeadphoneContext::temp_src
float * temp_src[2]
Definition: af_headphone.c:73
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:249
HeadphoneContext::type
int type
Definition: af_headphone.c:47
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
HeadphoneContext::lfe_channel
int lfe_channel
Definition: af_headphone.c:49
HeadphoneContext::hrir_in
struct HeadphoneContext::hrir_inputs hrir_in[64]
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
HeadphoneContext::nb_irs
int nb_irs
Definition: af_headphone.c:59
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_headphone.c:705
HeadphoneContext::n_fft
int n_fft
Definition: af_headphone.c:68
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:60
HeadphoneContext::out_fft
AVComplexFloat * out_fft[2]
Definition: af_headphone.c:74
channel
channel
Definition: ebur128.h:39
read
static uint32_t BS_FUNC() read(BSCTX *bc, unsigned int n)
Return n bits from the buffer, n has to be in the 0-32 range.
Definition: bitstream_template.h:231
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:235
tx.h
check_ir
static int check_ir(AVFilterLink *inlink, int input_number)
Definition: af_headphone.c:309
intmath.h