FFmpeg
af_headphone.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2017 Paul B Mahol
3  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 
23 #include "libavutil/avstring.h"
25 #include "libavutil/float_dsp.h"
26 #include "libavutil/intmath.h"
27 #include "libavutil/opt.h"
28 #include "libavcodec/avfft.h"
29 
30 #include "avfilter.h"
31 #include "filters.h"
32 #include "internal.h"
33 #include "audio.h"
34 
35 #define TIME_DOMAIN 0
36 #define FREQUENCY_DOMAIN 1
37 
38 #define HRIR_STEREO 0
39 #define HRIR_MULTI 1
40 
41 typedef struct HeadphoneContext {
42  const AVClass *class;
43 
44  char *map;
45  int type;
46 
48 
50  int eof_hrirs;
51 
52  int ir_len;
53  int air_len;
54 
55  int mapping[64];
56 
57  int nb_inputs;
58 
59  int nb_irs;
60 
61  float gain;
63 
64  float *ringbuffer[2];
65  int write[2];
66 
68  int n_fft;
69  int size;
70  int hrir_fmt;
71 
72  int *delay[2];
73  float *data_ir[2];
74  float *temp_src[2];
77 
78  FFTContext *fft[2], *ifft[2];
80 
84  int ir_len;
85  int delay_l;
86  int delay_r;
87  int eof;
88  } *in;
90 
91 static int parse_channel_name(char **arg, int *rchannel, char *buf)
92 {
93  int len, i, channel_id = 0;
94  int64_t layout, layout0;
95 
96  if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
97  layout0 = layout = av_get_channel_layout(buf);
98  for (i = 32; i > 0; i >>= 1) {
99  if (layout >= 1LL << i) {
100  channel_id += i;
101  layout >>= i;
102  }
103  }
104  if (channel_id >= 64 || layout0 != 1LL << channel_id)
105  return AVERROR(EINVAL);
106  *rchannel = channel_id;
107  *arg += len;
108  return 0;
109  }
110  return AVERROR(EINVAL);
111 }
112 
114 {
115  HeadphoneContext *s = ctx->priv;
116  char *arg, *tokenizer, *p, *args = av_strdup(s->map);
117  uint64_t used_channels = 0;
118  int i;
119 
120  if (!args)
121  return;
122  p = args;
123 
124  s->lfe_channel = -1;
125  s->nb_inputs = 1;
126 
127  for (i = 0; i < 64; i++) {
128  s->mapping[i] = -1;
129  }
130 
131  while ((arg = av_strtok(p, "|", &tokenizer))) {
132  int out_ch_id;
133  char buf[8];
134 
135  p = NULL;
136  if (parse_channel_name(&arg, &out_ch_id, buf)) {
137  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", arg);
138  continue;
139  }
140  if (used_channels & (1ULL << out_ch_id)) {
141  av_log(ctx, AV_LOG_WARNING, "Ignoring duplicate channel '%s'.\n", buf);
142  continue;
143  }
144  used_channels |= 1ULL << out_ch_id;
145  if (out_ch_id == av_log2(AV_CH_LOW_FREQUENCY))
146  s->lfe_channel = s->nb_irs;
147  s->mapping[s->nb_irs] = out_ch_id;
148  s->nb_irs++;
149  }
150 
151  if (s->hrir_fmt == HRIR_MULTI)
152  s->nb_inputs = 2;
153  else
154  s->nb_inputs = s->nb_irs + 1;
155 
156  av_free(args);
157 }
158 
159 typedef struct ThreadData {
160  AVFrame *in, *out;
161  int *write;
162  int **delay;
163  float **ir;
165  float **ringbuffer;
166  float **temp_src;
169 } ThreadData;
170 
/**
 * Time-domain convolution worker, run once per output ear.
 *
 * jobnr is 0 (left) or 1 (right); the output frame is interleaved stereo,
 * hence dst starts at offset jobnr and advances by 2 per sample.
 * Each input channel has its own region of the shared ring buffer; every
 * incoming sample is written into all channel ring buffers, then each
 * channel's recent history is dotted with that channel's IR segment.
 */
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    HeadphoneContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;                                  /* 0 = left ear, 1 = right ear */
    int *write = &td->write[jobnr];                      /* persistent ring-buffer write pos */
    const int *const delay = td->delay[jobnr];           /* per-channel delay, this ear */
    const float *const ir = td->ir[jobnr];               /* concatenated IRs, air_len floats each */
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    float *temp_src = td->temp_src[jobnr];
    const int ir_len = s->ir_len;
    const int air_len = s->air_len;                      /* ir_len rounded up (aligned) */
    const float *src = (const float *)in->data[0];       /* interleaved input samples */
    float *dst = (float *)out->data[0];                  /* interleaved stereo output */
    const int in_channels = in->channels;
    const int buffer_length = s->buffer_length;          /* power of two, so & works as modulo */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    float *buffer[64];
    int wr = *write;
    int read;
    int i, l;

    dst += offset;
    /* One ring-buffer slice per input channel. */
    for (l = 0; l < in_channels; l++) {
        buffer[l] = ringbuffer + l * buffer_length;
    }

    for (i = 0; i < in->nb_samples; i++) {
        const float *temp_ir = ir;

        *dst = 0;
        /* Push the current frame of all channels into the ring buffers. */
        for (l = 0; l < in_channels; l++) {
            *(buffer[l] + wr) = src[l];
        }

        for (l = 0; l < in_channels; l++) {
            const float *const bptr = buffer[l];

            if (l == s->lfe_channel) {
                /* LFE is not convolved, just mixed in with its own gain. */
                *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
                temp_ir += air_len;
                continue;
            }

            /* Oldest sample needed for this channel, offset by its delay. */
            read = (wr - *(delay + l) - (ir_len - 1) + buffer_length) & modulo;

            if (read + ir_len < buffer_length) {
                memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
            } else {
                /* History wraps around the ring buffer: copy in two pieces.
                 * NOTE(review): this mixes ir_len and air_len ("read % ir_len"
                 * vs air_len elsewhere) — looks suspicious; confirm against
                 * upstream before relying on the wrap path. */
                int len = FFMIN(air_len - (read % ir_len), buffer_length - read);

                memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
                memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
            }

            /* Dot product of IR and history; length padded for SIMD. */
            dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_len, 32));
            temp_ir += air_len;
        }

        if (fabsf(dst[0]) > 1)
            n_clippings[0]++;

        dst += 2;                 /* interleaved stereo: skip the other ear */
        src += in_channels;
        wr = (wr + 1) & modulo;
    }

    *write = wr;                  /* persist position for the next frame */

    return 0;
}
244 
/**
 * Frequency-domain (FFT overlap-add) convolution worker, run once per ear.
 *
 * jobnr selects the ear (0 = left, 1 = right); output is interleaved
 * stereo, so dst starts at offset jobnr and samples live at dst[2*j].
 * For each input channel the frame is forward-transformed, multiplied by
 * that channel's precomputed HRTF spectrum and accumulated; one inverse
 * FFT then yields this frame's contribution, with the convolution tail
 * (ir_len - 1 samples) carried to the next frame via the ring buffer.
 */
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    HeadphoneContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;                                  /* ear index into interleaved output */
    int *write = &td->write[jobnr];                      /* persistent tail-buffer position */
    FFTComplex *hrtf = s->data_hrtf[jobnr];              /* per-channel HRTF spectra, n_fft each */
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];           /* overlap-add tail from previous frames */
    const int ir_len = s->ir_len;
    const float *src = (const float *)in->data[0];
    float *dst = (float *)out->data[0];
    const int in_channels = in->channels;
    const int buffer_length = s->buffer_length;          /* power of two */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    FFTComplex *fft_in = s->temp_fft[jobnr];
    FFTComplex *fft_acc = s->temp_afft[jobnr];           /* spectrum accumulator over channels */
    FFTContext *ifft = s->ifft[jobnr];
    FFTContext *fft = s->fft[jobnr];
    const int n_fft = s->n_fft;
    const float fft_scale = 1.0f / s->n_fft;             /* un-normalized FFT: scale on inverse */
    FFTComplex *hrtf_offset;
    int wr = *write;
    int n_read;
    int i, j;

    dst += offset;

    /* Drain the stored overlap tail into the output, clearing it as we go. */
    n_read = FFMIN(ir_len, in->nb_samples);
    for (j = 0; j < n_read; j++) {
        dst[2 * j] = ringbuffer[wr];
        ringbuffer[wr] = 0.0;
        wr = (wr + 1) & modulo;
    }

    for (j = n_read; j < in->nb_samples; j++) {
        dst[2 * j] = 0;
    }

    memset(fft_acc, 0, sizeof(FFTComplex) * n_fft);

    for (i = 0; i < in_channels; i++) {
        if (i == s->lfe_channel) {
            /* LFE bypasses the HRTF and is mixed in directly. */
            for (j = 0; j < in->nb_samples; j++) {
                dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
            }
            continue;
        }

        offset = i * n_fft;                /* reuse: now a spectrum offset, not the ear */
        hrtf_offset = hrtf + offset;

        memset(fft_in, 0, sizeof(FFTComplex) * n_fft);

        /* Real input samples into the zero-padded FFT buffer. */
        for (j = 0; j < in->nb_samples; j++) {
            fft_in[j].re = src[j * in_channels + i];
        }

        av_fft_permute(fft, fft_in);
        av_fft_calc(fft, fft_in);
        /* Complex multiply with the channel's HRTF, accumulated over channels. */
        for (j = 0; j < n_fft; j++) {
            const FFTComplex *hcomplex = hrtf_offset + j;
            const float re = fft_in[j].re;
            const float im = fft_in[j].im;

            fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
            fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
        }
    }

    av_fft_permute(ifft, fft_acc);
    av_fft_calc(ifft, fft_acc);

    /* First nb_samples of the inverse transform belong to this frame. */
    for (j = 0; j < in->nb_samples; j++) {
        dst[2 * j] += fft_acc[j].re * fft_scale;
    }

    /* Remaining ir_len - 1 samples are the tail: stash for the next frame. */
    for (j = 0; j < ir_len - 1; j++) {
        int write_pos = (wr + j) & modulo;

        *(ringbuffer + write_pos) += fft_acc[in->nb_samples + j].re * fft_scale;
    }

    /* Count clipped samples for the warning in the caller. */
    for (i = 0; i < out->nb_samples; i++) {
        if (fabsf(dst[0]) > 1) {
            n_clippings[0]++;
        }

        dst += 2;
    }

    *write = wr;

    return 0;
}
341 
342 static int check_ir(AVFilterLink *inlink, int input_number)
343 {
344  AVFilterContext *ctx = inlink->dst;
345  HeadphoneContext *s = ctx->priv;
346  int ir_len, max_ir_len;
347 
349  max_ir_len = 65536;
350  if (ir_len > max_ir_len) {
351  av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
352  return AVERROR(EINVAL);
353  }
354  s->in[input_number].ir_len = ir_len;
355  s->ir_len = FFMAX(ir_len, s->ir_len);
356 
357  return 0;
358 }
359 
361 {
362  AVFilterContext *ctx = outlink->src;
363  int n_clippings[2] = { 0 };
364  ThreadData td;
365  AVFrame *out;
366 
367  out = ff_get_audio_buffer(outlink, in->nb_samples);
368  if (!out) {
369  av_frame_free(&in);
370  return AVERROR(ENOMEM);
371  }
372  out->pts = in->pts;
373 
374  td.in = in; td.out = out; td.write = s->write;
375  td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
376  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
377  td.temp_fft = s->temp_fft;
378  td.temp_afft = s->temp_afft;
379 
380  if (s->type == TIME_DOMAIN) {
381  ctx->internal->execute(ctx, headphone_convolute, &td, NULL, 2);
382  } else {
383  ctx->internal->execute(ctx, headphone_fast_convolute, &td, NULL, 2);
384  }
385  emms_c();
386 
387  if (n_clippings[0] + n_clippings[1] > 0) {
388  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
389  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
390  }
391 
392  av_frame_free(&in);
393  return ff_filter_frame(outlink, out);
394 }
395 
397 {
398  struct HeadphoneContext *s = ctx->priv;
399  const int ir_len = s->ir_len;
400  int nb_irs = s->nb_irs;
401  int nb_input_channels = ctx->inputs[0]->channels;
402  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
403  FFTComplex *data_hrtf_l = NULL;
404  FFTComplex *data_hrtf_r = NULL;
405  FFTComplex *fft_in_l = NULL;
406  FFTComplex *fft_in_r = NULL;
407  float *data_ir_l = NULL;
408  float *data_ir_r = NULL;
409  int offset = 0, ret = 0;
410  int n_fft;
411  int i, j, k;
412 
413  s->air_len = 1 << (32 - ff_clz(ir_len));
414  if (s->type == TIME_DOMAIN) {
415  s->air_len = FFALIGN(s->air_len, 32);
416  }
417  s->buffer_length = 1 << (32 - ff_clz(s->air_len));
418  s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));
419 
420  if (s->type == FREQUENCY_DOMAIN) {
421  fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
422  fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
423  if (!fft_in_l || !fft_in_r) {
424  ret = AVERROR(ENOMEM);
425  goto fail;
426  }
427 
428  av_fft_end(s->fft[0]);
429  av_fft_end(s->fft[1]);
430  s->fft[0] = av_fft_init(av_log2(s->n_fft), 0);
431  s->fft[1] = av_fft_init(av_log2(s->n_fft), 0);
432  av_fft_end(s->ifft[0]);
433  av_fft_end(s->ifft[1]);
434  s->ifft[0] = av_fft_init(av_log2(s->n_fft), 1);
435  s->ifft[1] = av_fft_init(av_log2(s->n_fft), 1);
436 
437  if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
438  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
439  ret = AVERROR(ENOMEM);
440  goto fail;
441  }
442  }
443 
444  s->data_ir[0] = av_calloc(s->air_len, sizeof(float) * s->nb_irs);
445  s->data_ir[1] = av_calloc(s->air_len, sizeof(float) * s->nb_irs);
446  s->delay[0] = av_calloc(s->nb_irs, sizeof(float));
447  s->delay[1] = av_calloc(s->nb_irs, sizeof(float));
448 
449  if (s->type == TIME_DOMAIN) {
450  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
451  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
452  } else {
453  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
454  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
455  s->temp_fft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
456  s->temp_fft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
457  s->temp_afft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
458  s->temp_afft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
459  if (!s->temp_fft[0] || !s->temp_fft[1] ||
460  !s->temp_afft[0] || !s->temp_afft[1]) {
461  ret = AVERROR(ENOMEM);
462  goto fail;
463  }
464  }
465 
466  if (!s->data_ir[0] || !s->data_ir[1] ||
467  !s->ringbuffer[0] || !s->ringbuffer[1]) {
468  ret = AVERROR(ENOMEM);
469  goto fail;
470  }
471 
472  if (s->type == TIME_DOMAIN) {
473  s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
474  s->temp_src[1] = av_calloc(s->air_len, sizeof(float));
475 
476  data_ir_l = av_calloc(nb_irs * s->air_len, sizeof(*data_ir_l));
477  data_ir_r = av_calloc(nb_irs * s->air_len, sizeof(*data_ir_r));
478  if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
479  ret = AVERROR(ENOMEM);
480  goto fail;
481  }
482  } else {
483  data_hrtf_l = av_calloc(n_fft, sizeof(*data_hrtf_l) * nb_irs);
484  data_hrtf_r = av_calloc(n_fft, sizeof(*data_hrtf_r) * nb_irs);
485  if (!data_hrtf_r || !data_hrtf_l) {
486  ret = AVERROR(ENOMEM);
487  goto fail;
488  }
489  }
490 
491  for (i = 0; i < s->nb_inputs - 1; i++) {
492  int len = s->in[i + 1].ir_len;
493  int delay_l = s->in[i + 1].delay_l;
494  int delay_r = s->in[i + 1].delay_r;
495  float *ptr;
496 
497  ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &s->in[i + 1].frame);
498  if (ret < 0)
499  goto fail;
500  ptr = (float *)s->in[i + 1].frame->extended_data[0];
501 
502  if (s->hrir_fmt == HRIR_STEREO) {
503  int idx = -1;
504 
505  for (j = 0; j < inlink->channels; j++) {
506  if (s->mapping[i] < 0) {
507  continue;
508  }
509 
510  if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[i])) {
511  idx = i;
512  break;
513  }
514  }
515 
516  if (idx == -1)
517  continue;
518  if (s->type == TIME_DOMAIN) {
519  offset = idx * s->air_len;
520  for (j = 0; j < len; j++) {
521  data_ir_l[offset + j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
522  data_ir_r[offset + j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
523  }
524  } else {
525  memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
526  memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
527 
528  offset = idx * n_fft;
529  for (j = 0; j < len; j++) {
530  fft_in_l[delay_l + j].re = ptr[j * 2 ] * gain_lin;
531  fft_in_r[delay_r + j].re = ptr[j * 2 + 1] * gain_lin;
532  }
533 
534  av_fft_permute(s->fft[0], fft_in_l);
535  av_fft_calc(s->fft[0], fft_in_l);
536  memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
537  av_fft_permute(s->fft[0], fft_in_r);
538  av_fft_calc(s->fft[0], fft_in_r);
539  memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
540  }
541  } else {
542  int I, N = ctx->inputs[1]->channels;
543 
544  for (k = 0; k < N / 2; k++) {
545  int idx = -1;
546 
547  for (j = 0; j < inlink->channels; j++) {
548  if (s->mapping[k] < 0) {
549  continue;
550  }
551 
552  if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[k])) {
553  idx = k;
554  break;
555  }
556  }
557  if (idx == -1)
558  continue;
559 
560  I = idx * 2;
561  if (s->type == TIME_DOMAIN) {
562  offset = idx * s->air_len;
563  for (j = 0; j < len; j++) {
564  data_ir_l[offset + j] = ptr[len * N - j * N - N + I ] * gain_lin;
565  data_ir_r[offset + j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
566  }
567  } else {
568  memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
569  memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
570 
571  offset = idx * n_fft;
572  for (j = 0; j < len; j++) {
573  fft_in_l[delay_l + j].re = ptr[j * N + I ] * gain_lin;
574  fft_in_r[delay_r + j].re = ptr[j * N + I + 1] * gain_lin;
575  }
576 
577  av_fft_permute(s->fft[0], fft_in_l);
578  av_fft_calc(s->fft[0], fft_in_l);
579  memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
580  av_fft_permute(s->fft[0], fft_in_r);
581  av_fft_calc(s->fft[0], fft_in_r);
582  memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
583  }
584  }
585  }
586 
587  av_frame_free(&s->in[i + 1].frame);
588  }
589 
590  if (s->type == TIME_DOMAIN) {
591  memcpy(s->data_ir[0], data_ir_l, sizeof(float) * nb_irs * s->air_len);
592  memcpy(s->data_ir[1], data_ir_r, sizeof(float) * nb_irs * s->air_len);
593  } else {
594  s->data_hrtf[0] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
595  s->data_hrtf[1] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
596  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
597  ret = AVERROR(ENOMEM);
598  goto fail;
599  }
600 
601  memcpy(s->data_hrtf[0], data_hrtf_l,
602  sizeof(FFTComplex) * nb_irs * n_fft);
603  memcpy(s->data_hrtf[1], data_hrtf_r,
604  sizeof(FFTComplex) * nb_irs * n_fft);
605  }
606 
607  s->have_hrirs = 1;
608 
609 fail:
610 
611  for (i = 0; i < s->nb_inputs - 1; i++)
612  av_frame_free(&s->in[i + 1].frame);
613 
614  av_freep(&data_ir_l);
615  av_freep(&data_ir_r);
616 
617  av_freep(&data_hrtf_l);
618  av_freep(&data_hrtf_r);
619 
620  av_freep(&fft_in_l);
621  av_freep(&fft_in_r);
622 
623  return ret;
624 }
625 
627 {
628  HeadphoneContext *s = ctx->priv;
629  AVFilterLink *inlink = ctx->inputs[0];
630  AVFilterLink *outlink = ctx->outputs[0];
631  AVFrame *in = NULL;
632  int i, ret;
633 
635  if (!s->eof_hrirs) {
636  for (i = 1; i < s->nb_inputs; i++) {
637  if (s->in[i].eof)
638  continue;
639 
640  if ((ret = check_ir(ctx->inputs[i], i)) < 0)
641  return ret;
642 
643  if (ff_outlink_get_status(ctx->inputs[i]) == AVERROR_EOF) {
644  if (!ff_inlink_queued_samples(ctx->inputs[i])) {
645  av_log(ctx, AV_LOG_ERROR, "No samples provided for "
646  "HRIR stream %d.\n", i - 1);
647  return AVERROR_INVALIDDATA;
648  }
649  s->in[i].eof = 1;
650  }
651  }
652 
653  for (i = 1; i < s->nb_inputs; i++) {
654  if (!s->in[i].eof)
655  break;
656  }
657 
658  if (i != s->nb_inputs) {
659  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
660  for (i = 1; i < s->nb_inputs; i++) {
661  if (!s->in[i].eof)
662  ff_inlink_request_frame(ctx->inputs[i]);
663  }
664  }
665 
666  return 0;
667  } else {
668  s->eof_hrirs = 1;
669  }
670  }
671 
672  if (!s->have_hrirs && s->eof_hrirs) {
674  if (ret < 0)
675  return ret;
676  }
677 
678  if ((ret = ff_inlink_consume_samples(ctx->inputs[0], s->size, s->size, &in)) > 0) {
679  ret = headphone_frame(s, in, outlink);
680  if (ret < 0)
681  return ret;
682  }
683 
684  if (ret < 0)
685  return ret;
686 
687  FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
688  if (ff_outlink_frame_wanted(ctx->outputs[0]))
689  ff_inlink_request_frame(ctx->inputs[0]);
690 
691  return 0;
692 }
693 
695 {
696  struct HeadphoneContext *s = ctx->priv;
699  AVFilterChannelLayouts *stereo_layout = NULL;
700  AVFilterChannelLayouts *hrir_layouts = NULL;
701  int ret, i;
702 
704  if (ret)
705  return ret;
707  if (ret)
708  return ret;
709 
711  if (!layouts)
712  return AVERROR(ENOMEM);
713 
714  ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
715  if (ret)
716  return ret;
717 
718  ret = ff_add_channel_layout(&stereo_layout, AV_CH_LAYOUT_STEREO);
719  if (ret)
720  return ret;
721  ret = ff_channel_layouts_ref(stereo_layout, &ctx->outputs[0]->in_channel_layouts);
722  if (ret)
723  return ret;
724 
725  if (s->hrir_fmt == HRIR_MULTI) {
726  hrir_layouts = ff_all_channel_counts();
727  if (!hrir_layouts)
728  return AVERROR(ENOMEM);
729  ret = ff_channel_layouts_ref(hrir_layouts, &ctx->inputs[1]->out_channel_layouts);
730  if (ret)
731  return ret;
732  } else {
733  for (i = 1; i < s->nb_inputs; i++) {
734  ret = ff_channel_layouts_ref(stereo_layout, &ctx->inputs[i]->out_channel_layouts);
735  if (ret)
736  return ret;
737  }
738  }
739 
741  if (!formats)
742  return AVERROR(ENOMEM);
744 }
745 
747 {
748  AVFilterContext *ctx = inlink->dst;
749  HeadphoneContext *s = ctx->priv;
750 
751  if (s->nb_irs < inlink->channels) {
752  av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->channels);
753  return AVERROR(EINVAL);
754  }
755 
756  return 0;
757 }
758 
760 {
761  HeadphoneContext *s = ctx->priv;
762  int i, ret;
763 
764  AVFilterPad pad = {
765  .name = "in0",
766  .type = AVMEDIA_TYPE_AUDIO,
767  .config_props = config_input,
768  };
769  if ((ret = ff_insert_inpad(ctx, 0, &pad)) < 0)
770  return ret;
771 
772  if (!s->map) {
773  av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
774  return AVERROR(EINVAL);
775  }
776 
777  parse_map(ctx);
778 
779  s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
780  if (!s->in)
781  return AVERROR(ENOMEM);
782 
783  for (i = 1; i < s->nb_inputs; i++) {
784  char *name = av_asprintf("hrir%d", i - 1);
785  AVFilterPad pad = {
786  .name = name,
787  .type = AVMEDIA_TYPE_AUDIO,
788  };
789  if (!name)
790  return AVERROR(ENOMEM);
791  if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
792  av_freep(&pad.name);
793  return ret;
794  }
795  }
796 
797  s->fdsp = avpriv_float_dsp_alloc(0);
798  if (!s->fdsp)
799  return AVERROR(ENOMEM);
800 
801  return 0;
802 }
803 
804 static int config_output(AVFilterLink *outlink)
805 {
806  AVFilterContext *ctx = outlink->src;
807  HeadphoneContext *s = ctx->priv;
808  AVFilterLink *inlink = ctx->inputs[0];
809 
810  if (s->hrir_fmt == HRIR_MULTI) {
811  AVFilterLink *hrir_link = ctx->inputs[1];
812 
813  if (hrir_link->channels < inlink->channels * 2) {
814  av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->channels * 2);
815  return AVERROR(EINVAL);
816  }
817  }
818 
819  s->gain_lfe = expf((s->gain - 3 * inlink->channels + s->lfe_gain) / 20 * M_LN10);
820 
821  return 0;
822 }
823 
825 {
826  HeadphoneContext *s = ctx->priv;
827 
828  av_fft_end(s->ifft[0]);
829  av_fft_end(s->ifft[1]);
830  av_fft_end(s->fft[0]);
831  av_fft_end(s->fft[1]);
832  av_freep(&s->delay[0]);
833  av_freep(&s->delay[1]);
834  av_freep(&s->data_ir[0]);
835  av_freep(&s->data_ir[1]);
836  av_freep(&s->ringbuffer[0]);
837  av_freep(&s->ringbuffer[1]);
838  av_freep(&s->temp_src[0]);
839  av_freep(&s->temp_src[1]);
840  av_freep(&s->temp_fft[0]);
841  av_freep(&s->temp_fft[1]);
842  av_freep(&s->temp_afft[0]);
843  av_freep(&s->temp_afft[1]);
844  av_freep(&s->data_hrtf[0]);
845  av_freep(&s->data_hrtf[1]);
846  av_freep(&s->fdsp);
847 
848  av_freep(&s->in);
849  for (unsigned i = 1; i < ctx->nb_inputs; i++)
850  av_freep(&ctx->input_pads[i].name);
851 }
852 
853 #define OFFSET(x) offsetof(HeadphoneContext, x)
854 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
855 
/* Filter options.  "map" is mandatory (checked in init); "type" selects the
 * convolution engine and "hrir" the layout of the IR input stream(s). */
static const AVOption headphone_options[] = {
    { "map",       "set channels convolution mappings", OFFSET(map),      AV_OPT_TYPE_STRING, {.str=NULL},         .flags = FLAGS },
    { "gain",      "set gain in dB",                    OFFSET(gain),     AV_OPT_TYPE_FLOAT,  {.dbl=0},  -20,  40, .flags = FLAGS },
    { "lfe",       "set lfe gain in dB",                OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT,  {.dbl=0},  -20,  40, .flags = FLAGS },
    { "type",      "set processing",                    OFFSET(type),     AV_OPT_TYPE_INT,    {.i64=1},    0,   1, .flags = FLAGS, "type" },
    { "time",      "time domain",                       0,                AV_OPT_TYPE_CONST,  {.i64=0},    0,   0, .flags = FLAGS, "type" },
    { "freq",      "frequency domain",                  0,                AV_OPT_TYPE_CONST,  {.i64=1},    0,   0, .flags = FLAGS, "type" },
    { "size",      "set frame size",                    OFFSET(size),     AV_OPT_TYPE_INT,    {.i64=1024},1024,96000, .flags = FLAGS },
    { "hrir",      "set hrir format",                   OFFSET(hrir_fmt), AV_OPT_TYPE_INT,    {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, "hrir" },
    { "stereo",    "hrir files have exactly 2 channels", 0,               AV_OPT_TYPE_CONST,  {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, "hrir" },
    { "multich",   "single multichannel hrir file",     0,                AV_OPT_TYPE_CONST,  {.i64=HRIR_MULTI},  0, 0, .flags = FLAGS, "hrir" },
    { NULL }
};
869 
870 AVFILTER_DEFINE_CLASS(headphone);
871 
/* Single stereo output pad; channel-count checks happen in config_output. */
static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
880 
882  .name = "headphone",
883  .description = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
884  .priv_size = sizeof(HeadphoneContext),
885  .priv_class = &headphone_class,
886  .init = init,
887  .uninit = uninit,
889  .activate = activate,
890  .inputs = NULL,
891  .outputs = outputs,
893 };
formats
formats
Definition: signature.h:48
convert_coeffs
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
Definition: af_headphone.c:396
av_fft_end
av_cold void av_fft_end(FFTContext *s)
Definition: avfft.c:48
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_headphone.c:36
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
td
#define td
Definition: regdef.h:70
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
HeadphoneContext::gain_lfe
float gain_lfe
Definition: af_headphone.c:62
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
HeadphoneContext::data_ir
float * data_ir[2]
Definition: af_headphone.c:73
HeadphoneContext::headphone_inputs::eof
int eof
Definition: af_headphone.c:87
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:479
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
HeadphoneContext::size
int size
Definition: af_headphone.c:69
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
parse_channel_name
static int parse_channel_name(char **arg, int *rchannel, char *buf)
Definition: af_headphone.c:91
ff_clz
#define ff_clz
Definition: intmath.h:142
ThreadData::delay
int ** delay
Definition: af_headphone.c:162
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:454
AudioConvert::channels
int channels
Definition: audio_convert.c:54
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
TIME_DOMAIN
#define TIME_DOMAIN
Definition: af_headphone.c:35
AVOption
AVOption.
Definition: opt.h:246
HeadphoneContext::ringbuffer
float * ringbuffer[2]
Definition: af_headphone.c:64
expf
#define expf(x)
Definition: libm.h:283
av_fft_permute
void av_fft_permute(FFTContext *s, FFTComplex *z)
Do the permutation needed BEFORE calling ff_fft_calc().
Definition: avfft.c:38
HeadphoneContext::eof_hrirs
int eof_hrirs
Definition: af_headphone.c:50
HeadphoneContext::headphone_inputs::delay_r
int delay_r
Definition: af_headphone.c:86
av_get_channel_layout
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
Definition: channel_layout.c:139
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
HeadphoneContext::ifft
FFTContext * ifft[2]
Definition: af_headphone.c:78
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:494
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1788
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:166
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
ff_insert_inpad
static int ff_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new input pad for the filter.
Definition: internal.h:266
HeadphoneContext::air_len
int air_len
Definition: af_headphone.c:53
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
HeadphoneContext::ir_len
int ir_len
Definition: af_headphone.c:52
fail
#define fail()
Definition: checkasm.h:123
activate
static int activate(AVFilterContext *ctx)
Definition: af_headphone.c:626
HeadphoneContext::headphone_inputs::delay_l
int delay_l
Definition: af_headphone.c:85
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:165
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:105
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:86
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_headphone.c:746
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
OFFSET
#define OFFSET(x)
Definition: af_headphone.c:853
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:605
HRIR_MULTI
#define HRIR_MULTI
Definition: af_headphone.c:39
HRIR_STEREO
#define HRIR_STEREO
Definition: af_headphone.c:38
AV_CH_LOW_FREQUENCY
#define AV_CH_LOW_FREQUENCY
Definition: channel_layout.h:52
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:356
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1602
s
#define s(width, name)
Definition: cbs_vp9.c:257
HeadphoneContext::data_hrtf
FFTComplex * data_hrtf[2]
Definition: af_headphone.c:79
HeadphoneContext::buffer_length
int buffer_length
Definition: af_headphone.c:67
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:184
filters.h
ctx
AVFormatContext * ctx
Definition: movenc.c:48
HeadphoneContext::lfe_gain
float lfe_gain
Definition: af_headphone.c:62
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_headphone.c:824
arg
const char * arg
Definition: jacosubdec.c:66
if
if(ret)
Definition: filter_design.txt:179
headphone_convolute
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:171
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1495
NULL
#define NULL
Definition: coverity.c:32
FLAGS
#define FLAGS
Definition: af_headphone.c:854
ThreadData::temp_fft
FFTComplex ** temp_fft
Definition: af_headphone.c:167
parse_map
static void parse_map(AVFilterContext *ctx)
Definition: af_headphone.c:113
outputs
static const AVFilterPad outputs[]
Definition: af_headphone.c:872
src
#define src
Definition: vp8dsp.c:254
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:350
avfft.h
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_headphone.c:694
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(headphone)
float_dsp.h
headphone_frame
static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_headphone.c:360
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
HeadphoneContext::in
struct HeadphoneContext::headphone_inputs * in
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
HeadphoneContext::write
int write[2]
Definition: af_headphone.c:65
size
int size
Definition: twinvq_data.h:11134
HeadphoneContext::headphone_inputs::frame
AVFrame * frame
Definition: af_headphone.c:83
ff_af_headphone
AVFilter ff_af_headphone
Definition: af_headphone.c:881
HeadphoneContext::gain
float gain
Definition: af_headphone.c:61
FFTComplex::im
FFTSample im
Definition: avfft.h:38
AVFloatDSPContext
Definition: float_dsp.h:24
FFTComplex::re
FFTSample re
Definition: avfft.h:38
HeadphoneContext
Definition: af_headphone.c:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
HeadphoneContext::delay
int * delay[2]
Definition: af_headphone.c:72
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
N
#define N
Definition: af_mcompand.c:54
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:445
headphone_options
static const AVOption headphone_options[]
Definition: af_headphone.c:856
HeadphoneContext::nb_inputs
int nb_inputs
Definition: af_headphone.c:57
HeadphoneContext::have_hrirs
int have_hrirs
Definition: af_headphone.c:49
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:226
HeadphoneContext::headphone_inputs::ir_len
int ir_len
Definition: af_headphone.c:84
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
HeadphoneContext::temp_fft
FFTComplex * temp_fft[2]
Definition: af_headphone.c:75
FFTContext
Definition: fft.h:88
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
HeadphoneContext::map
char * map
Definition: af_headphone.c:44
av_channel_layout_extract_channel
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index)
Get the channel with the given index in channel_layout.
Definition: channel_layout.c:265
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_headphone.c:759
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
HeadphoneContext::hrir_fmt
int hrir_fmt
Definition: af_headphone.c:70
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:164
len
int len
Definition: vorbis_enc_data.h:452
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1456
AVFilter
Filter definition.
Definition: avfilter.h:144
ret
ret
Definition: filter_design.txt:187
HeadphoneContext::mapping
int mapping[64]
Definition: af_headphone.c:55
HeadphoneContext::headphone_inputs
Definition: af_headphone.c:82
ThreadData::write
int * write
Definition: af_headphone.c:161
av_fft_init
FFTContext * av_fft_init(int nbits, int inverse)
Set up a complex FFT.
Definition: avfft.c:28
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:439
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
channel_layout.h
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
headphone_fast_convolute
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:245
ff_outlink_get_status
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1625
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
audio.h
M_LN10
#define M_LN10
Definition: mathematics.h:43
ThreadData::in
AVFrame * in
Definition: af_afftdn.c:1083
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:85
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
HeadphoneContext::temp_afft
FFTComplex * temp_afft[2]
Definition: af_headphone.c:76
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
HeadphoneContext::fdsp
AVFloatDSPContext * fdsp
Definition: af_headphone.c:81
ThreadData::ir
float ** ir
Definition: af_headphone.c:163
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:593
HeadphoneContext::temp_src
float * temp_src[2]
Definition: af_headphone.c:74
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:227
HeadphoneContext::type
int type
Definition: af_headphone.c:45
HeadphoneContext::lfe_channel
int lfe_channel
Definition: af_headphone.c:47
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
av_fft_calc
void av_fft_calc(FFTContext *s, FFTComplex *z)
Do a complex FFT with the parameters defined in av_fft_init().
Definition: avfft.c:43
HeadphoneContext::nb_irs
int nb_irs
Definition: af_headphone.c:59
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
HeadphoneContext::fft
FFTContext * fft[2]
Definition: af_headphone.c:78
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_headphone.c:804
HeadphoneContext::n_fft
int n_fft
Definition: af_headphone.c:68
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
FFTComplex
Definition: avfft.h:37
re
float re
Definition: fft.c:82
check_ir
static int check_ir(AVFilterLink *inlink, int input_number)
Definition: af_headphone.c:342
ThreadData::temp_afft
FFTComplex ** temp_afft
Definition: af_headphone.c:168
intmath.h