FFmpeg
af_sofalizer.c
Go to the documentation of this file.
1 /*****************************************************************************
2  * sofalizer.c : SOFAlizer filter for virtual binaural acoustics
3  *****************************************************************************
4  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda,
5  * Acoustics Research Institute (ARI), Vienna, Austria
6  *
7  * Authors: Andreas Fuchs <andi.fuchs.mail@gmail.com>
8  * Wolfgang Hrauda <wolfgang.hrauda@gmx.at>
9  *
10  * SOFAlizer project coordinator at ARI, main developer of SOFA:
11  * Piotr Majdak <piotr@majdak.at>
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU Lesser General Public License as published by
15  * the Free Software Foundation; either version 2.1 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public License
24  * along with this program; if not, write to the Free Software Foundation,
25  * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
26  *****************************************************************************/
27 
28 #include <math.h>
29 #include <mysofa.h>
30 
31 #include "libavutil/mem.h"
32 #include "libavutil/tx.h"
33 #include "libavutil/avstring.h"
35 #include "libavutil/float_dsp.h"
36 #include "libavutil/intmath.h"
37 #include "libavutil/opt.h"
38 #include "avfilter.h"
39 #include "filters.h"
40 #include "formats.h"
41 #include "internal.h"
42 #include "audio.h"
43 
44 #define TIME_DOMAIN 0
45 #define FREQUENCY_DOMAIN 1
46 
typedef struct MySofa { /* contains data of one SOFA file */
    struct MYSOFA_HRTF *hrtf;                 /* parsed HRTF set loaded via libmysofa */
    struct MYSOFA_LOOKUP *lookup;             /* spatial lookup structure for nearest-IR search */
    struct MYSOFA_NEIGHBORHOOD *neighborhood; /* neighbor lists used when interpolating IRs */
    int ir_samples;   /* length of one impulse response (IR) */
    int n_samples;    /* ir_samples rounded up to the next power of 2 */
    float *lir, *rir; /* IRs (time-domain), left and right ear */
    float *fir;       /* scratch FIR buffer, N * R floats (see preload_sofa) */
    int max_delay;    /* largest broadband delay (in samples) over all selected IRs */
} MySofa;
57 
/* One user-supplied virtual speaker position (from the speakers option). */
typedef struct VirtualSpeaker {
    uint8_t set;  /* non-zero if a custom position was parsed for this channel */
    float azim;   /* azimuth in degrees */
    float elev;   /* elevation in degrees */
    /* NOTE(review): the closing "} VirtualSpeaker;" appears truncated from
     * this excerpt */

typedef struct SOFAlizerContext {
    const AVClass *class;

    char *filename;      /* name of SOFA file */
    MySofa sofa;         /* contains data of the SOFA file */

    int sample_rate;     /* sample rate from SOFA file */
    float *speaker_azim; /* azimuth of the virtual loudspeakers */
    float *speaker_elev; /* elevation of the virtual loudspeakers */
    char *speakers_pos;  /* custom positions of the virtual loudspeakers */
    float lfe_gain;      /* initial gain for the LFE channel */
    float gain_lfe;      /* gain applied to LFE channel (linear, derived from lfe_gain) */
    int lfe_channel;     /* LFE channel position in channel layout, -1 if none */

    int n_conv;          /* number of channels to convolute */

    /* buffer variables (for convolution) */
    float *ringbuffer[2]; /* buffers input samples, length of one buffer: */
                          /* no. input ch. (incl. LFE) x buffer_length */
    int write[2];         /* current write position to ringbuffer */
    int buffer_length;    /* is: longest IR plus max. delay in all SOFA files */
                          /* then choose next power of 2 */
    int n_fft;            /* number of samples in one FFT block */
    /* NOTE(review): a member between n_fft and delay (likely nb_samples)
     * appears truncated from this excerpt */

    /* netCDF variables */
    int *delay[2];        /* broadband delay for each channel/IR to be convolved */

    float *data_ir[2];    /* IRs for all channels to be convolved */
                          /* (this excludes the LFE) */
    float *temp_src[2];   /* per-ear scratch buffer for time-domain convolution */
    AVComplexFloat *in_fft[2];    /* Array to hold input FFT values */
    AVComplexFloat *out_fft[2];   /* Array to hold output FFT values */
    AVComplexFloat *temp_afft[2]; /* Array to accumulate FFT values prior to IFFT */

    /* control variables */
    float gain;        /* filter gain (in dB) */
    float rotation;    /* rotation of virtual loudspeakers (in degrees) */
    float elevation;   /* elevation of virtual loudspeakers (in deg.) */
    float radius;      /* distance virtual loudspeakers to listener (in metres) */
    int type;          /* processing type: TIME_DOMAIN or FREQUENCY_DOMAIN */
    int framesize;     /* size of buffer */
    int normalize;     /* should all IRs be normalized upon import ? */
    int interpolate;   /* should wanted IRs be interpolated from neighbors ? */
    int minphase;      /* should all IRs be minphased upon import ? */
    float anglestep;   /* neighbor search angle step, in angles */
    float radstep;     /* neighbor search radius step, in meters */
    /* NOTE(review): a member here (likely the vspkrpos[64] array of
     * VirtualSpeaker used by parse_speaker_pos) appears truncated */

    AVTXContext *fft[2], *ifft[2]; /* forward/inverse transforms, one per ear */
    /* NOTE(review): remaining members (tx_fn/itx_fn, data_hrtf, fdsp) and the
     * closing "} SOFAlizerContext;" appear truncated from this excerpt */
121 static int close_sofa(struct MySofa *sofa)
122 {
123  if (sofa->neighborhood)
124  mysofa_neighborhood_free(sofa->neighborhood);
125  sofa->neighborhood = NULL;
126  if (sofa->lookup)
127  mysofa_lookup_free(sofa->lookup);
128  sofa->lookup = NULL;
129  if (sofa->hrtf)
130  mysofa_free(sofa->hrtf);
131  sofa->hrtf = NULL;
132  av_freep(&sofa->fir);
133 
134  return 0;
135 }
136 
137 static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
138 {
139  struct SOFAlizerContext *s = ctx->priv;
140  struct MYSOFA_HRTF *mysofa;
141  char *license;
142  int ret;
143 
144  mysofa = mysofa_load(filename, &ret);
145  s->sofa.hrtf = mysofa;
146  if (ret || !mysofa) {
147  av_log(ctx, AV_LOG_ERROR, "Can't find SOFA-file '%s'\n", filename);
148  return AVERROR(EINVAL);
149  }
150 
151  ret = mysofa_check(mysofa);
152  if (ret != MYSOFA_OK) {
153  av_log(ctx, AV_LOG_ERROR, "Selected SOFA file is invalid. Please select valid SOFA file.\n");
154  return ret;
155  }
156 
157  if (s->normalize)
158  mysofa_loudness(s->sofa.hrtf);
159 
160  if (s->minphase)
161  mysofa_minphase(s->sofa.hrtf, 0.01f);
162 
163  mysofa_tocartesian(s->sofa.hrtf);
164 
165  s->sofa.lookup = mysofa_lookup_init(s->sofa.hrtf);
166  if (s->sofa.lookup == NULL)
167  return AVERROR(EINVAL);
168 
169  if (s->interpolate)
170  s->sofa.neighborhood = mysofa_neighborhood_init_withstepdefine(s->sofa.hrtf,
171  s->sofa.lookup,
172  s->anglestep,
173  s->radstep);
174 
175  s->sofa.fir = av_calloc(s->sofa.hrtf->N * s->sofa.hrtf->R, sizeof(*s->sofa.fir));
176  if (!s->sofa.fir)
177  return AVERROR(ENOMEM);
178 
179  if (mysofa->DataSamplingRate.elements != 1)
180  return AVERROR(EINVAL);
181  av_log(ctx, AV_LOG_DEBUG, "Original IR length: %d.\n", mysofa->N);
182  *samplingrate = mysofa->DataSamplingRate.values[0];
183  license = mysofa_getAttribute(mysofa->attributes, (char *)"License");
184  if (license)
185  av_log(ctx, AV_LOG_INFO, "SOFA license: %s\n", license);
186 
187  return 0;
188 }
189 
190 static int parse_channel_name(AVFilterContext *ctx, char **arg, int *rchannel)
191 {
192  int len;
193  enum AVChannel channel_id = 0;
194  char buf[8] = {0};
195 
196  /* try to parse a channel name, e.g. "FL" */
197  if (av_sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
198  channel_id = av_channel_from_string(buf);
199  if (channel_id < 0 || channel_id >= 64) {
200  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
201  return AVERROR(EINVAL);
202  }
203 
204  *rchannel = channel_id;
205  *arg += len;
206  return 0;
207  } else if (av_sscanf(*arg, "%d%n", &channel_id, &len) == 1) {
208  if (channel_id < 0 || channel_id >= 64) {
209  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%d\' as channel number.\n", channel_id);
210  return AVERROR(EINVAL);
211  }
212  *rchannel = channel_id;
213  *arg += len;
214  return 0;
215  }
216  return AVERROR(EINVAL);
217 }
218 
/* Parse the user-supplied speakers option string into per-channel custom
 * virtual speaker positions.
 * NOTE(review): the function signature line appears truncated from this
 * excerpt; the body below reads SOFAlizerContext from ctx->priv. */
{
    SOFAlizerContext *s = ctx->priv;
    char *arg, *tokenizer, *p, *args = av_strdup(s->speakers_pos);

    if (!args)
        return; /* allocation failure: keep default positions */
    p = args;

    /* the option string is a '|'-separated list of "CHANNEL azim [elev]" */
    while ((arg = av_strtok(p, "|", &tokenizer))) {
        float azim, elev;
        int out_ch_id;

        p = NULL; /* av_strtok() continues from saved state on later calls */
        if (parse_channel_name(ctx, &arg, &out_ch_id)) {
            continue; /* unparseable channel spec: skip this entry */
        }
        if (av_sscanf(arg, "%f %f", &azim, &elev) == 2) {
            /* both azimuth and elevation given */
            s->vspkrpos[out_ch_id].set = 1;
            s->vspkrpos[out_ch_id].azim = azim;
            s->vspkrpos[out_ch_id].elev = elev;
        } else if (av_sscanf(arg, "%f", &azim) == 1) {
            /* only azimuth given: elevation defaults to 0 */
            s->vspkrpos[out_ch_id].set = 1;
            s->vspkrpos[out_ch_id].azim = azim;
            s->vspkrpos[out_ch_id].elev = 0;
        }
    }

    av_free(args);
}
249 
/* Fill speaker_azim/speaker_elev with one (azimuth, elevation) pair per
 * input channel, derived from the input channel layout, then override with
 * any user-supplied custom positions. Also records the LFE channel index.
 * Returns 0 on success, AVERROR(EINVAL) for unsupported layouts.
 * NOTE(review): the first line of the signature appears truncated from this
 * excerpt. */
                           float *speaker_azim, float *speaker_elev)
{
    struct SOFAlizerContext *s = ctx->priv;
    AVChannelLayout *channel_layout = &ctx->inputs[0]->ch_layout;
    float azim[64] = { 0 };
    float elev[64] = { 0 };
    int ch, n_conv = ctx->inputs[0]->ch_layout.nb_channels; /* get no. input channels */

    if (n_conv < 0 || n_conv > 64)
        return AVERROR(EINVAL);

    s->lfe_channel = -1; /* no LFE channel found yet */

    if (s->speakers_pos)
        /* NOTE(review): the statement parsing custom speaker positions
         * (presumably parse_speaker_pos(ctx)) appears truncated here */

    /* set speaker positions according to input channel configuration: */
    for (ch = 0; ch < n_conv; ch++) {
        int chan = av_channel_layout_channel_from_index(channel_layout, ch);

        switch (chan) {
        case AV_CHAN_FRONT_LEFT:   azim[ch] = 30;  break;
        case AV_CHAN_FRONT_RIGHT:  azim[ch] = 330; break;
        case AV_CHAN_FRONT_CENTER: azim[ch] = 0;   break;
        /* NOTE(review): the AV_CHAN_LOW_FREQUENCY case appears truncated
         * from this excerpt */
        case AV_CHAN_LOW_FREQUENCY_2: s->lfe_channel = ch; break;
        case AV_CHAN_BACK_LEFT:   azim[ch] = 150; break;
        case AV_CHAN_BACK_RIGHT:  azim[ch] = 210; break;
        case AV_CHAN_BACK_CENTER: azim[ch] = 180; break;
        case AV_CHAN_SIDE_LEFT:   azim[ch] = 90;  break;
        case AV_CHAN_SIDE_RIGHT:  azim[ch] = 270; break;
        case AV_CHAN_FRONT_LEFT_OF_CENTER:  azim[ch] = 15;  break;
        case AV_CHAN_FRONT_RIGHT_OF_CENTER: azim[ch] = 345; break;
        case AV_CHAN_TOP_CENTER: azim[ch] = 0;
                                 elev[ch] = 90; break;
        case AV_CHAN_TOP_FRONT_LEFT: azim[ch] = 30;
                                     elev[ch] = 45; break;
        case AV_CHAN_TOP_FRONT_CENTER: azim[ch] = 0;
                                       elev[ch] = 45; break;
        case AV_CHAN_TOP_FRONT_RIGHT: azim[ch] = 330;
                                      elev[ch] = 45; break;
        case AV_CHAN_TOP_BACK_LEFT: azim[ch] = 150;
                                    elev[ch] = 45; break;
        case AV_CHAN_TOP_BACK_RIGHT: azim[ch] = 210;
                                     elev[ch] = 45; break;
        case AV_CHAN_TOP_BACK_CENTER: azim[ch] = 180;
                                      elev[ch] = 45; break;
        case AV_CHAN_WIDE_LEFT:  azim[ch] = 90;  break;
        case AV_CHAN_WIDE_RIGHT: azim[ch] = 270; break;
        case AV_CHAN_SURROUND_DIRECT_LEFT:  azim[ch] = 90;  break;
        case AV_CHAN_SURROUND_DIRECT_RIGHT: azim[ch] = 270; break;
        case AV_CHAN_STEREO_LEFT:  azim[ch] = 90;  break;
        case AV_CHAN_STEREO_RIGHT: azim[ch] = 270; break;
        default:
            return AVERROR(EINVAL); /* layout contains an unsupported channel */
        }

        /* user-supplied custom position overrides the layout default */
        if (s->vspkrpos[ch].set) {
            azim[ch] = s->vspkrpos[ch].azim;
            elev[ch] = s->vspkrpos[ch].elev;
        }
    }

    memcpy(speaker_azim, azim, n_conv * sizeof(float));
    memcpy(speaker_elev, elev, n_conv * sizeof(float));

    return 0;

}
320 
/* Per-frame state shared with the worker jobs (one job per ear). */
typedef struct ThreadData {
    AVFrame *in, *out;  /* input frame and output frame being processed */
    int *write;         /* per-ear ringbuffer write positions */
    int **delay;        /* per-ear broadband delays, one entry per input channel */
    float **ir;         /* per-ear impulse responses (time domain) */
    int *n_clippings;   /* per-ear clipping counters */
    float **ringbuffer; /* per-ear input-sample ringbuffers */
    float **temp_src;   /* per-ear scratch buffers for convolution input */
    /* NOTE(review): the FFT-related members (in_fft/out_fft/temp_afft,
     * assigned in filter_frame) appear truncated from this excerpt */
} ThreadData;
333 
/* Worker: time-domain convolution for one ear (jobnr 0 = left, 1 = right).
 * For every output sample, each input channel's recent history (kept in a
 * per-channel ringbuffer) is multiplied with that channel's time-reversed IR
 * and the products are accumulated into the output sample. */
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SOFAlizerContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;                        /* interleaved output offset: 0 or 1 */
    int *write = &td->write[jobnr];            /* ringbuffer write position for this ear */
    const int *const delay = td->delay[jobnr]; /* broadband delay per input channel */
    const float *const ir = td->ir[jobnr];     /* IRs for this ear, n_samples apart */
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    float *temp_src = td->temp_src[jobnr];
    const int ir_samples = s->sofa.ir_samples; /* length of one IR */
    const int n_samples = s->sofa.n_samples;   /* IR length padded to power of 2 */
    const int planar = in->format == AV_SAMPLE_FMT_FLTP;
    const int mult = 1 + !planar;              /* output stride: 2 when interleaved */
    const float *src = (const float *)in->extended_data[0]; /* get pointer to audio input buffer */
    float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
    const int in_channels = s->n_conv; /* number of input channels */
    /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
    const int buffer_length = s->buffer_length;
    /* -1 for AND instead of MODULO (applied to powers of 2): */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    float *buffer[64]; /* holds ringbuffer for each input channel */
    int wr = *write;
    int read;
    int i, l;

    if (!planar)
        dst += offset; /* interleaved: this ear writes every other sample */

    for (l = 0; l < in_channels; l++) {
        /* get starting address of ringbuffer for each input channel */
        buffer[l] = ringbuffer + l * buffer_length;
    }

    for (i = 0; i < in->nb_samples; i++) {
        const float *temp_ir = ir; /* using same set of IRs for each sample */

        dst[0] = 0;
        if (planar) {
            for (l = 0; l < in_channels; l++) {
                const float *srcp = (const float *)in->extended_data[l];

                /* write current input sample to ringbuffer (for each channel) */
                buffer[l][wr] = srcp[i];
            }
        } else {
            for (l = 0; l < in_channels; l++) {
                /* write current input sample to ringbuffer (for each channel) */
                buffer[l][wr] = src[l];
            }
        }

        /* loop goes through all channels to be convolved */
        for (l = 0; l < in_channels; l++) {
            const float *const bptr = buffer[l];

            if (l == s->lfe_channel) {
                /* LFE is an input channel but requires no convolution */
                /* apply gain to LFE signal and add to output buffer */
                dst[0] += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
                temp_ir += n_samples; /* still advance to the next channel's IR */
                continue;
            }

            /* current read position in ringbuffer: input sample write position
             * - delay for l-th ch. + diff. betw. IR length and buffer length
             * (mod buffer length) */
            read = (wr - delay[l] - (ir_samples - 1) + buffer_length) & modulo;

            if (read + ir_samples < buffer_length) {
                /* contiguous case: copy the whole history window at once */
                memmove(temp_src, bptr + read, ir_samples * sizeof(*temp_src));
            } else {
                /* window wraps around the end of the ringbuffer: two copies */
                int len = FFMIN(n_samples - (read % ir_samples), buffer_length - read);

                memmove(temp_src, bptr + read, len * sizeof(*temp_src));
                memmove(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
            }

            /* multiply signal and IR, and add up the results */
            dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_samples, 32));
            temp_ir += n_samples;
        }

        /* clippings counter */
        if (fabsf(dst[0]) > 1)
            n_clippings[0]++;

        /* move output buffer pointer by +2 to get to next sample of processed channel: */
        dst += mult;
        src += in_channels;
        wr = (wr + 1) & modulo; /* update ringbuffer write position */
    }

    *write = wr; /* remember write position in ringbuffer for next call */

    return 0;
}
433 
/* Worker: frequency-domain (fast) convolution for one ear (jobnr 0 = left,
 * 1 = right). Each input channel is transformed to the frequency domain,
 * multiplied with its precomputed HRTF, accumulated, and transformed back.
 * The convolution tail (IR length - 1 samples) is carried over to the next
 * frame through a per-ear overflow ringbuffer (overlap-add). */
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SOFAlizerContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;             /* interleaved output offset: 0 or 1 */
    int *write = &td->write[jobnr]; /* overflow-buffer position for this ear */
    AVComplexFloat *hrtf = s->data_hrtf[jobnr]; /* get pointers to current HRTF data */
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    const int ir_samples = s->sofa.ir_samples; /* length of one IR */
    const int planar = in->format == AV_SAMPLE_FMT_FLTP;
    const int mult = 1 + !planar;              /* output stride: 2 when interleaved */
    float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
    const int in_channels = s->n_conv; /* number of input channels */
    /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
    const int buffer_length = s->buffer_length;
    /* -1 for AND instead of MODULO (applied to powers of 2): */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    AVComplexFloat *fft_in = s->in_fft[jobnr];  /* temporary array for FFT input data */
    AVComplexFloat *fft_out = s->out_fft[jobnr]; /* temporary array for FFT output data */
    AVComplexFloat *fft_acc = s->temp_afft[jobnr]; /* accumulator across channels */
    AVTXContext *ifft = s->ifft[jobnr];
    av_tx_fn itx_fn = s->itx_fn[jobnr];
    AVTXContext *fft = s->fft[jobnr];
    av_tx_fn tx_fn = s->tx_fn[jobnr];
    const int n_conv = s->n_conv;
    const int n_fft = s->n_fft;
    const float fft_scale = 1.0f / s->n_fft; /* compensates unnormalized FFT/IFFT */
    AVComplexFloat *hrtf_offset;
    int wr = *write;
    int n_read;
    int i, j;

    if (!planar)
        dst += offset; /* interleaved: this ear writes every other sample */

    /* find minimum between number of samples and output buffer length:
     * (important, if one IR is longer than the output buffer) */
    n_read = FFMIN(ir_samples, in->nb_samples);
    for (j = 0; j < n_read; j++) {
        /* initialize output buf with saved signal from overflow buf */
        dst[mult * j] = ringbuffer[wr];
        ringbuffer[wr] = 0.0f; /* re-set read samples to zero */
        /* update ringbuffer read/write position */
        wr = (wr + 1) & modulo;
    }

    /* initialize rest of output buffer with 0 */
    for (j = n_read; j < in->nb_samples; j++) {
        dst[mult * j] = 0;
    }

    /* fill FFT accumulation with 0 */
    memset(fft_acc, 0, sizeof(AVComplexFloat) * n_fft);

    for (i = 0; i < n_conv; i++) {
        const float *src = (const float *)in->extended_data[i * planar]; /* get pointer to audio input buffer */

        if (i == s->lfe_channel) { /* LFE */
            /* LFE bypasses the HRTF path: mixed straight into the output */
            if (in->format == AV_SAMPLE_FMT_FLT) {
                for (j = 0; j < in->nb_samples; j++) {
                    /* apply gain to LFE signal and add to output buffer */
                    dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
                }
            } else {
                for (j = 0; j < in->nb_samples; j++) {
                    /* apply gain to LFE signal and add to output buffer */
                    dst[j] += src[j] * s->gain_lfe;
                }
            }
            continue;
        }

        /* outer loop: go through all input channels to be convolved */
        offset = i * n_fft; /* no. samples already processed */
        hrtf_offset = hrtf + offset; /* this channel's HRTF for this ear */

        /* fill FFT input with 0 (we want to zero-pad) */
        memset(fft_in, 0, sizeof(AVComplexFloat) * n_fft);

        if (in->format == AV_SAMPLE_FMT_FLT) {
            for (j = 0; j < in->nb_samples; j++) {
                /* prepare input for FFT */
                /* write all samples of current input channel to FFT input array */
                fft_in[j].re = src[j * in_channels + i];
            }
        } else {
            for (j = 0; j < in->nb_samples; j++) {
                /* prepare input for FFT */
                /* write all samples of current input channel to FFT input array */
                fft_in[j].re = src[j];
            }
        }

        /* transform input signal of current channel to frequency domain */
        tx_fn(fft, fft_out, fft_in, sizeof(*fft_in));

        for (j = 0; j < n_fft; j++) {
            const AVComplexFloat *hcomplex = hrtf_offset + j;
            const float re = fft_out[j].re;
            const float im = fft_out[j].im;

            /* complex multiplication of input signal and HRTFs */
            /* output channel (real): */
            fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
            /* output channel (imag): */
            fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
        }
    }

    /* transform output signal of current channel back to time domain */
    itx_fn(ifft, fft_out, fft_acc, sizeof(*fft_acc));

    for (j = 0; j < in->nb_samples; j++) {
        /* write output signal of current channel to output buffer */
        dst[mult * j] += fft_out[j].re * fft_scale;
    }

    for (j = 0; j < ir_samples - 1; j++) { /* overflow length is IR length - 1 */
        /* write the rest of output signal to overflow buffer */
        int write_pos = (wr + j) & modulo;

        *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
    }

    /* go through all samples of current output buffer: count clippings */
    for (i = 0; i < out->nb_samples; i++) {
        /* clippings counter */
        if (fabsf(dst[i * mult]) > 1) { /* if current output sample > 1 */
            n_clippings[0]++;
        }
    }

    /* remember read/write position in ringbuffer for next call */
    *write = wr;

    return 0;
}
573 
/* Process one input frame: allocate a matching output frame, run the per-ear
 * convolution workers (time- or frequency-domain), warn on clipping, and
 * push the result downstream.
 * NOTE(review): the function signature line appears truncated from this
 * excerpt; the body reads inlink and the input frame in. */
{
    AVFilterContext *ctx = inlink->dst;
    SOFAlizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int n_clippings[2] = { 0 }; /* one clipping counter per ear */
    ThreadData td;
    AVFrame *out;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    /* bundle per-frame state for the worker jobs (one job per ear) */
    td.in = in; td.out = out; td.write = s->write;
    td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
    td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
    td.in_fft = s->in_fft;
    td.out_fft = s->out_fft;
    td.temp_afft = s->temp_afft;

    if (s->type == TIME_DOMAIN) {
        /* NOTE(review): the execute call (sofalizer_convolute over 2 jobs)
         * appears truncated from this excerpt */
    } else if (s->type == FREQUENCY_DOMAIN) {
        /* NOTE(review): the execute call (sofalizer_fast_convolute over
         * 2 jobs) appears truncated from this excerpt */
    }

    /* display error message if clipping occurred */
    if (n_clippings[0] + n_clippings[1] > 0) {
        av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
               n_clippings[0] + n_clippings[1], out->nb_samples * 2);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
612 
/* Activate callback: pull a frame from the input link (fixed-size frames in
 * frequency-domain mode) and filter it.
 * NOTE(review): the function signature line and several statements (status
 * forwarding, the unconstrained consume call) appear truncated from this
 * excerpt. */
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    SOFAlizerContext *s = ctx->priv;
    AVFrame *in;
    int ret;

    if (s->nb_samples)
        /* frequency-domain mode: consume exactly nb_samples per frame */
        ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
    else
        /* NOTE(review): the alternative consume call appears truncated */
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    /* nothing consumed this activation */
    return FFERROR_NOT_READY;
}
637 
/* Negotiate formats: float samples, stereo output, any input layout, and the
 * sample rate taken from the loaded SOFA file.
 * NOTE(review): the function signature, sample-format setup, layout
 * construction and several ref calls appear truncated from this excerpt. */
{
    struct SOFAlizerContext *s = ctx->priv;
    int ret, sample_rates[] = { 48000, -1 }; /* placeholder, overwritten below */
    static const enum AVSampleFormat sample_fmts[] = {
        /* NOTE(review): the format list entries appear truncated */
    };

    /* NOTE(review): the call producing ret appears truncated */
    if (ret)
        return ret;

    if (!layouts)
        return AVERROR(ENOMEM);

    ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts);
    if (ret)
        return ret;

    layouts = NULL;
    /* NOTE(review): the stereo-layout construction appears truncated */
    if (ret)
        return ret;

    ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts);
    if (ret)
        return ret;

    /* force the sample rate advertised by the SOFA file */
    sample_rates[0] = s->sample_rate;
    /* NOTE(review): the final rates-setup/return appears truncated */
}
672 
673 static int getfilter_float(AVFilterContext *ctx, float x, float y, float z,
674  float *left, float *right,
675  float *delay_left, float *delay_right)
676 {
677  struct SOFAlizerContext *s = ctx->priv;
678  float c[3], delays[2];
679  float *fl, *fr;
680  int nearest;
681  int *neighbors;
682  float *res;
683 
684  c[0] = x, c[1] = y, c[2] = z;
685  nearest = mysofa_lookup(s->sofa.lookup, c);
686  if (nearest < 0)
687  return AVERROR(EINVAL);
688 
689  if (s->interpolate) {
690  neighbors = mysofa_neighborhood(s->sofa.neighborhood, nearest);
691  res = mysofa_interpolate(s->sofa.hrtf, c,
692  nearest, neighbors,
693  s->sofa.fir, delays);
694  } else {
695  if (s->sofa.hrtf->DataDelay.elements > s->sofa.hrtf->R) {
696  delays[0] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R];
697  delays[1] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R + 1];
698  } else {
699  delays[0] = s->sofa.hrtf->DataDelay.values[0];
700  delays[1] = s->sofa.hrtf->DataDelay.values[1];
701  }
702  res = s->sofa.hrtf->DataIR.values + nearest * s->sofa.hrtf->N * s->sofa.hrtf->R;
703  }
704 
705  *delay_left = delays[0];
706  *delay_right = delays[1];
707 
708  fl = res;
709  fr = res + s->sofa.hrtf->N;
710 
711  memcpy(left, fl, sizeof(float) * s->sofa.hrtf->N);
712  memcpy(right, fr, sizeof(float) * s->sofa.hrtf->N);
713 
714  return 0;
715 }
716 
/* Load and prepare the IR data for every input channel at the requested
 * orientation (azim/elev rotation, radius), for both processing types:
 * time domain (reversed, gain-scaled IRs + per-channel delays) and frequency
 * domain (delay-shifted, zero-padded IRs transformed into HRTFs).
 * Returns 0 on success, a negative AVERROR code on failure. */
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
{
    struct SOFAlizerContext *s = ctx->priv;
    int n_samples;
    int ir_samples;
    int n_conv = s->n_conv; /* no. channels to convolve */
    int n_fft;
    float delay_l; /* broadband delay for each IR */
    float delay_r;
    int nb_input_channels = ctx->inputs[0]->ch_layout.nb_channels; /* no. input channels */
    float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10); /* gain - 3dB/channel */
    AVComplexFloat *data_hrtf_l = NULL;
    AVComplexFloat *data_hrtf_r = NULL;
    AVComplexFloat *fft_out_l = NULL;
    AVComplexFloat *fft_out_r = NULL;
    AVComplexFloat *fft_in_l = NULL;
    AVComplexFloat *fft_in_r = NULL;
    float *data_ir_l = NULL;
    float *data_ir_r = NULL;
    int offset = 0; /* used for faster pointer arithmetics in for-loop */
    int i, j, azim_orig = azim, elev_orig = elev;
    int ret = 0;
    int n_current;
    int n_max = 0;

    av_log(ctx, AV_LOG_DEBUG, "IR length: %d.\n", s->sofa.hrtf->N);
    s->sofa.ir_samples = s->sofa.hrtf->N;
    /* pad IR length up to the next power of 2 */
    s->sofa.n_samples = 1 << (32 - ff_clz(s->sofa.ir_samples));

    n_samples = s->sofa.n_samples;
    ir_samples = s->sofa.ir_samples;

    if (s->type == TIME_DOMAIN) {
        /* final (reversed) IR storage, one ear each */
        s->data_ir[0] = av_calloc(n_samples, sizeof(float) * s->n_conv);
        s->data_ir[1] = av_calloc(n_samples, sizeof(float) * s->n_conv);

        if (!s->data_ir[0] || !s->data_ir[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* per-channel broadband delays (in samples), one array per ear */
    s->delay[0] = av_calloc(s->n_conv, sizeof(int));
    s->delay[1] = av_calloc(s->n_conv, sizeof(int));

    if (!s->delay[0] || !s->delay[1]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* get temporary IR for L and R channel */
    data_ir_l = av_calloc(n_conv * n_samples, sizeof(*data_ir_l));
    data_ir_r = av_calloc(n_conv * n_samples, sizeof(*data_ir_r));
    if (!data_ir_r || !data_ir_l) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (s->type == TIME_DOMAIN) {
        /* scratch buffers used by sofalizer_convolute, one per ear */
        s->temp_src[0] = av_calloc(n_samples, sizeof(float));
        s->temp_src[1] = av_calloc(n_samples, sizeof(float));
        if (!s->temp_src[0] || !s->temp_src[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    s->speaker_azim = av_calloc(s->n_conv, sizeof(*s->speaker_azim));
    s->speaker_elev = av_calloc(s->n_conv, sizeof(*s->speaker_elev));
    if (!s->speaker_azim || !s->speaker_elev) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* get speaker positions */
    if ((ret = get_speaker_pos(ctx, s->speaker_azim, s->speaker_elev)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't get speaker positions. Input channel configuration not supported.\n");
        goto fail;
    }

    for (i = 0; i < s->n_conv; i++) {
        float coordinates[3];

        /* load and store IRs and corresponding delays */
        /* apply the user rotation/elevation offsets to the speaker position */
        azim = (int)(s->speaker_azim[i] + azim_orig) % 360;
        elev = (int)(s->speaker_elev[i] + elev_orig) % 90;

        coordinates[0] = azim;
        coordinates[1] = elev;
        coordinates[2] = radius;

        /* spherical -> cartesian, as required by the mysofa lookup */
        mysofa_s2c(coordinates);

        /* get id of IR closest to desired position */
        ret = getfilter_float(ctx, coordinates[0], coordinates[1], coordinates[2],
                              data_ir_l + n_samples * i,
                              data_ir_r + n_samples * i,
                              &delay_l, &delay_r);
        if (ret < 0)
            goto fail;

        /* convert delays from seconds to samples */
        s->delay[0][i] = delay_l * sample_rate;
        s->delay[1][i] = delay_r * sample_rate;

        s->sofa.max_delay = FFMAX3(s->sofa.max_delay, s->delay[0][i], s->delay[1][i]);
    }

    /* get size of ringbuffer (longest IR plus max. delay) */
    /* then choose next power of 2 for performance optimization */
    n_current = n_samples + s->sofa.max_delay;
    /* length of longest IR plus max. delay */
    n_max = FFMAX(n_max, n_current);

    /* buffer length is longest IR plus max. delay -> next power of 2
       (32 - count leading zeros gives required exponent) */
    s->buffer_length = 1 << (32 - ff_clz(n_max));
    s->n_fft = n_fft = 1 << (32 - ff_clz(n_max + s->framesize));

    if (s->type == FREQUENCY_DOMAIN) {
        float scale = 1.f;

        /* (re)create forward and inverse transforms, one pair per ear */
        av_tx_uninit(&s->fft[0]);
        av_tx_uninit(&s->fft[1]);
        ret = av_tx_init(&s->fft[0], &s->tx_fn[0], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
        if (ret < 0)
            goto fail;
        ret = av_tx_init(&s->fft[1], &s->tx_fn[1], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
        if (ret < 0)
            goto fail;
        av_tx_uninit(&s->ifft[0]);
        av_tx_uninit(&s->ifft[1]);
        ret = av_tx_init(&s->ifft[0], &s->itx_fn[0], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
        if (ret < 0)
            goto fail;
        ret = av_tx_init(&s->ifft[1], &s->itx_fn[1], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
        if (ret < 0)
            goto fail;
    }

    if (s->type == TIME_DOMAIN) {
        /* time domain keeps one ringbuffer lane per input channel */
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
    } else if (s->type == FREQUENCY_DOMAIN) {
        /* get temporary HRTF memory for L and R channel */
        data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv);
        data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv);
        if (!data_hrtf_r || !data_hrtf_l) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        /* frequency domain only needs the overlap-add overflow buffer */
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
        s->in_fft[0] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
        s->in_fft[1] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
        s->out_fft[0] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
        s->out_fft[1] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
        s->temp_afft[0] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
        s->temp_afft[1] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
        if (!s->in_fft[0] || !s->in_fft[1] ||
            !s->out_fft[0] || !s->out_fft[1] ||
            !s->temp_afft[0] || !s->temp_afft[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (s->type == FREQUENCY_DOMAIN) {
        /* temporary FFT work buffers for IR -> HRTF conversion */
        fft_out_l = av_calloc(n_fft, sizeof(*fft_out_l));
        fft_out_r = av_calloc(n_fft, sizeof(*fft_out_r));
        fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
        fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
        if (!fft_in_l || !fft_in_r ||
            !fft_out_l || !fft_out_r) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    for (i = 0; i < s->n_conv; i++) {
        float *lir, *rir;

        offset = i * n_samples; /* no. samples already written */

        lir = data_ir_l + offset;
        rir = data_ir_r + offset;

        if (s->type == TIME_DOMAIN) {
            for (j = 0; j < ir_samples; j++) {
                /* load reversed IRs of the specified source position
                 * sample-by-sample for left and right ear; and apply gain */
                s->data_ir[0][offset + j] = lir[ir_samples - 1 - j] * gain_lin;
                s->data_ir[1][offset + j] = rir[ir_samples - 1 - j] * gain_lin;
            }
        } else if (s->type == FREQUENCY_DOMAIN) {
            memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
            memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));

            offset = i * n_fft; /* no. samples already written */
            for (j = 0; j < ir_samples; j++) {
                /* load non-reversed IRs of the specified source position
                 * sample-by-sample and apply gain,
                 * L channel is loaded to real part, R channel to imag part,
                 * IRs are shifted by L and R delay */
                fft_in_l[s->delay[0][i] + j].re = lir[j] * gain_lin;
                fft_in_r[s->delay[1][i] + j].re = rir[j] * gain_lin;
            }

            /* actually transform to frequency domain (IRs -> HRTFs) */
            s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
            memcpy(data_hrtf_l + offset, fft_out_l, n_fft * sizeof(*fft_out_l));
            s->tx_fn[1](s->fft[1], fft_out_r, fft_in_r, sizeof(*fft_in_r));
            memcpy(data_hrtf_r + offset, fft_out_r, n_fft * sizeof(*fft_out_r));
        }
    }

    if (s->type == FREQUENCY_DOMAIN) {
        s->data_hrtf[0] = av_malloc_array(n_fft * s->n_conv, sizeof(AVComplexFloat));
        s->data_hrtf[1] = av_malloc_array(n_fft * s->n_conv, sizeof(AVComplexFloat));
        if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        memcpy(s->data_hrtf[0], data_hrtf_l, /* copy HRTF data to */
               sizeof(AVComplexFloat) * n_conv * n_fft); /* filter struct */
        memcpy(s->data_hrtf[1], data_hrtf_r,
               sizeof(AVComplexFloat) * n_conv * n_fft);
    }

fail:
    av_freep(&data_hrtf_l); /* free temporary HRTF memory */
    av_freep(&data_hrtf_r);

    av_freep(&data_ir_l); /* free temporary IR memory */
    av_freep(&data_ir_r);

    av_freep(&fft_out_l); /* free temporary FFT memory */
    av_freep(&fft_out_r);

    av_freep(&fft_in_l); /* free temporary FFT memory */
    av_freep(&fft_in_r);

    return ret;
}
967 
969 {
 /* Filter init callback (its 'static av_cold int init(AVFilterContext *ctx)'
  * signature sits on the preceding line, elided by this listing).
  * Validates that a SOFA filename option was supplied, preloads the SOFA
  * file to obtain its sampling rate, and allocates the float-DSP context.
  * Returns 0 on success or a negative AVERROR code. */
 970  SOFAlizerContext *s = ctx->priv;
 971  int ret;
 972 
 973  if (!s->filename) {
 974  av_log(ctx, AV_LOG_ERROR, "Valid SOFA filename must be set.\n");
 975  return AVERROR(EINVAL);
 976  }
 977 
 978  /* preload SOFA file, */
 979  ret = preload_sofa(ctx, s->filename, &s->sample_rate);
 980  if (ret) {
 981  /* file loading error */
 982  av_log(ctx, AV_LOG_ERROR, "Error while loading SOFA file: '%s'\n", s->filename);
 983  } else { /* no file loading error, resampling not required */
 984  av_log(ctx, AV_LOG_DEBUG, "File '%s' loaded.\n", s->filename);
 985  }
 986 
 /* NOTE(review): 'ret' is tested a second time here; the two branches could
  * be merged, but the ordering of the two error messages is user-visible,
  * so it is left as-is. */
 987  if (ret) {
 988  av_log(ctx, AV_LOG_ERROR, "No valid SOFA file could be loaded. Please specify valid SOFA file.\n");
 989  return ret;
 990  }
 991 
 /* DSP helpers used by the convolution paths; freed in uninit(). */
 992  s->fdsp = avpriv_float_dsp_alloc(0);
 993  if (!s->fdsp)
 994  return AVERROR(ENOMEM);
 995 
 996  return 0;
 997 }
998 
1000 {
 /* config_props callback for the input pad (its
  * 'static int config_input(AVFilterLink *inlink)' signature is on the
  * preceding line, elided by this listing).
  * Derives the linear LFE gain from the dB options, sets the number of
  * convolutions to the input channel count, and loads the impulse
  * responses for the requested listener orientation.
  * Returns 0 on success or a negative AVERROR code from load_data(). */
 1001  AVFilterContext *ctx = inlink->dst;
 1002  SOFAlizerContext *s = ctx->priv;
 1003  int ret;
 1004 
 /* frequency-domain processing works on fixed-size frames */
 1005  if (s->type == FREQUENCY_DOMAIN)
 1006  s->nb_samples = s->framesize;
 1007 
 1008  /* gain -3 dB per channel */
 /* dB -> linear amplitude: 10^(x/20), computed as expf(x/20 * ln 10) */
 1009  s->gain_lfe = expf((s->gain - 3 * inlink->ch_layout.nb_channels + s->lfe_gain) / 20 * M_LN10);
 1010 
 /* one HRTF convolution per input channel */
 1011  s->n_conv = inlink->ch_layout.nb_channels;
 1012 
 1013  /* load IRs to data_ir[0] and data_ir[1] for required directions */
 1014  if ((ret = load_data(ctx, s->rotation, s->elevation, s->radius, inlink->sample_rate)) < 0)
 1015  return ret;
 1016 
 1017  av_log(ctx, AV_LOG_DEBUG, "Samplerate: %d Channels to convolute: %d, Length of ringbuffer: %d x %d\n",
 1018  inlink->sample_rate, s->n_conv, inlink->ch_layout.nb_channels, s->buffer_length);
 1019 
 1020  return 0;
 1021 }
1022 
1024 {
 /* Filter uninit callback (its 'static av_cold void uninit(AVFilterContext *ctx)'
  * signature is on the preceding line, elided by this listing).
  * Releases every resource acquired during init()/config_input(): the SOFA
  * handle, the forward/inverse transform contexts, and all per-ear buffers.
  * av_freep() both frees and NULLs, so a double uninit is safe. */
 1025  SOFAlizerContext *s = ctx->priv;
 1026 
 1027  close_sofa(&s->sofa);
 1028  av_tx_uninit(&s->ifft[0]);
 1029  av_tx_uninit(&s->ifft[1]);
 1030  av_tx_uninit(&s->fft[0]);
 1031  av_tx_uninit(&s->fft[1]);
 /* NOTE(review): av_tx_uninit() already sets *ctx to NULL (per its
  * documentation), so the four explicit NULL stores below are redundant
  * but harmless. */
 1032  s->ifft[0] = NULL;
 1033  s->ifft[1] = NULL;
 1034  s->fft[0] = NULL;
 1035  s->fft[1] = NULL;
 /* per-ear ([0] = left, [1] = right) delay/IR/ring/scratch buffers */
 1036  av_freep(&s->delay[0]);
 1037  av_freep(&s->delay[1]);
 1038  av_freep(&s->data_ir[0]);
 1039  av_freep(&s->data_ir[1]);
 1040  av_freep(&s->ringbuffer[0]);
 1041  av_freep(&s->ringbuffer[1]);
 1042  av_freep(&s->speaker_azim);
 1043  av_freep(&s->speaker_elev);
 1044  av_freep(&s->temp_src[0]);
 1045  av_freep(&s->temp_src[1]);
 1046  av_freep(&s->temp_afft[0]);
 1047  av_freep(&s->temp_afft[1]);
 1048  av_freep(&s->in_fft[0]);
 1049  av_freep(&s->in_fft[1]);
 1050  av_freep(&s->out_fft[0]);
 1051  av_freep(&s->out_fft[1]);
 1052  av_freep(&s->data_hrtf[0]);
 1053  av_freep(&s->data_hrtf[1]);
 1054  av_freep(&s->fdsp);
 1055 }
1056 
1057 #define OFFSET(x) offsetof(SOFAlizerContext, x)
1058 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1059 
/* User-settable options of the sofalizer filter. 'type' selects between
 * time-domain (0) and frequency-domain (1, default) convolution; gains are
 * expressed in dB and converted to linear factors in config_input(). */
1060 static const AVOption sofalizer_options[] = {
1061  { "sofa", "sofa filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
1062  { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
1063  { "rotation", "set rotation" , OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -360, 360, .flags = FLAGS },
1064  { "elevation", "set elevation", OFFSET(elevation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -90, 90, .flags = FLAGS },
1065  { "radius", "set radius", OFFSET(radius), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 5, .flags = FLAGS },
1066  { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, .unit = "type" },
1067  { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, .unit = "type" },
1068  { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, .unit = "type" },
1069  { "speakers", "set speaker custom positions", OFFSET(speakers_pos), AV_OPT_TYPE_STRING, {.str=0}, 0, 0, .flags = FLAGS },
1070  { "lfegain", "set lfe gain", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20,40, .flags = FLAGS },
1071  { "framesize", "set frame size", OFFSET(framesize), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
1072  { "normalize", "normalize IRs", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, .flags = FLAGS },
1073  { "interpolate","interpolate IRs from neighbors", OFFSET(interpolate),AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
1074  { "minphase", "minphase IRs", OFFSET(minphase), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
1075  { "anglestep", "set neighbor search angle step", OFFSET(anglestep), AV_OPT_TYPE_FLOAT, {.dbl=.5}, 0.01, 10, .flags = FLAGS },
1076  { "radstep", "set neighbor search radius step", OFFSET(radstep), AV_OPT_TYPE_FLOAT, {.dbl=.01}, 0.01, 1, .flags = FLAGS },
1077  { NULL }
1078 };
1079 
/* Expands to the AVClass 'sofalizer_class' referenced by .priv_class below. */
1080 AVFILTER_DEFINE_CLASS(sofalizer);
1081 
/* Single audio input pad; config_input() runs once the input link has been
 * configured (sample rate and channel layout known). */
1082 static const AVFilterPad inputs[] = {
1083  {
1084  .name = "default",
1085  .type = AVMEDIA_TYPE_AUDIO,
1086  .config_props = config_input,
1087  },
1088 };
1089 
 /* AVFilter registration for sofalizer.
  * NOTE(review): the 'const AVFilter ff_af_sofalizer = {' opener (file line
  * 1090) and the FILTER_INPUTS/FILTER_OUTPUTS/FILTER_QUERY_FUNC designators
  * (file lines 1098-1100) are elided by this listing; only the visible
  * initializers are annotated here. */
1091  .name = "sofalizer",
1092  .description = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),
1093  .priv_size = sizeof(SOFAlizerContext),
1094  .priv_class = &sofalizer_class,
1095  .init = init,
1096  .activate = activate,
1097  .uninit = uninit,
 /* slice threading: convolution jobs are split per channel */
1101  .flags = AVFILTER_FLAG_SLICE_THREADS,
1102 };
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
SOFAlizerContext::write
int write[2]
Definition: af_sofalizer.c:83
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
td
#define td
Definition: regdef.h:70
TIME_DOMAIN
#define TIME_DOMAIN
Definition: af_sofalizer.c:44
SOFAlizerContext::filename
char * filename
Definition: af_sofalizer.c:67
SOFAlizerContext::nb_samples
int nb_samples
Definition: af_sofalizer.c:87
SOFAlizerContext::speakers_pos
char * speakers_pos
Definition: af_sofalizer.c:73
SOFAlizerContext::radstep
float radstep
Definition: af_sofalizer.c:110
SOFAlizerContext
Definition: af_sofalizer.c:64
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ThreadData::out_fft
AVComplexFloat ** out_fft
Definition: af_headphone.c:140
SOFAlizerContext::vspkrpos
VirtualSpeaker vspkrpos[64]
Definition: af_sofalizer.c:112
out
FILE * out
Definition: movenc.c:55
parse_channel_name
static int parse_channel_name(AVFilterContext *ctx, char **arg, int *rchannel)
Definition: af_sofalizer.c:190
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:379
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:674
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:337
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_CHAN_WIDE_LEFT
@ AV_CHAN_WIDE_LEFT
Definition: channel_layout.h:72
AVTXContext
Definition: tx_priv.h:235
ff_set_common_samplerates_from_list
int ff_set_common_samplerates_from_list(AVFilterContext *ctx, const int *samplerates)
Equivalent to ff_set_common_samplerates(ctx, ff_make_format_list(samplerates))
Definition: formats.c:816
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ff_clz
#define ff_clz
Definition: intmath.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
SOFAlizerContext::in_fft
AVComplexFloat * in_fft[2]
Definition: af_sofalizer.c:95
SOFAlizerContext::lfe_channel
int lfe_channel
Definition: af_sofalizer.c:76
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
av_channel_layout_channel_from_index
enum AVChannel av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx)
Get the channel with the given index in a channel layout.
Definition: channel_layout.c:665
AVOption
AVOption.
Definition: opt.h:346
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:159
expf
#define expf(x)
Definition: libm.h:283
AVComplexFloat
Definition: tx.h:27
SOFAlizerContext::type
int type
Definition: af_sofalizer.c:104
SOFAlizerContext::anglestep
float anglestep
Definition: af_sofalizer.c:109
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:527
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:139
SOFAlizerContext::gain_lfe
float gain_lfe
Definition: af_sofalizer.c:75
SOFAlizerContext::n_conv
int n_conv
Definition: af_sofalizer.c:78
formats.h
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1442
AV_CHAN_SURROUND_DIRECT_LEFT
@ AV_CHAN_SURROUND_DIRECT_LEFT
Definition: channel_layout.h:74
AVComplexFloat::im
float im
Definition: tx.h:28
AV_CHAN_TOP_BACK_RIGHT
@ AV_CHAN_TOP_BACK_RIGHT
Definition: channel_layout.h:67
fail
#define fail()
Definition: checkasm.h:179
VirtualSpeaker::elev
float elev
Definition: af_sofalizer.c:61
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:138
parse_speaker_pos
static void parse_speaker_pos(AVFilterContext *ctx)
Definition: af_sofalizer.c:219
SOFAlizerContext::sofa
MySofa sofa
Definition: af_sofalizer.c:68
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
SOFAlizerContext::sample_rate
int sample_rate
Definition: af_sofalizer.c:70
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? 
in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
MySofa::lir
float * lir
Definition: af_sofalizer.c:53
AV_CHAN_STEREO_RIGHT
@ AV_CHAN_STEREO_RIGHT
See above.
Definition: channel_layout.h:71
MySofa::n_samples
int n_samples
Definition: af_sofalizer.c:52
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
SOFAlizerContext::interpolate
int interpolate
Definition: af_sofalizer.c:107
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_sofalizer.c:45
s
#define s(width, name)
Definition: cbs_vp9.c:198
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_sofalizer.c:999
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
AV_CHAN_SIDE_RIGHT
@ AV_CHAN_SIDE_RIGHT
Definition: channel_layout.h:60
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:874
get_speaker_pos
static int get_speaker_pos(AVFilterContext *ctx, float *speaker_azim, float *speaker_elev)
Definition: af_sofalizer.c:250
filters.h
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplex...
Definition: tx.h:47
load_data
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
Definition: af_sofalizer.c:717
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:49
SOFAlizerContext::data_hrtf
AVComplexFloat * data_hrtf[2]
Definition: af_sofalizer.c:116
SOFAlizerContext::data_ir
float * data_ir[2]
Definition: af_sofalizer.c:92
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_sofalizer.c:968
SOFAlizerContext::framesize
int framesize
Definition: af_sofalizer.c:105
sofalizer_fast_convolute
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:434
MySofa::lookup
struct MYSOFA_LOOKUP * lookup
Definition: af_sofalizer.c:49
MySofa::ir_samples
int ir_samples
Definition: af_sofalizer.c:51
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
SOFAlizerContext::fdsp
AVFloatDSPContext * fdsp
Definition: af_sofalizer.c:118
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
activate
static int activate(AVFilterContext *ctx)
Definition: af_sofalizer.c:613
SOFAlizerContext::ringbuffer
float * ringbuffer[2]
Definition: af_sofalizer.c:81
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:962
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1462
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
SOFAlizerContext::delay
int * delay[2]
Definition: af_sofalizer.c:90
SOFAlizerContext::buffer_length
int buffer_length
Definition: af_sofalizer.c:84
MySofa::rir
float * rir
Definition: af_sofalizer.c:53
AV_CHAN_TOP_BACK_CENTER
@ AV_CHAN_TOP_BACK_CENTER
Definition: channel_layout.h:66
MySofa::max_delay
int max_delay
Definition: af_sofalizer.c:55
ff_audio_default_filterpad
const AVFilterPad ff_audio_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_AUDIO.
Definition: audio.c:33
AV_CHAN_TOP_CENTER
@ AV_CHAN_TOP_CENTER
Definition: channel_layout.h:61
ThreadData::in_fft
AVComplexFloat ** in_fft
Definition: af_headphone.c:141
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, const AVChannelLayout *channel_layout)
Definition: formats.c:522
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_sofalizer.c:574
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CHAN_FRONT_RIGHT_OF_CENTER
@ AV_CHAN_FRONT_RIGHT_OF_CENTER
Definition: channel_layout.h:57
float_dsp.h
AV_CHAN_FRONT_RIGHT
@ AV_CHAN_FRONT_RIGHT
Definition: channel_layout.h:51
AV_CHAN_FRONT_CENTER
@ AV_CHAN_FRONT_CENTER
Definition: channel_layout.h:52
inputs
static const AVFilterPad inputs[]
Definition: af_sofalizer.c:1082
SOFAlizerContext::speaker_azim
float * speaker_azim
Definition: af_sofalizer.c:71
SOFAlizerContext::temp_afft
AVComplexFloat * temp_afft[2]
Definition: af_sofalizer.c:97
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
VirtualSpeaker
Definition: af_sofalizer.c:58
SOFAlizerContext::n_fft
int n_fft
Definition: af_sofalizer.c:86
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
SOFAlizerContext::rotation
float rotation
Definition: af_sofalizer.c:101
AV_CHAN_LOW_FREQUENCY
@ AV_CHAN_LOW_FREQUENCY
Definition: channel_layout.h:53
AV_CHAN_BACK_RIGHT
@ AV_CHAN_BACK_RIGHT
Definition: channel_layout.h:55
AVComplexFloat::re
float re
Definition: tx.h:28
AV_CHAN_SIDE_LEFT
@ AV_CHAN_SIDE_LEFT
Definition: channel_layout.h:59
AVFloatDSPContext
Definition: float_dsp.h:22
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:462
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_sofalizer.c:638
OFFSET
#define OFFSET(x)
Definition: af_sofalizer.c:1057
AV_CHAN_TOP_FRONT_RIGHT
@ AV_CHAN_TOP_FRONT_RIGHT
Definition: channel_layout.h:64
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
AV_CHAN_FRONT_LEFT_OF_CENTER
@ AV_CHAN_FRONT_LEFT_OF_CENTER
Definition: channel_layout.h:56
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:613
preload_sofa
static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
Definition: af_sofalizer.c:137
SOFAlizerContext::radius
float radius
Definition: af_sofalizer.c:103
interpolate
static void interpolate(float *out, float v1, float v2, int size)
Definition: twinvq.c:85
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:424
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
SOFAlizerContext::tx_fn
av_tx_fn tx_fn[2]
Definition: af_sofalizer.c:115
getfilter_float
static int getfilter_float(AVFilterContext *ctx, float x, float y, float z, float *left, float *right, float *delay_left, float *delay_right)
Definition: af_sofalizer.c:673
AVChannel
AVChannel
Definition: channel_layout.h:47
normalize
Definition: normalize.py:1
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:455
AV_CHAN_SURROUND_DIRECT_RIGHT
@ AV_CHAN_SURROUND_DIRECT_RIGHT
Definition: channel_layout.h:75
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
FLAGS
#define FLAGS
Definition: af_sofalizer.c:1058
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:436
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
sofalizer_options
static const AVOption sofalizer_options[]
Definition: af_sofalizer.c:1060
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:137
ThreadData::delay
int ** delay
Definition: af_sofalizer.c:324
len
int len
Definition: vorbis_enc_data.h:426
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
MySofa
Definition: af_sofalizer.c:47
AV_CHAN_STEREO_LEFT
@ AV_CHAN_STEREO_LEFT
Stereo downmix.
Definition: channel_layout.h:69
SOFAlizerContext::lfe_gain
float lfe_gain
Definition: af_sofalizer.c:74
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
AVFilter
Filter definition.
Definition: avfilter.h:166
SOFAlizerContext::fft
AVTXContext * fft[2]
Definition: af_sofalizer.c:114
ret
ret
Definition: filter_design.txt:187
ThreadData::write
int * write
Definition: af_headphone.c:135
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_sofalizer.c:1023
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_CHAN_BACK_CENTER
@ AV_CHAN_BACK_CENTER
Definition: channel_layout.h:58
av_channel_from_string
enum AVChannel av_channel_from_string(const char *str)
This is the inverse function of av_channel_name().
Definition: channel_layout.c:146
ThreadData::temp_afft
AVComplexFloat ** temp_afft
Definition: af_headphone.c:142
SOFAlizerContext::out_fft
AVComplexFloat * out_fft[2]
Definition: af_sofalizer.c:96
sofalizer_convolute
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:334
channel_layout.h
AV_CHAN_LOW_FREQUENCY_2
@ AV_CHAN_LOW_FREQUENCY_2
Definition: channel_layout.h:76
AV_CHAN_TOP_BACK_LEFT
@ AV_CHAN_TOP_BACK_LEFT
Definition: channel_layout.h:65
SOFAlizerContext::minphase
int minphase
Definition: af_sofalizer.c:108
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
MySofa::hrtf
struct MYSOFA_HRTF * hrtf
Definition: af_sofalizer.c:48
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
VirtualSpeaker::set
uint8_t set
Definition: af_sofalizer.c:59
AV_CHAN_BACK_LEFT
@ AV_CHAN_BACK_LEFT
Definition: channel_layout.h:54
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
MySofa::neighborhood
struct MYSOFA_NEIGHBORHOOD * neighborhood
Definition: af_sofalizer.c:50
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_CHAN_TOP_FRONT_CENTER
@ AV_CHAN_TOP_FRONT_CENTER
Definition: channel_layout.h:63
mem.h
audio.h
ff_af_sofalizer
const AVFilter ff_af_sofalizer
Definition: af_sofalizer.c:1090
AV_CHAN_WIDE_RIGHT
@ AV_CHAN_WIDE_RIGHT
Definition: channel_layout.h:73
M_LN10
#define M_LN10
Definition: mathematics.h:49
VirtualSpeaker::azim
float azim
Definition: af_sofalizer.c:60
SOFAlizerContext::speaker_elev
float * speaker_elev
Definition: af_sofalizer.c:72
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
SOFAlizerContext::ifft
AVTXContext * ifft[2]
Definition: af_sofalizer.c:114
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
AV_CHAN_TOP_FRONT_LEFT
@ AV_CHAN_TOP_FRONT_LEFT
Definition: channel_layout.h:62
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
SOFAlizerContext::gain
float gain
Definition: af_sofalizer.c:100
ThreadData::ir
float ** ir
Definition: af_headphone.c:136
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
SOFAlizerContext::temp_src
float * temp_src[2]
Definition: af_sofalizer.c:94
AV_CHAN_FRONT_LEFT
@ AV_CHAN_FRONT_LEFT
Definition: channel_layout.h:50
MySofa::fir
float * fir
Definition: af_sofalizer.c:54
avstring.h
SOFAlizerContext::itx_fn
av_tx_fn itx_fn[2]
Definition: af_sofalizer.c:115
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
int
int
Definition: ffmpeg_filter.c:424
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
SOFAlizerContext::normalize
int normalize
Definition: af_sofalizer.c:106
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:60
SOFAlizerContext::elevation
float elevation
Definition: af_sofalizer.c:102
read
static uint32_t BS_FUNC() read(BSCTX *bc, unsigned int n)
Return n bits from the buffer, n has to be in the 0-32 range.
Definition: bitstream_template.h:231
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(sofalizer)
tx.h
close_sofa
static int close_sofa(struct MySofa *sofa)
Definition: af_sofalizer.c:121
intmath.h