FFmpeg
af_sofalizer.c
1 /*****************************************************************************
2  * sofalizer.c : SOFAlizer filter for virtual binaural acoustics
3  *****************************************************************************
4  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda,
5  * Acoustics Research Institute (ARI), Vienna, Austria
6  *
7  * Authors: Andreas Fuchs <andi.fuchs.mail@gmail.com>
8  * Wolfgang Hrauda <wolfgang.hrauda@gmx.at>
9  *
10  * SOFAlizer project coordinator at ARI, main developer of SOFA:
11  * Piotr Majdak <piotr@majdak.at>
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU Lesser General Public License as published by
15  * the Free Software Foundation; either version 2.1 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public License
24  * along with this program; if not, write to the Free Software Foundation,
25  * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
26  *****************************************************************************/
27 
28 #include <math.h>
29 #include <netcdf.h>
30 
31 #include "libavcodec/avfft.h"
32 #include "libavutil/avstring.h"
34 #include "libavutil/float_dsp.h"
35 #include "libavutil/intmath.h"
36 #include "libavutil/opt.h"
37 #include "avfilter.h"
38 #include "internal.h"
39 #include "audio.h"
40 
41 #define TIME_DOMAIN 0
42 #define FREQUENCY_DOMAIN 1
43 
44 typedef struct NCSofa { /* contains data of one SOFA file */
45  int ncid; /* netCDF ID of the opened SOFA file */
46  int n_samples; /* length of one impulse response (IR) */
47  int m_dim; /* number of measurement positions */
48  int *data_delay; /* broadband delay of each IR */
49  /* all measurement positions for each receiver (i.e. ear): */
50  float *sp_a; /* azimuth angles */
51  float *sp_e; /* elevation angles */
52  float *sp_r; /* radii */
53  /* data at each measurement position for each receiver: */
54  float *data_ir; /* IRs (time-domain) */
55 } NCSofa;
56 
57 typedef struct VirtualSpeaker {
58  uint8_t set;
59  float azim;
60  float elev;
61 } VirtualSpeaker;
62 
63 typedef struct SOFAlizerContext {
64  const AVClass *class;
65 
66  char *filename; /* name of SOFA file */
67  NCSofa sofa; /* contains data of the SOFA file */
68 
69  int sample_rate; /* sample rate from SOFA file */
70  float *speaker_azim; /* azimuth of the virtual loudspeakers */
71  float *speaker_elev; /* elevation of the virtual loudspeakers */
72  char *speakers_pos; /* custom positions of the virtual loudspeakers */
73  float gain_lfe; /* gain applied to LFE channel */
74  int lfe_channel; /* LFE channel position in channel layout */
75 
76  int n_conv; /* number of channels to convolute */
77 
78  /* buffer variables (for convolution) */
79  float *ringbuffer[2]; /* buffers input samples, length of one buffer: */
80  /* no. input ch. (incl. LFE) x buffer_length */
81  int write[2]; /* current write position to ringbuffer */
82  int buffer_length; /* is: longest IR plus max. delay in all SOFA files */
83  /* then choose next power of 2 */
84  int n_fft; /* number of samples in one FFT block */
85 
86  /* netCDF variables */
87  int *delay[2]; /* broadband delay for each channel/IR to be convolved */
88 
89  float *data_ir[2]; /* IRs for all channels to be convolved */
90  /* (this excludes the LFE) */
91  float *temp_src[2];
92  FFTComplex *temp_fft[2];
93 
94  /* control variables */
95  float gain; /* filter gain (in dB) */
96  float rotation; /* rotation of virtual loudspeakers (in degrees) */
97  float elevation; /* elevation of virtual loudspeakers (in deg.) */
98  float radius; /* distance virtual loudspeakers to listener (in metres) */
99  int type; /* processing type */
100 
101  VirtualSpeaker vspkrpos[64];
102 
103  FFTContext *fft[2], *ifft[2];
104  FFTComplex *data_hrtf[2];
105 
106  AVFloatDSPContext *fdsp;
107 } SOFAlizerContext;
108 
109 static int close_sofa(struct NCSofa *sofa)
110 {
111  av_freep(&sofa->data_delay);
112  av_freep(&sofa->sp_a);
113  av_freep(&sofa->sp_e);
114  av_freep(&sofa->sp_r);
115  av_freep(&sofa->data_ir);
116  nc_close(sofa->ncid);
117  sofa->ncid = 0;
118 
119  return 0;
120 }
121 
122 static int load_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
123 {
124  struct SOFAlizerContext *s = ctx->priv;
125  /* variables associated with content of SOFA file: */
126  int ncid, n_dims, n_vars, n_gatts, n_unlim_dim_id, status;
127  char data_delay_dim_name[NC_MAX_NAME];
128  float *sp_a, *sp_e, *sp_r, *data_ir;
129  char *sofa_conventions;
130  char dim_name[NC_MAX_NAME]; /* names of netCDF dimensions */
131  size_t *dim_length; /* lengths of netCDF dimensions */
132  char *text;
133  unsigned int sample_rate;
134  int data_delay_dim_id[2];
135  int samplingrate_id;
136  int data_delay_id;
137  int n_samples;
138  int m_dim_id = -1;
139  int n_dim_id = -1;
140  int data_ir_id;
141  size_t att_len;
142  int m_dim;
143  int *data_delay;
144  int sp_id;
145  int i, ret;
146 
147  s->sofa.ncid = 0;
148  status = nc_open(filename, NC_NOWRITE, &ncid); /* open SOFA file read-only */
149  if (status != NC_NOERR) {
150  av_log(ctx, AV_LOG_ERROR, "Can't find SOFA-file '%s'\n", filename);
151  return AVERROR(EINVAL);
152  }
153 
154  /* get number of dimensions, vars, global attributes and Id of unlimited dimensions: */
155  nc_inq(ncid, &n_dims, &n_vars, &n_gatts, &n_unlim_dim_id);
156 
157  /* -- get number of measurements ("M") and length of one IR ("N") -- */
158  dim_length = av_malloc_array(n_dims, sizeof(*dim_length));
159  if (!dim_length) {
160  nc_close(ncid);
161  return AVERROR(ENOMEM);
162  }
163 
164  for (i = 0; i < n_dims; i++) { /* go through all dimensions of file */
165  nc_inq_dim(ncid, i, (char *)&dim_name, &dim_length[i]); /* get dimensions */
166  if (!strncmp("M", (const char *)&dim_name, 1)) /* get ID of dimension "M" */
167  m_dim_id = i;
168  if (!strncmp("N", (const char *)&dim_name, 1)) /* get ID of dimension "N" */
169  n_dim_id = i;
170  }
171 
172  if ((m_dim_id == -1) || (n_dim_id == -1)) { /* dimension "M" or "N" couldn't be found */
173  av_log(ctx, AV_LOG_ERROR, "Can't find required dimensions in SOFA file.\n");
174  av_freep(&dim_length);
175  nc_close(ncid);
176  return AVERROR(EINVAL);
177  }
178 
179  n_samples = dim_length[n_dim_id]; /* get length of one IR */
180  m_dim = dim_length[m_dim_id]; /* get number of measurements */
181 
182  av_freep(&dim_length);
183 
184  /* -- check file type -- */
185  /* get length of attribute "Conventions" */
186  status = nc_inq_attlen(ncid, NC_GLOBAL, "Conventions", &att_len);
187  if (status != NC_NOERR) {
188  av_log(ctx, AV_LOG_ERROR, "Can't get length of attribute \"Conventions\".\n");
189  nc_close(ncid);
190  return AVERROR_INVALIDDATA;
191  }
192 
193  /* check whether file is SOFA file */
194  text = av_malloc(att_len + 1);
195  if (!text) {
196  nc_close(ncid);
197  return AVERROR(ENOMEM);
198  }
199 
200  nc_get_att_text(ncid, NC_GLOBAL, "Conventions", text);
201  *(text + att_len) = 0;
202  if (strncmp("SOFA", text, 4)) {
203  av_log(ctx, AV_LOG_ERROR, "Not a SOFA file!\n");
204  av_freep(&text);
205  nc_close(ncid);
206  return AVERROR(EINVAL);
207  }
208  av_freep(&text);
209 
210  status = nc_inq_attlen(ncid, NC_GLOBAL, "License", &att_len);
211  if (status == NC_NOERR) {
212  text = av_malloc(att_len + 1);
213  if (text) {
214  nc_get_att_text(ncid, NC_GLOBAL, "License", text);
215  *(text + att_len) = 0;
216  av_log(ctx, AV_LOG_INFO, "SOFA file License: %s\n", text);
217  av_freep(&text);
218  }
219  }
220 
221  status = nc_inq_attlen(ncid, NC_GLOBAL, "SourceDescription", &att_len);
222  if (status == NC_NOERR) {
223  text = av_malloc(att_len + 1);
224  if (text) {
225  nc_get_att_text(ncid, NC_GLOBAL, "SourceDescription", text);
226  *(text + att_len) = 0;
227  av_log(ctx, AV_LOG_INFO, "SOFA file SourceDescription: %s\n", text);
228  av_freep(&text);
229  }
230  }
231 
232  status = nc_inq_attlen(ncid, NC_GLOBAL, "Comment", &att_len);
233  if (status == NC_NOERR) {
234  text = av_malloc(att_len + 1);
235  if (text) {
236  nc_get_att_text(ncid, NC_GLOBAL, "Comment", text);
237  *(text + att_len) = 0;
238  av_log(ctx, AV_LOG_INFO, "SOFA file Comment: %s\n", text);
239  av_freep(&text);
240  }
241  }
242 
243  status = nc_inq_attlen(ncid, NC_GLOBAL, "SOFAConventions", &att_len);
244  if (status != NC_NOERR) {
245  av_log(ctx, AV_LOG_ERROR, "Can't get length of attribute \"SOFAConventions\".\n");
246  nc_close(ncid);
247  return AVERROR_INVALIDDATA;
248  }
249 
250  sofa_conventions = av_malloc(att_len + 1);
251  if (!sofa_conventions) {
252  nc_close(ncid);
253  return AVERROR(ENOMEM);
254  }
255 
256  nc_get_att_text(ncid, NC_GLOBAL, "SOFAConventions", sofa_conventions);
257  *(sofa_conventions + att_len) = 0;
258  if (strncmp("SimpleFreeFieldHRIR", sofa_conventions, att_len)) {
259  av_log(ctx, AV_LOG_ERROR, "Not a SimpleFreeFieldHRIR file!\n");
260  av_freep(&sofa_conventions);
261  nc_close(ncid);
262  return AVERROR(EINVAL);
263  }
264  av_freep(&sofa_conventions);
265 
266  /* -- get sampling rate of HRTFs -- */
267  /* read ID, then value */
268  status = nc_inq_varid(ncid, "Data.SamplingRate", &samplingrate_id);
269  status += nc_get_var_uint(ncid, samplingrate_id, &sample_rate);
270  if (status != NC_NOERR) {
271  av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.SamplingRate.\n");
272  nc_close(ncid);
273  return AVERROR(EINVAL);
274  }
275  *samplingrate = sample_rate; /* remember sampling rate */
276 
277  /* -- allocate memory for one value for each measurement position: -- */
278  sp_a = s->sofa.sp_a = av_malloc_array(m_dim, sizeof(float));
279  sp_e = s->sofa.sp_e = av_malloc_array(m_dim, sizeof(float));
280  sp_r = s->sofa.sp_r = av_malloc_array(m_dim, sizeof(float));
281  /* delay and IR values required for each ear and measurement position: */
282  data_delay = s->sofa.data_delay = av_calloc(m_dim, 2 * sizeof(int));
283  data_ir = s->sofa.data_ir = av_calloc(m_dim * FFALIGN(n_samples, 16), sizeof(float) * 2);
284 
285  if (!data_delay || !sp_a || !sp_e || !sp_r || !data_ir) {
286  /* if memory could not be allocated */
287  close_sofa(&s->sofa);
288  return AVERROR(ENOMEM);
289  }
290 
291  /* get impulse responses (HRTFs): */
292  /* get corresponding ID */
293  status = nc_inq_varid(ncid, "Data.IR", &data_ir_id);
294  status += nc_get_var_float(ncid, data_ir_id, data_ir); /* read and store IRs */
295  if (status != NC_NOERR) {
296  av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.IR!\n");
297  ret = AVERROR(EINVAL);
298  goto error;
299  }
300 
301  /* get source positions of the HRTFs in the SOFA file: */
302  status = nc_inq_varid(ncid, "SourcePosition", &sp_id); /* get corresponding ID */
303  status += nc_get_vara_float(ncid, sp_id, (size_t[2]){ 0, 0 } ,
304  (size_t[2]){ m_dim, 1}, sp_a); /* read & store azimuth angles */
305  status += nc_get_vara_float(ncid, sp_id, (size_t[2]){ 0, 1 } ,
306  (size_t[2]){ m_dim, 1}, sp_e); /* read & store elevation angles */
307  status += nc_get_vara_float(ncid, sp_id, (size_t[2]){ 0, 2 } ,
308  (size_t[2]){ m_dim, 1}, sp_r); /* read & store radii */
309  if (status != NC_NOERR) { /* if any source position variable couldn't be read */
310  av_log(ctx, AV_LOG_ERROR, "Couldn't read SourcePosition.\n");
311  ret = AVERROR(EINVAL);
312  goto error;
313  }
314 
315  /* read Data.Delay, check for errors and fit it to data_delay */
316  status = nc_inq_varid(ncid, "Data.Delay", &data_delay_id);
317  status += nc_inq_vardimid(ncid, data_delay_id, &data_delay_dim_id[0]);
318  status += nc_inq_dimname(ncid, data_delay_dim_id[0], data_delay_dim_name);
319  if (status != NC_NOERR) {
320  av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.Delay.\n");
321  ret = AVERROR(EINVAL);
322  goto error;
323  }
324 
325  /* Data.Delay dimension check */
326  /* dimension of Data.Delay is [I R]: */
327  if (!strncmp(data_delay_dim_name, "I", 2)) {
328  /* check 2 characters to ensure the string is 0-terminated after "I" */
329  int delay[2]; /* delays read from the SOFA file: */
330  int *data_delay_r;
331 
332  av_log(ctx, AV_LOG_DEBUG, "Data.Delay has dimension [I R]\n");
333  status = nc_get_var_int(ncid, data_delay_id, &delay[0]);
334  if (status != NC_NOERR) {
335  av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.Delay\n");
336  ret = AVERROR(EINVAL);
337  goto error;
338  }
339  data_delay_r = data_delay + m_dim;
340  for (i = 0; i < m_dim; i++) { /* extend given dimension [I R] to [M R] */
341  /* assign constant delay value for all measurements to data_delay fields */
342  data_delay[i] = delay[0];
343  data_delay_r[i] = delay[1];
344  }
345  /* dimension of Data.Delay is [M R] */
346  } else if (!strncmp(data_delay_dim_name, "M", 2)) {
347  av_log(ctx, AV_LOG_DEBUG, "Data.Delay has dimension [M R]\n");
348  /* get delays from SOFA file: */
349  status = nc_get_var_int(ncid, data_delay_id, data_delay);
350  if (status != NC_NOERR) {
351  av_log(ctx, AV_LOG_ERROR, "Couldn't read Data.Delay\n");
352  ret = AVERROR(EINVAL);
353  goto error;
354  }
355  } else { /* dimension of Data.Delay is neither [I R] nor [M R] */
356  av_log(ctx, AV_LOG_ERROR, "Data.Delay does not have the required dimensions [I R] or [M R].\n");
357  ret = AVERROR(EINVAL);
358  goto error;
359  }
360 
361  /* save information in SOFA struct: */
362  s->sofa.m_dim = m_dim; /* no. measurement positions */
363  s->sofa.n_samples = n_samples; /* length of one IR */
364  s->sofa.ncid = ncid; /* netCDF ID of SOFA file */
365  nc_close(ncid); /* close SOFA file */
366 
367  av_log(ctx, AV_LOG_DEBUG, "m_dim: %d n_samples %d\n", m_dim, n_samples);
368 
369  return 0;
370 
371 error:
372  close_sofa(&s->sofa);
373  return ret;
374 }
375 
376 static int parse_channel_name(char **arg, int *rchannel)
377 {
378  char buf[8];
379  int len, i, channel_id = 0;
380  int64_t layout, layout0;
381 
382  /* try to parse a channel name, e.g. "FL" */
383  if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
384  layout0 = layout = av_get_channel_layout(buf);
385  /* channel_id <- first set bit in layout */
386  for (i = 32; i > 0; i >>= 1) {
387  if (layout >= (int64_t)1 << i) {
388  channel_id += i;
389  layout >>= i;
390  }
391  }
392  /* reject layouts that are not a single channel */
393  if (channel_id >= 64 || layout0 != (int64_t)1 << channel_id)
394  return AVERROR(EINVAL);
395  *rchannel = channel_id;
396  *arg += len;
397  return 0;
398  }
399  return AVERROR(EINVAL);
400 }
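
For illustration, a minimal standalone sketch of the halving bit search used above, on plain integers (no libavutil types; the helper name and test values are made up): it recovers the index of the single set bit of a one-channel layout and rejects masks with more than one bit.

#include <stdint.h>
#include <stdio.h>

/* illustrative helper name; returns the bit index, or -1 if not a single bit */
static int single_bit_index(uint64_t layout)
{
    uint64_t layout0 = layout;
    int i, channel_id = 0;

    for (i = 32; i > 0; i >>= 1) {                 /* same halving search as above */
        if (layout >= (uint64_t)1 << i) {
            channel_id += i;
            layout >>= i;
        }
    }
    if (channel_id >= 64 || layout0 != (uint64_t)1 << channel_id)
        return -1;                                 /* not exactly one bit set */
    return channel_id;
}

int main(void)
{
    printf("%d\n", single_bit_index(0x10));        /* 0x10 = bit 4 (e.g. AV_CH_BACK_LEFT) */
    printf("%d\n", single_bit_index(0x03));        /* two channels -> -1 */
    return 0;
}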
401 
402 static void parse_speaker_pos(AVFilterContext *ctx, int64_t in_channel_layout)
403 {
404  SOFAlizerContext *s = ctx->priv;
405  char *arg, *tokenizer, *p, *args = av_strdup(s->speakers_pos);
406 
407  if (!args)
408  return;
409  p = args;
410 
411  while ((arg = av_strtok(p, "|", &tokenizer))) {
412  float azim, elev;
413  int out_ch_id;
414 
415  p = NULL;
416  if (parse_channel_name(&arg, &out_ch_id))
417  continue;
418  if (sscanf(arg, "%f %f", &azim, &elev) == 2) {
419  s->vspkrpos[out_ch_id].set = 1;
420  s->vspkrpos[out_ch_id].azim = azim;
421  s->vspkrpos[out_ch_id].elev = elev;
422  } else if (sscanf(arg, "%f", &azim) == 1) {
423  s->vspkrpos[out_ch_id].set = 1;
424  s->vspkrpos[out_ch_id].azim = azim;
425  s->vspkrpos[out_ch_id].elev = 0;
426  }
427  }
428 
429  av_free(args);
430 }
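
A small standalone sketch of how the "speakers" option string is split, using standard strtok() in place of av_strtok() and a made-up option value; each '|'-separated token is a channel name followed by an azimuth and an optional elevation, as parsed above.

#include <stdio.h>
#include <string.h>

int main(void)
{
    char args[] = "FL 30|FR 330|FC 0 15";          /* made-up "speakers" value */
    char *tok = strtok(args, "|");

    while (tok) {
        char name[8];
        float azim, elev = 0.0f;

        /* channel name, azimuth, optional elevation */
        if (sscanf(tok, "%7[A-Z] %f %f", name, &azim, &elev) >= 2)
            printf("%s: azimuth %g, elevation %g\n", name, azim, elev);
        tok = strtok(NULL, "|");
    }
    return 0;
}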
431 
432 static int get_speaker_pos(AVFilterContext *ctx,
433  float *speaker_azim, float *speaker_elev)
434 {
435  struct SOFAlizerContext *s = ctx->priv;
436  uint64_t channels_layout = ctx->inputs[0]->channel_layout;
437  float azim[16] = { 0 };
438  float elev[16] = { 0 };
439  int m, ch, n_conv = ctx->inputs[0]->channels; /* get no. input channels */
440 
441  if (n_conv > 16)
442  return AVERROR(EINVAL);
443 
444  s->lfe_channel = -1;
445 
446  if (s->speakers_pos)
447  parse_speaker_pos(ctx, channels_layout);
448 
449  /* set speaker positions according to input channel configuration: */
450  for (m = 0, ch = 0; ch < n_conv && m < 64; m++) {
451  uint64_t mask = channels_layout & (1ULL << m);
452 
453  switch (mask) {
454  case AV_CH_FRONT_LEFT: azim[ch] = 30; break;
455  case AV_CH_FRONT_RIGHT: azim[ch] = 330; break;
456  case AV_CH_FRONT_CENTER: azim[ch] = 0; break;
457  case AV_CH_LOW_FREQUENCY:
458  case AV_CH_LOW_FREQUENCY_2: s->lfe_channel = ch; break;
459  case AV_CH_BACK_LEFT: azim[ch] = 150; break;
460  case AV_CH_BACK_RIGHT: azim[ch] = 210; break;
461  case AV_CH_BACK_CENTER: azim[ch] = 180; break;
462  case AV_CH_SIDE_LEFT: azim[ch] = 90; break;
463  case AV_CH_SIDE_RIGHT: azim[ch] = 270; break;
464  case AV_CH_FRONT_LEFT_OF_CENTER: azim[ch] = 15; break;
465  case AV_CH_FRONT_RIGHT_OF_CENTER: azim[ch] = 345; break;
466  case AV_CH_TOP_CENTER: azim[ch] = 0;
467  elev[ch] = 90; break;
468  case AV_CH_TOP_FRONT_LEFT: azim[ch] = 30;
469  elev[ch] = 45; break;
470  case AV_CH_TOP_FRONT_CENTER: azim[ch] = 0;
471  elev[ch] = 45; break;
472  case AV_CH_TOP_FRONT_RIGHT: azim[ch] = 330;
473  elev[ch] = 45; break;
474  case AV_CH_TOP_BACK_LEFT: azim[ch] = 150;
475  elev[ch] = 45; break;
476  case AV_CH_TOP_BACK_RIGHT: azim[ch] = 210;
477  elev[ch] = 45; break;
478  case AV_CH_TOP_BACK_CENTER: azim[ch] = 180;
479  elev[ch] = 45; break;
480  case AV_CH_WIDE_LEFT: azim[ch] = 90; break;
481  case AV_CH_WIDE_RIGHT: azim[ch] = 270; break;
482  case AV_CH_SURROUND_DIRECT_LEFT: azim[ch] = 90; break;
483  case AV_CH_SURROUND_DIRECT_RIGHT: azim[ch] = 270; break;
484  case AV_CH_STEREO_LEFT: azim[ch] = 90; break;
485  case AV_CH_STEREO_RIGHT: azim[ch] = 270; break;
486  case 0: break;
487  default:
488  return AVERROR(EINVAL);
489  }
490 
491  if (s->vspkrpos[m].set) {
492  azim[ch] = s->vspkrpos[m].azim;
493  elev[ch] = s->vspkrpos[m].elev;
494  }
495 
496  if (mask)
497  ch++;
498  }
499 
500  memcpy(speaker_azim, azim, n_conv * sizeof(float));
501  memcpy(speaker_elev, elev, n_conv * sizeof(float));
502 
503  return 0;
504 
505 }
506 
507 static int max_delay(struct NCSofa *sofa)
508 {
509  int i, max = 0;
510 
511  for (i = 0; i < sofa->m_dim * 2; i++) {
512  /* search maximum delay in given SOFA file */
513  max = FFMAX(max, sofa->data_delay[i]);
514  }
515 
516  return max;
517 }
518 
519 static int find_m(SOFAlizerContext *s, int azim, int elev, float radius)
520 {
521  /* get source positions and M of currently selected SOFA file */
522  float *sp_a = s->sofa.sp_a; /* azimuth angle */
523  float *sp_e = s->sofa.sp_e; /* elevation angle */
524  float *sp_r = s->sofa.sp_r; /* radius */
525  int m_dim = s->sofa.m_dim; /* no. measurements */
526  int best_id = 0; /* index m currently closest to desired source pos. */
527  float delta = 1000; /* offset between desired and currently best pos. */
528  float current;
529  int i;
530 
531  for (i = 0; i < m_dim; i++) {
532  /* search through all measurements in currently selected SOFA file */
533  /* distance of current to desired source position: */
534  current = fabs(sp_a[i] - azim) +
535  fabs(sp_e[i] - elev) +
536  fabs(sp_r[i] - radius);
537  if (current <= delta) {
538  /* if current distance is smaller than smallest distance so far */
539  delta = current;
540  best_id = i; /* remember index */
541  }
542  }
543 
544  return best_id;
545 }
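
find_m() is a brute-force nearest-neighbour search: the measurement with the smallest |Δazimuth| + |Δelevation| + |Δradius| wins. A self-contained sketch of the same search on made-up positions:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const float sp_a[3] = { 0.0f, 30.0f, 330.0f };  /* azimuths of 3 measurements */
    const float sp_e[3] = { 0.0f,  0.0f,   0.0f };  /* elevations */
    const float sp_r[3] = { 1.0f,  1.0f,   1.0f };  /* radii */
    const float azim = 25.0f, elev = 0.0f, radius = 1.0f;  /* requested position */
    float delta = 1000.0f;
    int i, best_id = 0;

    for (i = 0; i < 3; i++) {
        float current = fabsf(sp_a[i] - azim) +
                        fabsf(sp_e[i] - elev) +
                        fabsf(sp_r[i] - radius);
        if (current <= delta) {                     /* keep the closest one so far */
            delta = current;
            best_id = i;
        }
    }
    printf("closest measurement: %d\n", best_id);   /* prints 1 (the 30 degree IR) */
    return 0;
}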
546 
547 static int compensate_volume(AVFilterContext *ctx)
548 {
549  struct SOFAlizerContext *s = ctx->priv;
550  float compensate;
551  float energy = 0;
552  float *ir;
553  int m;
554 
555  if (s->sofa.ncid) {
556  /* find IR at front center position in the SOFA file (IR closest to 0°,0°,1m) */
557  struct NCSofa *sofa = &s->sofa;
558  m = find_m(s, 0, 0, 1);
559  /* get energy of that IR and compensate volume */
560  ir = sofa->data_ir + 2 * m * sofa->n_samples;
561  if (sofa->n_samples & 31) {
562  energy = avpriv_scalarproduct_float_c(ir, ir, sofa->n_samples);
563  } else {
564  energy = s->fdsp->scalarproduct_float(ir, ir, sofa->n_samples);
565  }
566  compensate = 256 / (sofa->n_samples * sqrt(energy));
567  av_log(ctx, AV_LOG_DEBUG, "Compensate-factor: %f\n", compensate);
568  ir = sofa->data_ir;
569  /* apply volume compensation to IRs */
570  if (sofa->n_samples & 31) {
571  int i;
572  for (i = 0; i < sofa->n_samples * sofa->m_dim * 2; i++) {
573  ir[i] = ir[i] * compensate;
574  }
575  } else {
576  s->fdsp->vector_fmul_scalar(ir, ir, compensate, sofa->n_samples * sofa->m_dim * 2);
577  emms_c();
578  }
579  }
580 
581  return 0;
582 }
583 
584 typedef struct ThreadData {
585  AVFrame *in, *out;
586  int *write;
587  int **delay;
588  float **ir;
589  int *n_clippings;
590  float **ringbuffer;
591  float **temp_src;
592  FFTComplex **temp_fft;
593 } ThreadData;
594 
595 static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
596 {
597  SOFAlizerContext *s = ctx->priv;
598  ThreadData *td = arg;
599  AVFrame *in = td->in, *out = td->out;
600  int offset = jobnr;
601  int *write = &td->write[jobnr];
602  const int *const delay = td->delay[jobnr];
603  const float *const ir = td->ir[jobnr];
604  int *n_clippings = &td->n_clippings[jobnr];
605  float *ringbuffer = td->ringbuffer[jobnr];
606  float *temp_src = td->temp_src[jobnr];
607  const int n_samples = s->sofa.n_samples; /* length of one IR */
608  const float *src = (const float *)in->data[0]; /* get pointer to audio input buffer */
609  float *dst = (float *)out->data[0]; /* get pointer to audio output buffer */
610  const int in_channels = s->n_conv; /* number of input channels */
611  /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
612  const int buffer_length = s->buffer_length;
613  /* -1 for AND instead of MODULO (applied to powers of 2): */
614  const uint32_t modulo = (uint32_t)buffer_length - 1;
615  float *buffer[16]; /* holds ringbuffer for each input channel */
616  int wr = *write;
617  int read;
618  int i, l;
619 
620  dst += offset;
621  for (l = 0; l < in_channels; l++) {
622  /* get starting address of ringbuffer for each input channel */
623  buffer[l] = ringbuffer + l * buffer_length;
624  }
625 
626  for (i = 0; i < in->nb_samples; i++) {
627  const float *temp_ir = ir; /* using same set of IRs for each sample */
628 
629  *dst = 0;
630  for (l = 0; l < in_channels; l++) {
631  /* write current input sample to ringbuffer (for each channel) */
632  *(buffer[l] + wr) = src[l];
633  }
634 
635  /* loop goes through all channels to be convolved */
636  for (l = 0; l < in_channels; l++) {
637  const float *const bptr = buffer[l];
638 
639  if (l == s->lfe_channel) {
640  /* LFE is an input channel but requires no convolution */
641  /* apply gain to LFE signal and add to output buffer */
642  *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
643  temp_ir += FFALIGN(n_samples, 16);
644  continue;
645  }
646 
647  /* current read position in ringbuffer: input sample write position
648  * - delay for l-th ch. + diff. betw. IR length and buffer length
649  * (mod buffer length) */
650  read = (wr - *(delay + l) - (n_samples - 1) + buffer_length) & modulo;
651 
652  if (read + n_samples < buffer_length) {
653  memcpy(temp_src, bptr + read, n_samples * sizeof(*temp_src));
654  } else {
655  int len = FFMIN(n_samples - (read % n_samples), buffer_length - read);
656 
657  memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
658  memcpy(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
659  }
660 
661  /* multiply signal and IR, and add up the results */
662  dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, n_samples);
663  temp_ir += FFALIGN(n_samples, 16);
664  }
665 
666  /* clippings counter */
667  if (fabs(*dst) > 1)
668  *n_clippings += 1;
669 
670  /* move output buffer pointer by +2 to get to next sample of processed channel: */
671  dst += 2;
672  src += in_channels;
673  wr = (wr + 1) & modulo; /* update ringbuffer write position */
674  }
675 
676  *write = wr; /* remember write position in ringbuffer for next call */
677 
678  return 0;
679 }
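
A minimal single-channel sketch, on toy data, of the ring-buffer convolution above: the buffer length is a power of two, so positions wrap with "& (length - 1)" instead of a modulo, and the IR is stored reversed (as load_data() does for the time-domain path) so a dot product over the buffer slice yields the convolution sum. Values and lengths here are assumptions for illustration only.

#include <stdio.h>

int main(void)
{
    enum { IR_LEN = 4, BUF_LEN = 8 };              /* BUF_LEN is a power of two */
    const unsigned modulo = BUF_LEN - 1;
    /* reversed IR (the original IR is {1, 0.5, 0, 0}) */
    const float ir_rev[IR_LEN] = { 0.0f, 0.0f, 0.5f, 1.0f };
    const float src[6] = { 1, 0, 0, 0, 1, 0 };     /* two impulses */
    float ringbuffer[BUF_LEN] = { 0 };
    int wr = 0;
    int i, j;

    for (i = 0; i < 6; i++) {
        float out = 0.0f;
        int read;

        ringbuffer[wr] = src[i];                   /* write newest sample */
        /* read position: write position minus (IR length - 1), wrapped */
        read = (wr - (IR_LEN - 1) + BUF_LEN) & modulo;
        for (j = 0; j < IR_LEN; j++)               /* dot product with reversed IR */
            out += ir_rev[j] * ringbuffer[(read + j) & modulo];
        printf("%g ", out);                        /* prints: 1 0.5 0 0 1 0.5 */
        wr = (wr + 1) & modulo;                    /* advance write position */
    }
    printf("\n");
    return 0;
}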
680 
681 static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
682 {
683  SOFAlizerContext *s = ctx->priv;
684  ThreadData *td = arg;
685  AVFrame *in = td->in, *out = td->out;
686  int offset = jobnr;
687  int *write = &td->write[jobnr];
688  FFTComplex *hrtf = s->data_hrtf[jobnr]; /* get pointers to current HRTF data */
689  int *n_clippings = &td->n_clippings[jobnr];
690  float *ringbuffer = td->ringbuffer[jobnr];
691  const int n_samples = s->sofa.n_samples; /* length of one IR */
692  const float *src = (const float *)in->data[0]; /* get pointer to audio input buffer */
693  float *dst = (float *)out->data[0]; /* get pointer to audio output buffer */
694  const int in_channels = s->n_conv; /* number of input channels */
695  /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
696  const int buffer_length = s->buffer_length;
697  /* -1 for AND instead of MODULO (applied to powers of 2): */
698  const uint32_t modulo = (uint32_t)buffer_length - 1;
699  FFTComplex *fft_in = s->temp_fft[jobnr]; /* temporary array for FFT input/output data */
700  FFTContext *ifft = s->ifft[jobnr];
701  FFTContext *fft = s->fft[jobnr];
702  const int n_conv = s->n_conv;
703  const int n_fft = s->n_fft;
704  int wr = *write;
705  int n_read;
706  int i, j;
707 
708  dst += offset;
709 
710  /* find minimum between number of samples and output buffer length:
711  * (important, if one IR is longer than the output buffer) */
712  n_read = FFMIN(s->sofa.n_samples, in->nb_samples);
713  for (j = 0; j < n_read; j++) {
714  /* initialize output buf with saved signal from overflow buf */
715  dst[2 * j] = ringbuffer[wr];
716  ringbuffer[wr] = 0.0; /* re-set read samples to zero */
717  /* update ringbuffer read/write position */
718  wr = (wr + 1) & modulo;
719  }
720 
721  /* initialize rest of output buffer with 0 */
722  for (j = n_read; j < in->nb_samples; j++) {
723  dst[2 * j] = 0;
724  }
725 
726  for (i = 0; i < n_conv; i++) {
727  if (i == s->lfe_channel) { /* LFE */
728  for (j = 0; j < in->nb_samples; j++) {
729  /* apply gain to LFE signal and add to output buffer */
730  dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
731  }
732  continue;
733  }
734 
735  /* outer loop: go through all input channels to be convolved */
736  offset = i * n_fft; /* no. samples already processed */
737 
738  /* fill FFT input with 0 (we want to zero-pad) */
739  memset(fft_in, 0, sizeof(FFTComplex) * n_fft);
740 
741  for (j = 0; j < in->nb_samples; j++) {
742  /* prepare input for FFT */
743  /* write all samples of current input channel to FFT input array */
744  fft_in[j].re = src[j * in_channels + i];
745  }
746 
747  /* transform input signal of current channel to frequency domain */
748  av_fft_permute(fft, fft_in);
749  av_fft_calc(fft, fft_in);
750  for (j = 0; j < n_fft; j++) {
751  const float re = fft_in[j].re;
752  const float im = fft_in[j].im;
753 
754  /* complex multiplication of input signal and HRTFs */
755  /* output channel (real): */
756  fft_in[j].re = re * (hrtf + offset + j)->re - im * (hrtf + offset + j)->im;
757  /* output channel (imag): */
758  fft_in[j].im = re * (hrtf + offset + j)->im + im * (hrtf + offset + j)->re;
759  }
760 
761  /* transform output signal of current channel back to time domain */
762  av_fft_permute(ifft, fft_in);
763  av_fft_calc(ifft, fft_in);
764 
765  for (j = 0; j < in->nb_samples; j++) {
766  /* write output signal of current channel to output buffer */
767  dst[2 * j] += fft_in[j].re / (float)n_fft;
768  }
769 
770  for (j = 0; j < n_samples - 1; j++) { /* overflow length is IR length - 1 */
771  /* write the rest of output signal to overflow buffer */
772  int write_pos = (wr + j) & modulo;
773 
774  *(ringbuffer + write_pos) += fft_in[in->nb_samples + j].re / (float)n_fft;
775  }
776  }
777 
778  /* go through all samples of current output buffer: count clippings */
779  for (i = 0; i < out->nb_samples; i++) {
780  /* clippings counter */
781  if (fabs(*dst) > 1) { /* if current output sample > 1 */
782  *n_clippings = *n_clippings + 1;
783  }
784 
785  /* move output buffer pointer by +2 to get to next sample of processed channel: */
786  dst += 2;
787  }
788 
789  /* remember read/write position in ringbuffer for next call */
790  *write = wr;
791 
792  return 0;
793 }
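
The frequency-domain path relies on the convolution theorem: zero-pad the block, transform, multiply point-wise with the HRTF spectrum, transform back, and divide by n_fft because the inverse transform is unnormalized. A toy illustration of that principle with a naive O(N^2) DFT and made-up 8-point signals (for clarity only, not how the filter computes it):

#include <math.h>
#include <stdio.h>

#define N 8

static void dft(const double *re_in, const double *im_in,
                double *re_out, double *im_out, int inverse)
{
    const double pi = 3.14159265358979323846;
    int k, n;

    for (k = 0; k < N; k++) {
        re_out[k] = im_out[k] = 0.0;
        for (n = 0; n < N; n++) {
            double w = (inverse ? 2.0 : -2.0) * pi * k * n / N;
            re_out[k] += re_in[n] * cos(w) - im_in[n] * sin(w);
            im_out[k] += re_in[n] * sin(w) + im_in[n] * cos(w);
        }
    }
}

int main(void)
{
    /* x: two impulses; h: a short "IR"; both zero-padded to N samples */
    double x_re[N] = { 1, 0, 0, 1, 0, 0, 0, 0 }, x_im[N] = { 0 };
    double h_re[N] = { 1, 0.5, 0, 0, 0, 0, 0, 0 }, h_im[N] = { 0 };
    double X_re[N], X_im[N], H_re[N], H_im[N], Y_re[N], Y_im[N];
    double y_re[N], y_im[N];
    int k;

    dft(x_re, x_im, X_re, X_im, 0);                /* forward transform of signal */
    dft(h_re, h_im, H_re, H_im, 0);                /* forward transform of IR */
    for (k = 0; k < N; k++) {                      /* point-wise complex multiply */
        Y_re[k] = X_re[k] * H_re[k] - X_im[k] * H_im[k];
        Y_im[k] = X_re[k] * H_im[k] + X_im[k] * H_re[k];
    }
    dft(Y_re, Y_im, y_re, y_im, 1);                /* unnormalized inverse transform */
    for (k = 0; k < N; k++)
        printf("%g ", y_re[k] / N);                /* ~ 1 0.5 0 1 0.5 0 0 0, up to rounding */
    printf("\n");
    return 0;
}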
794 
795 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
796 {
797  AVFilterContext *ctx = inlink->dst;
798  SOFAlizerContext *s = ctx->priv;
799  AVFilterLink *outlink = ctx->outputs[0];
800  int n_clippings[2] = { 0 };
801  ThreadData td;
802  AVFrame *out;
803 
804  out = ff_get_audio_buffer(outlink, in->nb_samples);
805  if (!out) {
806  av_frame_free(&in);
807  return AVERROR(ENOMEM);
808  }
809  av_frame_copy_props(out, in);
810 
811  td.in = in; td.out = out; td.write = s->write;
812  td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
813  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
814  td.temp_fft = s->temp_fft;
815 
816  if (s->type == TIME_DOMAIN) {
817  ctx->internal->execute(ctx, sofalizer_convolute, &td, NULL, 2);
818  } else {
819  ctx->internal->execute(ctx, sofalizer_fast_convolute, &td, NULL, 2);
820  }
821  emms_c();
822 
823  /* display error message if clipping occurred */
824  if (n_clippings[0] + n_clippings[1] > 0) {
825  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
826  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
827  }
828 
829  av_frame_free(&in);
830  return ff_filter_frame(outlink, out);
831 }
832 
833 static int query_formats(AVFilterContext *ctx)
834 {
835  struct SOFAlizerContext *s = ctx->priv;
836  AVFilterFormats *formats = NULL;
837  AVFilterChannelLayouts *layouts = NULL;
838  int ret, sample_rates[] = { 48000, -1 };
839 
840  ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
841  if (ret)
842  return ret;
843  ret = ff_set_common_formats(ctx, formats);
844  if (ret)
845  return ret;
846 
847  layouts = ff_all_channel_layouts();
848  if (!layouts)
849  return AVERROR(ENOMEM);
850 
851  ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
852  if (ret)
853  return ret;
854 
855  layouts = NULL;
856  ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
857  if (ret)
858  return ret;
859 
860  ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
861  if (ret)
862  return ret;
863 
864  sample_rates[0] = s->sample_rate;
865  formats = ff_make_format_list(sample_rates);
866  if (!formats)
867  return AVERROR(ENOMEM);
868  return ff_set_common_samplerates(ctx, formats);
869 }
870 
871 static int load_data(AVFilterContext *ctx, int azim, int elev, float radius)
872 {
873  struct SOFAlizerContext *s = ctx->priv;
874  const int n_samples = s->sofa.n_samples;
875  int n_conv = s->n_conv; /* no. channels to convolve */
876  int n_fft = s->n_fft;
877  int delay_l[16]; /* broadband delay for each IR */
878  int delay_r[16];
879  int nb_input_channels = ctx->inputs[0]->channels; /* no. input channels */
880  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10); /* gain - 3dB/channel */
881  FFTComplex *data_hrtf_l = NULL;
882  FFTComplex *data_hrtf_r = NULL;
883  FFTComplex *fft_in_l = NULL;
884  FFTComplex *fft_in_r = NULL;
885  float *data_ir_l = NULL;
886  float *data_ir_r = NULL;
887  int offset = 0; /* used for faster pointer arithmetic in the for loop */
888  int m[16]; /* measurement index m of IR closest to required source positions */
889  int i, j, azim_orig = azim, elev_orig = elev;
890 
891  if (!s->sofa.ncid) { /* if an invalid SOFA file has been selected */
892  av_log(ctx, AV_LOG_ERROR, "Selected SOFA file is invalid. Please select valid SOFA file.\n");
893  return AVERROR_INVALIDDATA;
894  }
895 
896  if (s->type == TIME_DOMAIN) {
897  s->temp_src[0] = av_calloc(FFALIGN(n_samples, 16), sizeof(float));
898  s->temp_src[1] = av_calloc(FFALIGN(n_samples, 16), sizeof(float));
899 
900  /* get temporary IR for L and R channel */
901  data_ir_l = av_calloc(n_conv * FFALIGN(n_samples, 16), sizeof(*data_ir_l));
902  data_ir_r = av_calloc(n_conv * FFALIGN(n_samples, 16), sizeof(*data_ir_r));
903  if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
904  av_free(data_ir_l);
905  av_free(data_ir_r);
906  return AVERROR(ENOMEM);
907  }
908  } else {
909  /* get temporary HRTF memory for L and R channel */
910  data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv);
911  data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv);
912  if (!data_hrtf_r || !data_hrtf_l) {
913  av_free(data_hrtf_l);
914  av_free(data_hrtf_r);
915  return AVERROR(ENOMEM);
916  }
917  }
918 
919  for (i = 0; i < s->n_conv; i++) {
920  /* load and store IRs and corresponding delays */
921  azim = (int)(s->speaker_azim[i] + azim_orig) % 360;
922  elev = (int)(s->speaker_elev[i] + elev_orig) % 90;
923  /* get id of IR closest to desired position */
924  m[i] = find_m(s, azim, elev, radius);
925 
926  /* load the delays associated with the current IRs */
927  delay_l[i] = *(s->sofa.data_delay + 2 * m[i]);
928  delay_r[i] = *(s->sofa.data_delay + 2 * m[i] + 1);
929 
930  if (s->type == TIME_DOMAIN) {
931  offset = i * FFALIGN(n_samples, 16); /* no. samples already written */
932  for (j = 0; j < n_samples; j++) {
933  /* load reversed IRs of the specified source position
934  * sample-by-sample for left and right ear; and apply gain */
935  *(data_ir_l + offset + j) = /* left channel */
936  *(s->sofa.data_ir + 2 * m[i] * n_samples + n_samples - 1 - j) * gain_lin;
937  *(data_ir_r + offset + j) = /* right channel */
938  *(s->sofa.data_ir + 2 * m[i] * n_samples + n_samples - 1 - j + n_samples) * gain_lin;
939  }
940  } else {
941  fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
942  fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
943  if (!fft_in_l || !fft_in_r) {
944  av_free(data_hrtf_l);
945  av_free(data_hrtf_r);
946  av_free(fft_in_l);
947  av_free(fft_in_r);
948  return AVERROR(ENOMEM);
949  }
950 
951  offset = i * n_fft; /* no. samples already written */
952  for (j = 0; j < n_samples; j++) {
953  /* load non-reversed IRs of the specified source position
954  * sample-by-sample and apply gain,
955  * L channel is loaded to real part, R channel to imag part,
956  * IRs are shifted by the L and R delays */
957  fft_in_l[delay_l[i] + j].re = /* left channel */
958  *(s->sofa.data_ir + 2 * m[i] * n_samples + j) * gain_lin;
959  fft_in_r[delay_r[i] + j].re = /* right channel */
960  *(s->sofa.data_ir + (2 * m[i] + 1) * n_samples + j) * gain_lin;
961  }
962 
963  /* actually transform to frequency domain (IRs -> HRTFs) */
964  av_fft_permute(s->fft[0], fft_in_l);
965  av_fft_calc(s->fft[0], fft_in_l);
966  memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
967  av_fft_permute(s->fft[0], fft_in_r);
968  av_fft_calc(s->fft[0], fft_in_r);
969  memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
970  }
971 
972  av_log(ctx, AV_LOG_DEBUG, "Index: %d, Azimuth: %f, Elevation: %f, Radius: %f of SOFA file.\n",
973  m[i], *(s->sofa.sp_a + m[i]), *(s->sofa.sp_e + m[i]), *(s->sofa.sp_r + m[i]));
974  }
975 
976  if (s->type == TIME_DOMAIN) {
977  /* copy IRs and delays to allocated memory in the SOFAlizerContext struct: */
978  memcpy(s->data_ir[0], data_ir_l, sizeof(float) * n_conv * FFALIGN(n_samples, 16));
979  memcpy(s->data_ir[1], data_ir_r, sizeof(float) * n_conv * FFALIGN(n_samples, 16));
980 
981  av_freep(&data_ir_l); /* free temporary IR memory */
982  av_freep(&data_ir_r);
983  } else {
984  s->data_hrtf[0] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex));
985  s->data_hrtf[1] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex));
986  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
987  av_freep(&data_hrtf_l);
988  av_freep(&data_hrtf_r);
989  av_freep(&fft_in_l);
990  av_freep(&fft_in_r);
991  return AVERROR(ENOMEM); /* memory allocation failed */
992  }
993 
994  memcpy(s->data_hrtf[0], data_hrtf_l, /* copy HRTF data to */
995  sizeof(FFTComplex) * n_conv * n_fft); /* filter struct */
996  memcpy(s->data_hrtf[1], data_hrtf_r,
997  sizeof(FFTComplex) * n_conv * n_fft);
998 
999  av_freep(&data_hrtf_l); /* free temporary HRTF memory */
1000  av_freep(&data_hrtf_r);
1001 
1002  av_freep(&fft_in_l); /* free temporary FFT memory */
1003  av_freep(&fft_in_r);
1004  }
1005 
1006  memcpy(s->delay[0], &delay_l[0], sizeof(int) * s->n_conv);
1007  memcpy(s->delay[1], &delay_r[0], sizeof(int) * s->n_conv);
1008 
1009  return 0;
1010 }
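
Worked example of the dB-to-linear conversion used for gain_lin above: expf(x / 20 * M_LN10) is 10^(x/20), so 0 dB filter gain over 5 input channels gives roughly 0.178 (-15 dB) per channel. The values below are assumptions for illustration.

#include <math.h>
#include <stdio.h>

int main(void)
{
    const float ln10 = 2.30258509299404568402f;    /* value of M_LN10 */
    float gain_db = 0.0f;                          /* the "gain" option */
    int nb_input_channels = 5;                     /* e.g. a 5.0 input */
    /* same formula as above: 10^((gain - 3 dB per input channel) / 20) */
    float gain_lin = expf((gain_db - 3 * nb_input_channels) / 20 * ln10);

    printf("%f\n", gain_lin);                      /* about 0.178, i.e. -15 dB */
    return 0;
}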
1011 
1012 static av_cold int init(AVFilterContext *ctx)
1013 {
1014  SOFAlizerContext *s = ctx->priv;
1015  int ret;
1016 
1017  if (!s->filename) {
1018  av_log(ctx, AV_LOG_ERROR, "Valid SOFA filename must be set.\n");
1019  return AVERROR(EINVAL);
1020  }
1021 
1022  /* load SOFA file, */
1023  /* initialize file IDs to 0 before attempting to load SOFA files,
1024  * this ensures that, in case of error, only the memory of already
1025  * loaded files is freed */
1026  s->sofa.ncid = 0;
1027  ret = load_sofa(ctx, s->filename, &s->sample_rate);
1028  if (ret) {
1029  /* file loading error */
1030  av_log(ctx, AV_LOG_ERROR, "Error while loading SOFA file: '%s'\n", s->filename);
1031  } else { /* no file loading error, resampling not required */
1032  av_log(ctx, AV_LOG_DEBUG, "File '%s' loaded.\n", s->filename);
1033  }
1034 
1035  if (ret) {
1036  av_log(ctx, AV_LOG_ERROR, "No valid SOFA file could be loaded. Please specify valid SOFA file.\n");
1037  return ret;
1038  }
1039 
1040  s->fdsp = avpriv_float_dsp_alloc(0);
1041  if (!s->fdsp)
1042  return AVERROR(ENOMEM);
1043 
1044  return 0;
1045 }
1046 
1047 static int config_input(AVFilterLink *inlink)
1048 {
1049  AVFilterContext *ctx = inlink->dst;
1050  SOFAlizerContext *s = ctx->priv;
1051  int nb_input_channels = inlink->channels; /* no. input channels */
1052  int n_max_ir = 0;
1053  int n_current;
1054  int n_max = 0;
1055  int ret;
1056 
1057  if (s->type == FREQUENCY_DOMAIN) {
1058  inlink->partial_buf_size =
1059  inlink->min_samples =
1060  inlink->max_samples = inlink->sample_rate;
1061  }
1062 
1063  /* gain -3 dB per channel, -6 dB to get LFE on a similar level */
1064  s->gain_lfe = expf((s->gain - 3 * inlink->channels - 6) / 20 * M_LN10);
1065 
1066  s->n_conv = nb_input_channels;
1067 
1068  /* get size of ringbuffer (longest IR plus max. delay) */
1069  /* then choose next power of 2 for performance optimization */
1070  n_current = s->sofa.n_samples + max_delay(&s->sofa);
1071  if (n_current > n_max) {
1072  /* length of longest IR plus max. delay (in all SOFA files) */
1073  n_max = n_current;
1074  /* length of longest IR (without delay, in all SOFA files) */
1075  n_max_ir = s->sofa.n_samples;
1076  }
1077  /* buffer length is longest IR plus max. delay -> next power of 2
1078  (32 - count leading zeros gives required exponent) */
1079  s->buffer_length = 1 << (32 - ff_clz(n_max));
1080  s->n_fft = 1 << (32 - ff_clz(n_max + inlink->sample_rate));
1081 
1082  if (s->type == FREQUENCY_DOMAIN) {
1083  av_fft_end(s->fft[0]);
1084  av_fft_end(s->fft[1]);
1085  s->fft[0] = av_fft_init(log2(s->n_fft), 0);
1086  s->fft[1] = av_fft_init(log2(s->n_fft), 0);
1087  av_fft_end(s->ifft[0]);
1088  av_fft_end(s->ifft[1]);
1089  s->ifft[0] = av_fft_init(log2(s->n_fft), 1);
1090  s->ifft[1] = av_fft_init(log2(s->n_fft), 1);
1091 
1092  if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
1093  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
1094  return AVERROR(ENOMEM);
1095  }
1096  }
1097 
1098  /* Allocate memory for the impulse responses, delays and the ringbuffers */
1099  /* size: (longest IR) * (number of channels to convolute) */
1100  s->data_ir[0] = av_calloc(FFALIGN(n_max_ir, 16), sizeof(float) * s->n_conv);
1101  s->data_ir[1] = av_calloc(FFALIGN(n_max_ir, 16), sizeof(float) * s->n_conv);
1102  /* length: number of channels to convolute */
1103  s->delay[0] = av_malloc_array(s->n_conv, sizeof(int));
1104  s->delay[1] = av_malloc_array(s->n_conv, sizeof(int));
1105  /* length: (buffer length) * (number of input channels),
1106  * OR: buffer length (if frequency domain processing)
1107  * calloc zero-initializes the buffer */
1108 
1109  if (s->type == TIME_DOMAIN) {
1110  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
1111  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
1112  } else {
1113  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
1114  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
1115  s->temp_fft[0] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
1116  s->temp_fft[1] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
1117  if (!s->temp_fft[0] || !s->temp_fft[1])
1118  return AVERROR(ENOMEM);
1119  }
1120 
1121  /* length: number of channels to convolute */
1122  s->speaker_azim = av_calloc(s->n_conv, sizeof(*s->speaker_azim));
1123  s->speaker_elev = av_calloc(s->n_conv, sizeof(*s->speaker_elev));
1124 
1125  /* memory allocation failed: */
1126  if (!s->data_ir[0] || !s->data_ir[1] || !s->delay[1] ||
1127  !s->delay[0] || !s->ringbuffer[0] || !s->ringbuffer[1] ||
1128  !s->speaker_azim || !s->speaker_elev)
1129  return AVERROR(ENOMEM);
1130 
1131  compensate_volume(ctx);
1132 
1133  /* get speaker positions */
1134  if ((ret = get_speaker_pos(ctx, s->speaker_azim, s->speaker_elev)) < 0) {
1135  av_log(ctx, AV_LOG_ERROR, "Couldn't get speaker positions. Input channel configuration not supported.\n");
1136  return ret;
1137  }
1138 
1139  /* load IRs to data_ir[0] and data_ir[1] for required directions */
1140  if ((ret = load_data(ctx, s->rotation, s->elevation, s->radius)) < 0)
1141  return ret;
1142 
1143  av_log(ctx, AV_LOG_DEBUG, "Samplerate: %d Channels to convolute: %d, Length of ringbuffer: %d x %d\n",
1144  inlink->sample_rate, s->n_conv, nb_input_channels, s->buffer_length);
1145 
1146  return 0;
1147 }
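
A small sketch of the next-power-of-two sizing used for buffer_length and n_fft above, with __builtin_clz (the GCC/Clang builtin that ff_clz maps to on most builds) and an assumed n_max of 300:

#include <stdio.h>

int main(void)
{
    int n_max = 300;                        /* longest IR plus maximum delay (assumed) */
    /* 32 - clz(300) = 32 - 23 = 9, so the ring buffer gets 1 << 9 = 512 slots */
    int buffer_length = 1 << (32 - __builtin_clz(n_max));

    printf("%d\n", buffer_length);          /* prints 512 */
    return 0;
}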
1148 
1149 static av_cold void uninit(AVFilterContext *ctx)
1150 {
1151  SOFAlizerContext *s = ctx->priv;
1152 
1153  if (s->sofa.ncid) {
1154  av_freep(&s->sofa.sp_a);
1155  av_freep(&s->sofa.sp_e);
1156  av_freep(&s->sofa.sp_r);
1157  av_freep(&s->sofa.data_delay);
1158  av_freep(&s->sofa.data_ir);
1159  }
1160  av_fft_end(s->ifft[0]);
1161  av_fft_end(s->ifft[1]);
1162  av_fft_end(s->fft[0]);
1163  av_fft_end(s->fft[1]);
1164  av_freep(&s->delay[0]);
1165  av_freep(&s->delay[1]);
1166  av_freep(&s->data_ir[0]);
1167  av_freep(&s->data_ir[1]);
1168  av_freep(&s->ringbuffer[0]);
1169  av_freep(&s->ringbuffer[1]);
1170  av_freep(&s->speaker_azim);
1171  av_freep(&s->speaker_elev);
1172  av_freep(&s->temp_src[0]);
1173  av_freep(&s->temp_src[1]);
1174  av_freep(&s->temp_fft[0]);
1175  av_freep(&s->temp_fft[1]);
1176  av_freep(&s->data_hrtf[0]);
1177  av_freep(&s->data_hrtf[1]);
1178  av_freep(&s->fdsp);
1179 }
1180 
1181 #define OFFSET(x) offsetof(SOFAlizerContext, x)
1182 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1183 
1184 static const AVOption sofalizer_options[] = {
1185  { "sofa", "sofa filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
1186  { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
1187  { "rotation", "set rotation" , OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -360, 360, .flags = FLAGS },
1188  { "elevation", "set elevation", OFFSET(elevation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -90, 90, .flags = FLAGS },
1189  { "radius", "set radius", OFFSET(radius), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 3, .flags = FLAGS },
1190  { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, "type" },
1191  { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, "type" },
1192  { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, "type" },
1193  { "speakers", "set speaker custom positions", OFFSET(speakers_pos), AV_OPT_TYPE_STRING, {.str=0}, 0, 0, .flags = FLAGS },
1194  { NULL }
1195 };
1196 
1197 AVFILTER_DEFINE_CLASS(sofalizer);
1198 
1199 static const AVFilterPad inputs[] = {
1200  {
1201  .name = "default",
1202  .type = AVMEDIA_TYPE_AUDIO,
1203  .config_props = config_input,
1204  .filter_frame = filter_frame,
1205  },
1206  { NULL }
1207 };
1208 
1209 static const AVFilterPad outputs[] = {
1210  {
1211  .name = "default",
1212  .type = AVMEDIA_TYPE_AUDIO,
1213  },
1214  { NULL }
1215 };
1216 
1217 AVFilter ff_af_sofalizer = {
1218  .name = "sofalizer",
1219  .description = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),
1220  .priv_size = sizeof(SOFAlizerContext),
1221  .priv_class = &sofalizer_class,
1222  .init = init,
1223  .uninit = uninit,
1224  .query_formats = query_formats,
1225  .inputs = inputs,
1226  .outputs = outputs,
1227  .flags = AVFILTER_FLAG_SLICE_THREADS,
1228 };
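
For reference, the options registered above are used from a filter string, for example something like sofalizer=sofa=/path/to/hrtfs.sofa:type=freq:radius=1 (the SOFA path is installation-specific), and custom virtual speaker positions can be supplied as speakers=FL 45|FR 315, i.e. channel name, azimuth and optional elevation, '|'-separated, matching parse_speaker_pos() above. Note that query_formats() pins the link sample rate to the one read from the SOFA file and fixes the output channel layout to stereo.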
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:681
#define NULL
Definition: coverity.c:32
FFTComplex * data_hrtf[2]
Definition: af_sofalizer.c:104
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
AVFrame * out
Definition: af_sofalizer.c:585
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
#define AV_CH_TOP_FRONT_RIGHT
AVOption.
Definition: opt.h:245
av_cold void av_fft_end(FFTContext *s)
Definition: avfft.c:48
float re
Definition: fft.c:82
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
Main libavfilter public API header.
int * n_clippings
Definition: af_sofalizer.c:589
AVFILTER_DEFINE_CLASS(sofalizer)
#define AV_CH_TOP_FRONT_LEFT
FFTContext * fft[2]
Definition: af_sofalizer.c:103
#define AV_CH_TOP_FRONT_CENTER
#define AV_CH_LOW_FREQUENCY_2
static enum AVSampleFormat formats[]
Definition: avresample.c:163
float(* scalarproduct_float)(const float *v1, const float *v2, int len)
Calculate the scalar product of two vectors of floats.
Definition: float_dsp.h:159
FFTSample re
Definition: avfft.h:38
void av_fft_permute(FFTContext *s, FFTComplex *z)
Do the permutation needed BEFORE calling ff_fft_calc().
Definition: avfft.c:38
#define AV_CH_SURROUND_DIRECT_RIGHT
#define AV_CH_LAYOUT_STEREO
AVFilter ff_af_sofalizer
#define log2(x)
Definition: libm.h:404
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:260
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:59
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:315
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:435
float avpriv_scalarproduct_float_c(const float *v1, const float *v2, int len)
Return the scalar product of two vectors.
Definition: float_dsp.c:108
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1189
AVFrame * in
Definition: af_sofalizer.c:585
#define AV_CH_WIDE_LEFT
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
float delta
AVOptions.
#define AV_CH_TOP_BACK_LEFT
float ** temp_src
Definition: af_sofalizer.c:591
#define AV_CH_WIDE_RIGHT
#define AV_CH_TOP_BACK_CENTER
#define AV_CH_LOW_FREQUENCY
#define FLAGS
#define AV_CH_BACK_LEFT
static int find_m(SOFAlizerContext *s, int azim, int elev, float radius)
Definition: af_sofalizer.c:519
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
float * data_ir
Definition: af_sofalizer.c:54
A filter pad used for either input or output.
Definition: internal.h:53
#define expf(x)
Definition: libm.h:283
AVFloatDSPContext * fdsp
Definition: af_sofalizer.c:106
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius)
Definition: af_sofalizer.c:871
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
#define td
Definition: regdef.h:70
FFTComplex ** temp_fft
Definition: af_sofalizer.c:592
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:343
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:64
static const uint16_t mask[17]
Definition: lzw.c:38
static int get_speaker_pos(AVFilterContext *ctx, float *speaker_azim, float *speaker_elev)
Definition: af_sofalizer.c:432
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_sofalizer.c:795
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
static int query_formats(AVFilterContext *ctx)
Definition: af_sofalizer.c:833
static int close_sofa(struct NCSofa *sofa)
Definition: af_sofalizer.c:109
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
float * sp_e
Definition: af_sofalizer.c:51
void * priv
private data for use by the filter
Definition: avfilter.h:322
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
const char * arg
Definition: jacosubdec.c:66
float ** ringbuffer
Definition: af_sofalizer.c:590
float * data_ir[2]
Definition: af_sofalizer.c:89
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:337
FFTContext * av_fft_init(int nbits, int inverse)
Set up a complex FFT.
Definition: avfft.c:28
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
static int max_delay(struct NCSofa *sofa)
Definition: af_sofalizer.c:507
#define FFMAX(a, b)
Definition: common.h:94
static const int sample_rates[]
Definition: dcaenc.h:32
int * data_delay
Definition: af_sofalizer.c:48
#define AV_CH_STEREO_RIGHT
See AV_CH_STEREO_LEFT.
#define AV_CH_TOP_CENTER
float * ringbuffer[2]
Definition: af_sofalizer.c:79
float * speaker_azim
Definition: af_sofalizer.c:70
Definition: fft.h:88
audio channel layout utility functions
#define FFMIN(a, b)
Definition: common.h:96
#define ff_clz
Definition: intmath.h:142
AVS_Value args
Definition: avisynth_c.h:699
AVFormatContext * ctx
Definition: movenc.c:48
#define TIME_DOMAIN
Definition: af_sofalizer.c:41
void(* vector_fmul_scalar)(float *dst, const float *src, float mul, int len)
Multiply a vector of floats by a scalar float.
Definition: float_dsp.h:69
#define AV_CH_FRONT_LEFT_OF_CENTER
#define AV_CH_FRONT_CENTER
static int load_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
Definition: af_sofalizer.c:122
int n_samples
Definition: af_sofalizer.c:46
static const AVFilterPad outputs[]
#define src
Definition: vp9dsp.c:530
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:401
#define AV_CH_FRONT_RIGHT_OF_CENTER
A list of supported channel layouts.
Definition: formats.h:85
static const AVOption sofalizer_options[]
sample_rate
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:267
FFT functions.
static int config_input(AVFilterLink *inlink)
float * sp_a
Definition: af_sofalizer.c:50
#define AV_CH_FRONT_LEFT
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
void * buf
Definition: avisynth_c.h:690
int ** delay
Definition: af_sofalizer.c:587
GLint GLenum type
Definition: opengl_enc.c:105
#define AV_CH_TOP_BACK_RIGHT
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
int m_dim
Definition: af_sofalizer.c:47
static int parse_channel_name(char **arg, int *rchannel)
Definition: af_sofalizer.c:376
int ncid
Definition: af_sofalizer.c:45
float im
Definition: fft.c:82
const char * name
Filter name.
Definition: avfilter.h:148
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:119
float * speaker_elev
Definition: af_sofalizer.c:71
static const AVFilterPad inputs[]
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:319
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:347
static int flags
Definition: cpu.c:47
#define AV_CH_BACK_CENTER
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
#define AV_CH_SIDE_RIGHT
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:595
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:184
#define M_LN10
Definition: mathematics.h:43
FFTContext * ifft[2]
Definition: af_sofalizer.c:103
float * sp_r
Definition: af_sofalizer.c:52
static void fft(const int32_t in[2 *256], cplx32 out[256])
Definition: dcaenc.c:339
FFTSample im
Definition: avfft.h:38
#define FREQUENCY_DOMAIN
Definition: af_sofalizer.c:42
avfilter_execute_func * execute
Definition: internal.h:153
static av_cold int init(AVFilterContext *ctx)
#define av_free(p)
int len
static void parse_speaker_pos(AVFilterContext *ctx, int64_t in_channel_layout)
Definition: af_sofalizer.c:402
float * temp_src[2]
Definition: af_sofalizer.c:91
A list of supported formats for one end of a filter link.
Definition: formats.h:64
uint64_t layout
#define AV_CH_SURROUND_DIRECT_LEFT
An instance of a filter.
Definition: avfilter.h:307
#define AV_CH_FRONT_RIGHT
VirtualSpeaker vspkrpos[64]
Definition: af_sofalizer.c:101
FILE * out
Definition: movenc.c:54
static int compensate_volume(AVFilterContext *ctx)
Definition: af_sofalizer.c:547
float ** ir
Definition: af_sofalizer.c:588
#define av_freep(p)
#define OFFSET(x)
#define av_malloc_array(a, b)
#define AV_CH_SIDE_LEFT
internal API functions
FFTComplex * temp_fft[2]
Definition: af_sofalizer.c:92
static av_cold void uninit(AVFilterContext *ctx)
void av_fft_calc(FFTContext *s, FFTComplex *z)
Do a complex FFT with the parameters defined in av_fft_init().
Definition: avfft.c:43
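The entry above points at FFmpeg's avfft API (declared in libavcodec/avfft.h, which this file includes). As a rough, illustrative sketch only, not code taken from this filter: the usual pattern is to create a context with av_fft_init() for a power-of-two transform length, permute the input with av_fft_permute(), and then run the in-place transform with av_fft_calc(). The transform size, function name and input handling below are hypothetical.

static void fft_sketch(const float *input, int n)
{
    int nbits = 7;                               /* transform length = 1 << 7 = 128 samples */
    FFTContext *fft = av_fft_init(nbits, 0);     /* second argument 0 = forward transform */
    FFTComplex buf[1 << 7];
    int i;

    if (!fft)
        return;
    for (i = 0; i < (1 << nbits); i++) {         /* real input, zero imaginary part */
        buf[i].re = i < n ? input[i] : 0.0f;
        buf[i].im = 0.0f;
    }
    av_fft_permute(fft, buf);                    /* reorder input as av_fft_calc() expects */
    av_fft_calc(fft, buf);                       /* in-place complex FFT; result stays in buf */
    av_fft_end(fft);
}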
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:241
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:589
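av_frame_copy_props() is what audio filters commonly use to carry timestamps and metadata from the input frame over to a freshly allocated output frame. A minimal sketch of that pattern follows; the function and variable names are illustrative and the headers it needs (avfilter.h, audio.h, libavutil/frame.h) are already included at the top of this file.

static int copy_props_sketch(AVFilterLink *outlink, AVFrame *in)
{
    /* allocate an output frame holding the same number of samples as the input */
    AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);

    if (!out)
        return AVERROR(ENOMEM);
    /* copies pts, metadata and other "props", but not the sample data itself;
     * the filtered audio still has to be written into out->extended_data */
    av_frame_copy_props(out, in);
    return 0;
}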
GLuint buffer
Definition: opengl_enc.c:102
#define AV_CH_BACK_RIGHT
#define AV_CH_STEREO_LEFT
Stereo downmix.