FFmpeg
af_dialoguenhance.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2022 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public License
8  * as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public License
17  * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
18  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
#include <float.h>

#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/tx.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "window_func.h"
31 
33  const AVClass *class;
34 
36 
37  int fft_size;
38  int overlap;
39 
40  float *window;
41  float prev_vad;
42 
50 
53 } AudioDialogueEnhanceContext;
54 
55 #define OFFSET(x) offsetof(AudioDialogueEnhanceContext, x)
56 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM
57 
/* User-visible options; all are runtime-adjustable (FLAGS includes
 * AV_OPT_FLAG_RUNTIME_PARAM and are applied via ff_filter_process_command). */
static const AVOption dialoguenhance_options[] = {
    { "original", "set original center factor", OFFSET(original), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "enhance", "set dialogue enhance factor",OFFSET(enhance), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 3, FLAGS },
    { "voice", "set voice detection factor", OFFSET(voice), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 2,32, FLAGS },
    {NULL}
};
64 
65 AVFILTER_DEFINE_CLASS(dialoguenhance);
66 
68 {
70  AVFilterChannelLayouts *in_layout = NULL, *out_layout = NULL;
71  int ret;
72 
73  if ((ret = ff_add_format (&formats, AV_SAMPLE_FMT_FLTP )) < 0 ||
74  (ret = ff_set_common_formats (ctx , formats )) < 0 ||
76  (ret = ff_channel_layouts_ref(in_layout, &ctx->inputs[0]->outcfg.channel_layouts)) < 0 ||
78  (ret = ff_channel_layouts_ref(out_layout, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
79  return ret;
80 
82 }
83 
85 {
86  AVFilterContext *ctx = inlink->dst;
87  AudioDialogueEnhanceContext *s = ctx->priv;
88  float scale = 1.f, iscale, overlap;
89  int ret;
90 
91  s->fft_size = inlink->sample_rate > 100000 ? 8192 : inlink->sample_rate > 50000 ? 4096 : 2048;
92  s->overlap = s->fft_size / 4;
93 
94  s->window = av_calloc(s->fft_size, sizeof(*s->window));
95  if (!s->window)
96  return AVERROR(ENOMEM);
97 
98  s->in_frame = ff_get_audio_buffer(inlink, s->fft_size * 4);
99  s->center_frame = ff_get_audio_buffer(inlink, s->fft_size * 4);
100  s->out_dist_frame = ff_get_audio_buffer(inlink, s->fft_size * 4);
101  s->windowed_frame = ff_get_audio_buffer(inlink, s->fft_size * 4);
102  s->windowed_out = ff_get_audio_buffer(inlink, s->fft_size * 4);
103  s->windowed_prev = ff_get_audio_buffer(inlink, s->fft_size * 4);
104  if (!s->in_frame || !s->windowed_out || !s->windowed_prev ||
105  !s->out_dist_frame || !s->windowed_frame || !s->center_frame)
106  return AVERROR(ENOMEM);
107 
108  generate_window_func(s->window, s->fft_size, WFUNC_SINE, &overlap);
109 
110  iscale = 1.f / s->fft_size;
111 
112  ret = av_tx_init(&s->tx_ctx[0], &s->tx_fn, AV_TX_FLOAT_RDFT, 0, s->fft_size, &scale, 0);
113  if (ret < 0)
114  return ret;
115 
116  ret = av_tx_init(&s->tx_ctx[1], &s->tx_fn, AV_TX_FLOAT_RDFT, 0, s->fft_size, &scale, 0);
117  if (ret < 0)
118  return ret;
119 
120  ret = av_tx_init(&s->itx_ctx, &s->itx_fn, AV_TX_FLOAT_RDFT, 1, s->fft_size, &iscale, 0);
121  if (ret < 0)
122  return ret;
123 
124  return 0;
125 }
126 
127 static void apply_window(AudioDialogueEnhanceContext *s,
128  const float *in_frame, float *out_frame, const int add_to_out_frame)
129 {
130  const float *window = s->window;
131 
132  if (add_to_out_frame) {
133  for (int i = 0; i < s->fft_size; i++)
134  out_frame[i] += in_frame[i] * window[i];
135  } else {
136  for (int i = 0; i < s->fft_size; i++)
137  out_frame[i] = in_frame[i] * window[i];
138  }
139 }
140 
/* Square of a float. */
static float sqrf(float x)
{
    const float v = x;

    return v * v;
}
145 
147  AVComplexFloat *center, int N)
148 {
149  for (int i = 0; i < N; i++) {
150  const float l_re = left[i].re;
151  const float l_im = left[i].im;
152  const float r_re = right[i].re;
153  const float r_im = right[i].im;
154  const float a = 0.5f * (1.f - sqrtf((sqrf(l_re - r_re) + sqrf(l_im - r_im))/
155  (sqrf(l_re + r_re) + sqrf(l_im + r_im) + FLT_EPSILON)));
156 
157  center[i].re = a * (l_re + r_re);
158  center[i].im = a * (l_im + r_im);
159  }
160 }
161 
162 static float flux(float *curf, float *prevf, int N)
163 {
164  AVComplexFloat *cur = (AVComplexFloat *)curf;
165  AVComplexFloat *prev = (AVComplexFloat *)prevf;
166  float sum = 0.f;
167 
168  for (int i = 0; i < N; i++) {
169  float c_re = cur[i].re;
170  float c_im = cur[i].im;
171  float p_re = prev[i].re;
172  float p_im = prev[i].im;
173 
174  sum += sqrf(hypotf(c_re, c_im) - hypotf(p_re, p_im));
175  }
176 
177  return sum;
178 }
179 
180 static float fluxlr(float *lf, float *lpf,
181  float *rf, float *rpf,
182  int N)
183 {
184  AVComplexFloat *l = (AVComplexFloat *)lf;
187  AVComplexFloat *rp = (AVComplexFloat *)rpf;
188  float sum = 0.f;
189 
190  for (int i = 0; i < N; i++) {
191  float c_re = l[i].re - r[i].re;
192  float c_im = l[i].im - r[i].im;
193  float p_re = lp[i].re - rp[i].re;
194  float p_im = lp[i].im - rp[i].im;
195 
196  sum += sqrf(hypotf(c_re, c_im) - hypotf(p_re, p_im));
197  }
198 
199  return sum;
200 }
201 
/* Voice-activity estimate in [0,1]: scale how much of the total flux
 * comes from the center (fc) versus the side signal (flr); factor `a`
 * is the user's "voice" option. */
static float calc_vad(float fc, float flr, float a)
{
    const float center_ratio = fc / (fc + flr);

    return av_clipf(a * (center_ratio - 0.5f), 0.f, 1.f);
}
208 
209 static void get_final(float *c, float *l,
210  float *r, float vad, int N,
211  float original, float enhance)
212 {
213  AVComplexFloat *center = (AVComplexFloat *)c;
215  AVComplexFloat *right = (AVComplexFloat *)r;
216 
217  for (int i = 0; i < N; i++) {
218  float cP = sqrf(center[i].re) + sqrf(center[i].im);
219  float lrP = sqrf(left[i].re - right[i].re) + sqrf(left[i].im - right[i].im);
220  float G = cP / (cP + lrP + FLT_EPSILON);
221  float re, im;
222 
223  re = center[i].re * (original + vad * G * enhance);
224  im = center[i].im * (original + vad * G * enhance);
225 
226  center[i].re = re;
227  center[i].im = im;
228  }
229 }
230 
232 {
233  AudioDialogueEnhanceContext *s = ctx->priv;
234  float *center = (float *)s->center_frame->extended_data[0];
235  float *center_prev = (float *)s->center_frame->extended_data[1];
236  float *left_in = (float *)s->in_frame->extended_data[0];
237  float *right_in = (float *)s->in_frame->extended_data[1];
238  float *left_out = (float *)s->out_dist_frame->extended_data[0];
239  float *right_out = (float *)s->out_dist_frame->extended_data[1];
240  float *left_samples = (float *)s->in->extended_data[0];
241  float *right_samples = (float *)s->in->extended_data[1];
242  float *windowed_left = (float *)s->windowed_frame->extended_data[0];
243  float *windowed_right = (float *)s->windowed_frame->extended_data[1];
244  float *windowed_oleft = (float *)s->windowed_out->extended_data[0];
245  float *windowed_oright = (float *)s->windowed_out->extended_data[1];
246  float *windowed_pleft = (float *)s->windowed_prev->extended_data[0];
247  float *windowed_pright = (float *)s->windowed_prev->extended_data[1];
248  float *left_osamples = (float *)out->extended_data[0];
249  float *right_osamples = (float *)out->extended_data[1];
250  float *center_osamples = (float *)out->extended_data[2];
251  const int offset = s->fft_size - s->overlap;
252  float vad;
253 
254  // shift in/out buffers
255  memmove(left_in, &left_in[s->overlap], offset * sizeof(float));
256  memmove(right_in, &right_in[s->overlap], offset * sizeof(float));
257  memmove(left_out, &left_out[s->overlap], offset * sizeof(float));
258  memmove(right_out, &right_out[s->overlap], offset * sizeof(float));
259 
260  memcpy(&left_in[offset], left_samples, s->overlap * sizeof(float));
261  memcpy(&right_in[offset], right_samples, s->overlap * sizeof(float));
262  memset(&left_out[offset], 0, s->overlap * sizeof(float));
263  memset(&right_out[offset], 0, s->overlap * sizeof(float));
264 
265  apply_window(s, left_in, windowed_left, 0);
266  apply_window(s, right_in, windowed_right, 0);
267 
268  s->tx_fn(s->tx_ctx[0], windowed_oleft, windowed_left, sizeof(float));
269  s->tx_fn(s->tx_ctx[1], windowed_oright, windowed_right, sizeof(float));
270 
271  get_centere((AVComplexFloat *)windowed_oleft,
272  (AVComplexFloat *)windowed_oright,
273  (AVComplexFloat *)center,
274  s->fft_size / 2 + 1);
275 
276  vad = calc_vad(flux(center, center_prev, s->fft_size / 2 + 1),
277  fluxlr(windowed_oleft, windowed_pleft,
278  windowed_oright, windowed_pright, s->fft_size / 2 + 1), s->voice);
279  vad = vad * 0.1 + 0.9 * s->prev_vad;
280  s->prev_vad = vad;
281 
282  memcpy(center_prev, center, s->fft_size * sizeof(float));
283  memcpy(windowed_pleft, windowed_oleft, s->fft_size * sizeof(float));
284  memcpy(windowed_pright, windowed_oright, s->fft_size * sizeof(float));
285 
286  get_final(center, windowed_oleft, windowed_oright, vad, s->fft_size / 2 + 1,
287  s->original, s->enhance);
288 
289  s->itx_fn(s->itx_ctx, windowed_oleft, center, sizeof(float));
290 
291  apply_window(s, windowed_oleft, left_out, 1);
292 
293  for (int i = 0; i < s->overlap; i++) {
294  // 4 times overlap with squared hanning window results in 1.5 time increase in amplitude
295  if (!ctx->is_disabled)
296  center_osamples[i] = left_out[i] / 1.5f;
297  else
298  center_osamples[i] = 0.f;
299  left_osamples[i] = left_in[i];
300  right_osamples[i] = right_in[i];
301  }
302 
303  return 0;
304 }
305 
307 {
308  AVFilterContext *ctx = inlink->dst;
309  AVFilterLink *outlink = ctx->outputs[0];
310  AudioDialogueEnhanceContext *s = ctx->priv;
311  AVFrame *out;
312  int ret;
313 
314  out = ff_get_audio_buffer(outlink, s->overlap);
315  if (!out) {
316  ret = AVERROR(ENOMEM);
317  goto fail;
318  }
319 
320  s->in = in;
321  de_stereo(ctx, out);
322 
323  out->pts = in->pts;
324  out->nb_samples = in->nb_samples;
325  ret = ff_filter_frame(outlink, out);
326 fail:
327  av_frame_free(&in);
328  s->in = NULL;
329  return ret < 0 ? ret : 0;
330 }
331 
333 {
334  AVFilterLink *inlink = ctx->inputs[0];
335  AVFilterLink *outlink = ctx->outputs[0];
336  AudioDialogueEnhanceContext *s = ctx->priv;
337  AVFrame *in = NULL;
338  int ret = 0, status;
339  int64_t pts;
340 
342 
343  ret = ff_inlink_consume_samples(inlink, s->overlap, s->overlap, &in);
344  if (ret < 0)
345  return ret;
346 
347  if (ret > 0) {
348  return filter_frame(inlink, in);
349  } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
350  ff_outlink_set_status(outlink, status, pts);
351  return 0;
352  } else {
353  if (ff_inlink_queued_samples(inlink) >= s->overlap) {
355  } else if (ff_outlink_frame_wanted(outlink)) {
357  }
358  return 0;
359  }
360 }
361 
363 {
364  AudioDialogueEnhanceContext *s = ctx->priv;
365 
366  av_freep(&s->window);
367 
368  av_frame_free(&s->in_frame);
369  av_frame_free(&s->center_frame);
370  av_frame_free(&s->out_dist_frame);
371  av_frame_free(&s->windowed_frame);
372  av_frame_free(&s->windowed_out);
373  av_frame_free(&s->windowed_prev);
374 
375  av_tx_uninit(&s->tx_ctx[0]);
376  av_tx_uninit(&s->tx_ctx[1]);
377  av_tx_uninit(&s->itx_ctx);
378 }
379 
/* Single stereo audio input; format negotiation happens in query_formats(). */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};
387 
/* Single audio output (L/R/C surround layout, see query_formats()). */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};
394 
396  .name = "dialoguenhance",
397  .description = NULL_IF_CONFIG_SMALL("Audio Dialogue Enhancement."),
398  .priv_size = sizeof(AudioDialogueEnhanceContext),
399  .priv_class = &dialoguenhance_class,
400  .uninit = uninit,
405  .activate = activate,
406  .process_command = ff_filter_process_command,
407 };
formats
formats
Definition: signature.h:48
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:100
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:54
inputs
static const AVFilterPad inputs[]
Definition: af_dialoguenhance.c:380
AudioDialogueEnhancementContext::in_frame
AVFrame * in_frame
Definition: af_dialoguenhance.c:44
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:999
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:591
apply_window
static void apply_window(AudioDialogueEnhanceContext *s, const float *in_frame, float *out_frame, const int add_to_out_frame)
Definition: af_dialoguenhance.c:127
AVTXContext
Definition: tx_priv.h:201
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
im
float im
Definition: fft.c:79
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:432
AVOption
AVOption.
Definition: opt.h:251
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:167
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_dialoguenhance.c:306
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:739
float.h
calc_vad
static float calc_vad(float fc, float flr, float a)
Definition: af_dialoguenhance.c:202
AVComplexFloat
Definition: tx.h:27
outputs
static const AVFilterPad outputs[]
Definition: af_dialoguenhance.c:388
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:551
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:354
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:175
AudioDialogueEnhancementContext::overlap
int overlap
Definition: af_dialoguenhance.c:38
AudioDialogueEnhancementContext::window
float * window
Definition: af_dialoguenhance.c:40
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:649
get_centere
static void get_centere(AVComplexFloat *left, AVComplexFloat *right, AVComplexFloat *center, int N)
Definition: af_dialoguenhance.c:146
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_dialoguenhance.c:362
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
lpf
static float * lpf(float Fn, float Fc, float tbw, int *num_taps, float att, float *beta, int round)
Definition: asrc_sinc.c:175
AVComplexFloat::im
float im
Definition: tx.h:28
window
static SDL_Window * window
Definition: ffplay.c:365
AudioDialogueEnhancementContext::windowed_prev
AVFrame * windowed_prev
Definition: af_dialoguenhance.c:48
AudioDialogueEnhancementContext::voice
double voice
Definition: af_dialoguenhance.c:35
fail
#define fail()
Definition: checkasm.h:131
de_stereo
static int de_stereo(AVFilterContext *ctx, AVFrame *out)
Definition: af_dialoguenhance.c:231
ff_af_dialoguenhance
const AVFilter ff_af_dialoguenhance
Definition: af_dialoguenhance.c:395
AV_CHANNEL_LAYOUT_SURROUND
#define AV_CHANNEL_LAYOUT_SURROUND
Definition: channel_layout.h:357
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
pts
static int64_t pts
Definition: transcode_aac.c:654
AudioDialogueEnhancementContext::windowed_out
AVFrame * windowed_out
Definition: af_dialoguenhance.c:47
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:49
AudioDialogueEnhancementContext::tx_ctx
AVTXContext * tx_ctx[2]
Definition: af_dialoguenhance.c:51
AudioDialogueEnhancementContext::windowed_frame
AVFrame * windowed_frame
Definition: af_dialoguenhance.c:46
AudioDialogueEnhancementContext::itx_fn
av_tx_fn itx_fn
Definition: af_dialoguenhance.c:52
av_cold
#define av_cold
Definition: attributes.h:90
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:749
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:111
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1511
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
filters.h
AudioDialogueEnhancementContext::out_dist_frame
AVFrame * out_dist_frame
Definition: af_dialoguenhance.c:45
ctx
AVFormatContext * ctx
Definition: movenc.c:48
FLAGS
#define FLAGS
Definition: af_dialoguenhance.c:56
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:190
AudioDialogueEnhancementContext::original
double original
Definition: af_dialoguenhance.c:35
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1413
NULL
#define NULL
Definition: coverity.c:32
OFFSET
#define OFFSET(x)
Definition: af_dialoguenhance.c:55
AudioDialogueEnhancementContext::prev_vad
float prev_vad
Definition: af_dialoguenhance.c:41
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:449
sqrtf
static __device__ float sqrtf(float a)
Definition: cuda_runtime.h:184
generate_window_func
static void generate_window_func(float *lut, int N, int win_func, float *overlap)
Definition: window_func.h:61
get_final
static void get_final(float *c, float *l, float *r, float vad, int N, float original, float enhance)
Definition: af_dialoguenhance.c:209
av_clipf
av_clipf
Definition: af_crystalizer.c:122
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, const AVChannelLayout *channel_layout)
Definition: formats.c:466
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1348
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
f
f
Definition: af_crystalizer.c:122
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AudioDialogueEnhancementContext::fft_size
int fft_size
Definition: af_dialoguenhance.c:37
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:290
AVComplexFloat::re
float re
Definition: tx.h:28
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:863
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
N
#define N
Definition: af_mcompand.c:53
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:251
AudioDialogueEnhancementContext
Definition: af_dialoguenhance.c:32
internal.h
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:405
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
dialoguenhance_options
static const AVOption dialoguenhance_options[]
Definition: af_dialoguenhance.c:58
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:55
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1373
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
AudioDialogueEnhancementContext::in
AVFrame * in
Definition: af_dialoguenhance.c:43
AudioDialogueEnhancementContext::tx_fn
av_tx_fn tx_fn
Definition: af_dialoguenhance.c:52
flux
static float flux(float *curf, float *prevf, int N)
Definition: af_dialoguenhance.c:162
AVFilter
Filter definition.
Definition: avfilter.h:171
fluxlr
static float fluxlr(float *lf, float *lpf, float *rf, float *rpf, int N)
Definition: af_dialoguenhance.c:180
G
#define G
Definition: huffyuvdsp.h:33
ret
ret
Definition: filter_design.txt:187
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:88
window_func.h
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_dialoguenhance.c:84
channel_layout.h
WFUNC_SINE
@ WFUNC_SINE
Definition: window_func.h:31
avfilter.h
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_dialoguenhance.c:67
AVFilterContext
An instance of a filter.
Definition: avfilter.h:408
activate
static int activate(AVFilterContext *ctx)
Definition: af_dialoguenhance.c:332
audio.h
AudioDialogueEnhancementContext::itx_ctx
AVTXContext * itx_ctx
Definition: af_dialoguenhance.c:51
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:191
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:160
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
AudioDialogueEnhancementContext::center_frame
AVFrame * center_frame
Definition: af_dialoguenhance.c:49
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dialoguenhance)
sqrf
static float sqrf(float x)
Definition: af_dialoguenhance.c:141
AudioDialogueEnhancementContext::enhance
double enhance
Definition: af_dialoguenhance.c:35
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:191
tx.h
re
float re
Definition: fft.c:79