FFmpeg
af_adenorm.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

enum FilterType {
    DC_TYPE,
    AC_TYPE,
    SQ_TYPE,
    PS_TYPE,
    NB_TYPES,
};

typedef struct ADenormContext {
    const AVClass *class;

    double level;
    double level_db;
    int type;
    int64_t in_samples;

    void (*filter)(AVFilterContext *ctx, void *dst,
                   const void *src, int nb_samples);
} ADenormContext;

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_all_channel_counts(ctx);
    if (ret < 0)
        return ret;

    return ff_set_common_all_samplerates(ctx);
}

/* Add a constant DC offset at the configured level. */
static void dc_denorm_fltp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const float *src = (const float *)srcp;
    float *dst = (float *)dstp;
    const float dc = s->level;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc;
    }
}

static void dc_denorm_dblp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const double *src = (const double *)srcp;
    double *dst = (double *)dstp;
    const double dc = s->level;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc;
    }
}

/* Add an offset whose sign alternates on every sample. */
static void ac_denorm_fltp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const float *src = (const float *)srcp;
    float *dst = (float *)dstp;
    const float dc = s->level;
    const int64_t N = s->in_samples;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc * (((N + n) & 1) ? -1.f : 1.f);
    }
}

static void ac_denorm_dblp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const double *src = (const double *)srcp;
    double *dst = (double *)dstp;
    const double dc = s->level;
    const int64_t N = s->in_samples;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc * (((N + n) & 1) ? -1. : 1.);
    }
}

/* Add an offset whose sign flips every 256 samples (slow square wave). */
static void sq_denorm_fltp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const float *src = (const float *)srcp;
    float *dst = (float *)dstp;
    const float dc = s->level;
    const int64_t N = s->in_samples;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc * ((((N + n) >> 8) & 1) ? -1.f : 1.f);
    }
}

static void sq_denorm_dblp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const double *src = (const double *)srcp;
    double *dst = (double *)dstp;
    const double dc = s->level;
    const int64_t N = s->in_samples;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc * ((((N + n) >> 8) & 1) ? -1. : 1.);
    }
}

/* Add the offset only on every 256th sample (sparse periodic pulse). */
static void ps_denorm_fltp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const float *src = (const float *)srcp;
    float *dst = (float *)dstp;
    const float dc = s->level;
    const int64_t N = s->in_samples;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc * (((N + n) & 255) ? 0.f : 1.f);
    }
}

static void ps_denorm_dblp(AVFilterContext *ctx, void *dstp,
                           const void *srcp, int nb_samples)
{
    ADenormContext *s = ctx->priv;
    const double *src = (const double *)srcp;
    double *dst = (double *)dstp;
    const double dc = s->level;
    const int64_t N = s->in_samples;

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = src[n] + dc * (((N + n) & 255) ? 0. : 1.);
    }
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ADenormContext *s = ctx->priv;

    switch (s->type) {
    case DC_TYPE:
        switch (outlink->format) {
        case AV_SAMPLE_FMT_FLTP: s->filter = dc_denorm_fltp; break;
        case AV_SAMPLE_FMT_DBLP: s->filter = dc_denorm_dblp; break;
        }
        break;
    case AC_TYPE:
        switch (outlink->format) {
        case AV_SAMPLE_FMT_FLTP: s->filter = ac_denorm_fltp; break;
        case AV_SAMPLE_FMT_DBLP: s->filter = ac_denorm_dblp; break;
        }
        break;
    case SQ_TYPE:
        switch (outlink->format) {
        case AV_SAMPLE_FMT_FLTP: s->filter = sq_denorm_fltp; break;
        case AV_SAMPLE_FMT_DBLP: s->filter = sq_denorm_dblp; break;
        }
        break;
    case PS_TYPE:
        switch (outlink->format) {
        case AV_SAMPLE_FMT_FLTP: s->filter = ps_denorm_fltp; break;
        case AV_SAMPLE_FMT_DBLP: s->filter = ps_denorm_dblp; break;
        }
        break;
    default:
        av_assert0(0);
    }

    return 0;
}

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

/* Slice-threading worker: each job processes a contiguous range of channels. */
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ADenormContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    const int start = (in->channels * jobnr) / nb_jobs;
    const int end = (in->channels * (jobnr+1)) / nb_jobs;

    for (int ch = start; ch < end; ch++) {
        s->filter(ctx, out->extended_data[ch],
                  in->extended_data[ch],
                  in->nb_samples);
    }

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ADenormContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* Convert the level option from dB to a linear amplitude. */
    s->level = exp(s->level_db / 20. * M_LN10);
    td.in = in; td.out = out;
    ff_filter_execute(ctx, filter_channels, &td, NULL,
                      FFMIN(inlink->channels, ff_filter_get_nb_threads(ctx)));

    s->in_samples += in->nb_samples;

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    return config_output(outlink);
}

static const AVFilterPad adenorm_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad adenorm_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
};

#define OFFSET(x) offsetof(ADenormContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption adenorm_options[] = {
    { "level",  "set level", OFFSET(level_db), AV_OPT_TYPE_DOUBLE, {.dbl=-351}, -451, -90, FLAGS },
    { "type",   "set type",  OFFSET(type), AV_OPT_TYPE_INT, {.i64=DC_TYPE}, 0, NB_TYPES-1, FLAGS, "type" },
    { "dc",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=DC_TYPE}, 0, 0, FLAGS, "type"},
    { "ac",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=AC_TYPE}, 0, 0, FLAGS, "type"},
    { "square", NULL, 0, AV_OPT_TYPE_CONST, {.i64=SQ_TYPE}, 0, 0, FLAGS, "type"},
    { "pulse",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=PS_TYPE}, 0, 0, FLAGS, "type"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(adenorm);

const AVFilter ff_af_adenorm = {
    .name = "adenorm",
    .description = NULL_IF_CONFIG_SMALL("Remedy denormals by adding extremely low-level noise."),
    .query_formats = query_formats,
    .priv_size = sizeof(ADenormContext),
    FILTER_INPUTS(adenorm_inputs),
    FILTER_OUTPUTS(adenorm_outputs),
    .priv_class = &adenorm_class,
    .process_command = process_command,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |
             AVFILTER_FLAG_SLICE_THREADS,
};
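
For reference, the short standalone program below is not part of the FFmpeg source; it is a minimal sketch that mirrors the dB-to-linear conversion performed in filter_frame() and the four noise patterns implemented by the *_denorm_* kernels above (with the running sample counter s->in_samples folded into n), so the shapes selected by the dc/ac/square/pulse options can be printed and inspected in isolation. The default level of -351 dB and the option names are taken from adenorm_options; the file name and the main() wrapper are illustrative only.

/* print_adenorm_noise.c -- standalone sketch, not part of FFmpeg.
 * Build with: cc print_adenorm_noise.c -lm -o print_adenorm_noise */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double level_db = -351.;              /* default of the "level" option           */
    const double dc = pow(10., level_db / 20.); /* same dB -> linear mapping as the filter */

    for (long long n = 0; n < 1024; n++) {
        double dc_noise = dc;                                 /* "dc":     constant offset              */
        double ac_noise = dc * ((n & 1) ? -1. : 1.);          /* "ac":     sign flips on every sample   */
        double sq_noise = dc * (((n >> 8) & 1) ? -1. : 1.);   /* "square": sign flips every 256 samples */
        double ps_noise = dc * ((n & 255) ? 0. : 1.);         /* "pulse":  one pulse every 256 samples  */
        printf("%lld %g %g %g %g\n", n, dc_noise, ac_noise, sq_noise, ps_noise);
    }
    return 0;
}

At the default setting the added offset is about 10^(-351/20) ≈ 2.8e-18, far below audibility yet large enough to keep subsequent floating-point processing out of the denormal range, which is the filter's stated purpose.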