FFmpeg
vf_normalize.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Richard Ling
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * Normalize RGB video (aka histogram stretching, contrast stretching).
23  * See: https://en.wikipedia.org/wiki/Normalization_(image_processing)
24  *
25  * For each channel of each frame, the filter computes the input range and maps
26  * it linearly to the user-specified output range. The output range defaults
27  * to the full dynamic range from pure black to pure white.
28  *
29  * Naively maximising the dynamic range of each frame of video in isolation
30  * may cause flickering (rapid changes in brightness of static objects in the
31  * scene) when small dark or bright objects enter or leave the scene. This
32  * filter can apply temporal smoothing to the input range to reduce flickering.
33  * Temporal smoothing is similar to the auto-exposure (automatic gain control)
34  * on a video camera, which performs the same function; and, like a video
35  * camera, it may cause a period of over- or under-exposure of the video.
36  *
37  * The filter can normalize the R,G,B channels independently, which may cause
38  * color shifting, or link them together as a single channel, which prevents
39  * color shifting. More precisely, linked normalization preserves hue (as it's
40  * defined in HSV/HSL color spaces) while independent normalization does not.
41  * Independent normalization can be used to remove color casts, such as the
42  * blue cast from underwater video, restoring more natural colors. The filter
43  * can also combine independent and linked normalization in any ratio.
44  *
45  * Finally the overall strength of the filter can be adjusted, from no effect
46  * to full normalization.
47  *
48  * The 5 AVOptions are:
49  * blackpt, Colors which define the output range. The minimum input value
50  * whitept is mapped to the blackpt. The maximum input value is mapped to
51  * the whitept. The defaults are black and white respectively.
52  * Specifying white for blackpt and black for whitept will give
53  * color-inverted, normalized video. Shades of grey can be used
54  * to reduce the dynamic range (contrast). Specifying saturated
55  * colors here can create some interesting effects.
56  *
57  * smoothing The amount of temporal smoothing, expressed in frames (>=0).
58  * the minimum and maximum input values of each channel are
59  * smoothed using a rolling average over the current frame and
60  * that many previous frames of video. Defaults to 0 (no temporal
61  * smoothing).
62  *
63  * independence
64  * Controls the ratio of independent (color shifting) channel
65  * normalization to linked (color preserving) normalization. 0.0
66  * is fully linked, 1.0 is fully independent. Defaults to fully
67  * independent.
68  *
69  * strength Overall strength of the filter. 1.0 is full strength. 0.0 is
70  * a rather expensive no-op. Values in between can give a gentle
71  * boost to low-contrast video without creating an artificial
72  * over-processed look. The default is full strength.
73  */
74 
75 #include "libavutil/intreadwrite.h"
76 #include "libavutil/mem.h"
77 #include "libavutil/opt.h"
78 #include "libavutil/pixdesc.h"
79 #include "avfilter.h"
80 #include "drawutils.h"
81 #include "internal.h"
82 #include "video.h"
83 
84 typedef struct NormalizeHistory {
85  uint16_t *history; // History entries.
86  uint64_t history_sum; // Sum of history entries.
88 
89 typedef struct NormalizeLocal {
90  uint16_t in; // Original input byte value for this frame.
91  float smoothed; // Smoothed input value [0,255].
92  float out; // Output value [0,255]
94 
95 typedef struct NormalizeContext {
96  const AVClass *class;
97 
98  // Storage for the corresponding AVOptions
99  uint8_t blackpt[4];
100  uint8_t whitept[4];
103  float strength;
104 
105  uint8_t co[4]; // Offsets to R,G,B,A bytes respectively in each pixel
106  int depth;
107  int sblackpt[4];
108  int swhitept[4];
109  int num_components; // Number of components in the pixel format
110  int step;
111  int history_len; // Number of frames to average; based on smoothing factor
112  int frame_num; // Increments on each frame, starting from 0.
113 
114  // Per-extremum, per-channel history, for temporal smoothing.
115  NormalizeHistory min[3], max[3]; // Min and max for each channel in {R,G,B}.
116  uint16_t *history_mem; // Single allocation for above history entries
117 
118  uint16_t lut[3][65536]; // Lookup table
119 
121  void (*process)(struct NormalizeContext *s, AVFrame *in, AVFrame *out);
123 
124 #define OFFSET(x) offsetof(NormalizeContext, x)
125 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
126 #define FLAGSR AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
127 
128 static const AVOption normalize_options[] = {
129  { "blackpt", "output color to which darkest input color is mapped", OFFSET(blackpt), AV_OPT_TYPE_COLOR, { .str = "black" }, 0, 0, FLAGSR },
130  { "whitept", "output color to which brightest input color is mapped", OFFSET(whitept), AV_OPT_TYPE_COLOR, { .str = "white" }, 0, 0, FLAGSR },
131  { "smoothing", "amount of temporal smoothing of the input range, to reduce flicker", OFFSET(smoothing), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX/8, FLAGS },
132  { "independence", "proportion of independent to linked channel normalization", OFFSET(independence), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 1.0, FLAGSR },
133  { "strength", "strength of filter, from no effect to full normalization", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 1.0, FLAGSR },
134  { NULL }
135 };
136 
138 
140 {
141  for (int c = 0; c < 3; c++)
142  min[c].in = max[c].in = in->data[0][s->co[c]];
143  for (int y = 0; y < in->height; y++) {
144  uint8_t *inp = in->data[0] + y * in->linesize[0];
145  for (int x = 0; x < in->width; x++) {
146  for (int c = 0; c < 3; c++) {
147  min[c].in = FFMIN(min[c].in, inp[s->co[c]]);
148  max[c].in = FFMAX(max[c].in, inp[s->co[c]]);
149  }
150  inp += s->step;
151  }
152  }
153 }
154 
156 {
157  for (int y = 0; y < in->height; y++) {
158  uint8_t *inp = in->data[0] + y * in->linesize[0];
159  uint8_t *outp = out->data[0] + y * out->linesize[0];
160  for (int x = 0; x < in->width; x++) {
161  for (int c = 0; c < 3; c++)
162  outp[s->co[c]] = s->lut[c][inp[s->co[c]]];
163  if (s->num_components == 4)
164  // Copy alpha as-is.
165  outp[s->co[3]] = inp[s->co[3]];
166  inp += s->step;
167  outp += s->step;
168  }
169  }
170 }
171 
173 {
174  min[0].in = max[0].in = in->data[2][0];
175  min[1].in = max[1].in = in->data[0][0];
176  min[2].in = max[2].in = in->data[1][0];
177  for (int y = 0; y < in->height; y++) {
178  uint8_t *inrp = in->data[2] + y * in->linesize[2];
179  uint8_t *ingp = in->data[0] + y * in->linesize[0];
180  uint8_t *inbp = in->data[1] + y * in->linesize[1];
181  for (int x = 0; x < in->width; x++) {
182  min[0].in = FFMIN(min[0].in, inrp[x]);
183  max[0].in = FFMAX(max[0].in, inrp[x]);
184  min[1].in = FFMIN(min[1].in, ingp[x]);
185  max[1].in = FFMAX(max[1].in, ingp[x]);
186  min[2].in = FFMIN(min[2].in, inbp[x]);
187  max[2].in = FFMAX(max[2].in, inbp[x]);
188  }
189  }
190 }
191 
193 {
194  for (int y = 0; y < in->height; y++) {
195  uint8_t *inrp = in->data[2] + y * in->linesize[2];
196  uint8_t *ingp = in->data[0] + y * in->linesize[0];
197  uint8_t *inbp = in->data[1] + y * in->linesize[1];
198  uint8_t *inap = in->data[3] + y * in->linesize[3];
199  uint8_t *outrp = out->data[2] + y * out->linesize[2];
200  uint8_t *outgp = out->data[0] + y * out->linesize[0];
201  uint8_t *outbp = out->data[1] + y * out->linesize[1];
202  uint8_t *outap = out->data[3] + y * out->linesize[3];
203  for (int x = 0; x < in->width; x++) {
204  outrp[x] = s->lut[0][inrp[x]];
205  outgp[x] = s->lut[1][ingp[x]];
206  outbp[x] = s->lut[2][inbp[x]];
207  if (s->num_components == 4)
208  outap[x] = inap[x];
209  }
210  }
211 }
212 
214 {
215  for (int c = 0; c < 3; c++)
216  min[c].in = max[c].in = AV_RN16(in->data[0] + 2 * s->co[c]);
217  for (int y = 0; y < in->height; y++) {
218  uint16_t *inp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
219  for (int x = 0; x < in->width; x++) {
220  for (int c = 0; c < 3; c++) {
221  min[c].in = FFMIN(min[c].in, inp[s->co[c]]);
222  max[c].in = FFMAX(max[c].in, inp[s->co[c]]);
223  }
224  inp += s->step;
225  }
226  }
227 }
228 
230 {
231  for (int y = 0; y < in->height; y++) {
232  uint16_t *inp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
233  uint16_t *outp = (uint16_t *)(out->data[0] + y * out->linesize[0]);
234  for (int x = 0; x < in->width; x++) {
235  for (int c = 0; c < 3; c++)
236  outp[s->co[c]] = s->lut[c][inp[s->co[c]]];
237  if (s->num_components == 4)
238  // Copy alpha as-is.
239  outp[s->co[3]] = inp[s->co[3]];
240  inp += s->step;
241  outp += s->step;
242  }
243  }
244 }
245 
247 {
248  min[0].in = max[0].in = AV_RN16(in->data[2]);
249  min[1].in = max[1].in = AV_RN16(in->data[0]);
250  min[2].in = max[2].in = AV_RN16(in->data[1]);
251  for (int y = 0; y < in->height; y++) {
252  uint16_t *inrp = (uint16_t *)(in->data[2] + y * in->linesize[2]);
253  uint16_t *ingp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
254  uint16_t *inbp = (uint16_t *)(in->data[1] + y * in->linesize[1]);
255  for (int x = 0; x < in->width; x++) {
256  min[0].in = FFMIN(min[0].in, inrp[x]);
257  max[0].in = FFMAX(max[0].in, inrp[x]);
258  min[1].in = FFMIN(min[1].in, ingp[x]);
259  max[1].in = FFMAX(max[1].in, ingp[x]);
260  min[2].in = FFMIN(min[2].in, inbp[x]);
261  max[2].in = FFMAX(max[2].in, inbp[x]);
262  }
263  }
264 }
265 
267 {
268  for (int y = 0; y < in->height; y++) {
269  uint16_t *inrp = (uint16_t *)(in->data[2] + y * in->linesize[2]);
270  uint16_t *ingp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
271  uint16_t *inbp = (uint16_t *)(in->data[1] + y * in->linesize[1]);
272  uint16_t *inap = (uint16_t *)(in->data[3] + y * in->linesize[3]);
273  uint16_t *outrp = (uint16_t *)(out->data[2] + y * out->linesize[2]);
274  uint16_t *outgp = (uint16_t *)(out->data[0] + y * out->linesize[0]);
275  uint16_t *outbp = (uint16_t *)(out->data[1] + y * out->linesize[1]);
276  uint16_t *outap = (uint16_t *)(out->data[3] + y * out->linesize[3]);
277  for (int x = 0; x < in->width; x++) {
278  outrp[x] = s->lut[0][inrp[x]];
279  outgp[x] = s->lut[1][ingp[x]];
280  outbp[x] = s->lut[2][inbp[x]];
281  if (s->num_components == 4)
282  outap[x] = inap[x];
283  }
284  }
285 }
286 
287 // This function is the main guts of the filter. Normalizes the input frame
288 // into the output frame. The frames are known to have the same dimensions
289 // and pixel format.
291 {
292  // Per-extremum, per-channel local variables.
293  NormalizeLocal min[3], max[3]; // Min and max for each channel in {R,G,B}.
294 
295  float rgb_min_smoothed; // Min input range for linked normalization
296  float rgb_max_smoothed; // Max input range for linked normalization
297  int c;
298 
299  // First, scan the input frame to find, for each channel, the minimum
300  // (min.in) and maximum (max.in) values present in the channel.
301  s->find_min_max(s, in, min, max);
302 
303  // Next, for each channel, push min.in and max.in into their respective
304  // histories, to determine the min.smoothed and max.smoothed for this frame.
305  {
306  int history_idx = s->frame_num % s->history_len;
307  // Assume the history is not yet full; num_history_vals is the number
308  // of frames received so far including the current frame.
309  int num_history_vals = s->frame_num + 1;
310  if (s->frame_num >= s->history_len) {
311  //The history is full; drop oldest value and cap num_history_vals.
312  for (c = 0; c < 3; c++) {
313  s->min[c].history_sum -= s->min[c].history[history_idx];
314  s->max[c].history_sum -= s->max[c].history[history_idx];
315  }
316  num_history_vals = s->history_len;
317  }
318  // For each extremum, update history_sum and calculate smoothed value
319  // as the rolling average of the history entries.
320  for (c = 0; c < 3; c++) {
321  s->min[c].history_sum += (s->min[c].history[history_idx] = min[c].in);
322  min[c].smoothed = s->min[c].history_sum / (float)num_history_vals;
323  s->max[c].history_sum += (s->max[c].history[history_idx] = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / (float)num_history_vals;
325  }
326  }
327 
328  // Determine the input range for linked normalization. This is simply the
329  // minimum of the per-channel minimums, and the maximum of the per-channel
330  // maximums.
331  rgb_min_smoothed = FFMIN3(min[0].smoothed, min[1].smoothed, min[2].smoothed);
332  rgb_max_smoothed = FFMAX3(max[0].smoothed, max[1].smoothed, max[2].smoothed);
333 
334  // Now, process each channel to determine the input and output range and
335  // build the lookup tables.
336  for (c = 0; c < 3; c++) {
337  int in_val;
338  // Adjust the input range for this channel [min.smoothed,max.smoothed]
339  // by mixing in the correct proportion of the linked normalization
340  // input range [rgb_min_smoothed,rgb_max_smoothed].
341  min[c].smoothed = (min[c].smoothed * s->independence)
342  + (rgb_min_smoothed * (1.0f - s->independence));
343  max[c].smoothed = (max[c].smoothed * s->independence)
344  + (rgb_max_smoothed * (1.0f - s->independence));
345 
346  // Calculate the output range [min.out,max.out] as a ratio of the full-
347  // strength output range [blackpt,whitept] and the original input range
348  // [min.in,max.in], based on the user-specified filter strength.
349  min[c].out = (s->sblackpt[c] * s->strength)
350  + (min[c].in * (1.0f - s->strength));
351  max[c].out = (s->swhitept[c] * s->strength)
352  + (max[c].in * (1.0f - s->strength));
353 
354  // Now, build a lookup table which linearly maps the adjusted input range
355  // [min.smoothed,max.smoothed] to the output range [min.out,max.out].
356  // Perform the linear interpolation for each x:
357  // lut[x] = (int)(float(x - min.smoothed) * scale + max.out + 0.5)
358  // where scale = (max.out - min.out) / (max.smoothed - min.smoothed)
359  if (min[c].smoothed == max[c].smoothed) {
360  // There is no dynamic range to expand. No mapping for this channel.
361  for (in_val = min[c].in; in_val <= max[c].in; in_val++)
362  s->lut[c][in_val] = min[c].out;
363  } else {
364  // We must set lookup values for all values in the original input
365  // range [min.in,max.in]. Since the original input range may be
366  // larger than [min.smoothed,max.smoothed], some output values may
367  // fall outside the [0,255] dynamic range. We need to clamp them.
368  float scale = (max[c].out - min[c].out) / (max[c].smoothed - min[c].smoothed);
369  for (in_val = min[c].in; in_val <= max[c].in; in_val++) {
370  int out_val = (in_val - min[c].smoothed) * scale + min[c].out + 0.5f;
371  out_val = av_clip_uintp2_c(out_val, s->depth);
372  s->lut[c][in_val] = out_val;
373  }
374  }
375  }
376 
377  // Finally, process the pixels of the input frame using the lookup tables.
378  s->process(s, in, out);
379 
380  s->frame_num++;
381 }
382 
383 // Now we define all the functions accessible from the ff_vf_normalize class,
384 // which is ffmpeg's interface to our filter. See doc/filter_design.txt and
385 // doc/writing_filters.txt for descriptions of what these interface functions
386 // are expected to do.
387 
// The pixel formats that our filter supports: packed and planar RGB formats
// at 8-bit and higher component depths (the 16-bit code paths above handle
// everything over 8 bits).
390 static const enum AVPixelFormat pixel_fmts[] = {
407 };
408 
409 // At this point we know the pixel format used for both input and output. We
410 // can also access the frame rate of the input video and allocate some memory
411 // appropriately
413 {
414  NormalizeContext *s = inlink->dst->priv;
415  // Store offsets to R,G,B,A bytes respectively in each pixel
417  int c, planar, scale;
418 
419  ff_fill_rgba_map(s->co, inlink->format);
420  s->depth = desc->comp[0].depth;
421  scale = 1 << (s->depth - 8);
422  s->num_components = desc->nb_components;
423  s->step = av_get_padded_bits_per_pixel(desc) >> (3 + (s->depth > 8));
424  // Convert smoothing value to history_len (a count of frames to average,
425  // must be at least 1). Currently this is a direct assignment, but the
426  // smoothing value was originally envisaged as a number of seconds. In
427  // future it would be nice to set history_len using a number of seconds,
428  // but VFR video is currently an obstacle to doing so.
429  s->history_len = s->smoothing + 1;
430  // Allocate the history buffers -- there are 6 -- one for each extrema.
431  // s->smoothing is limited to INT_MAX/8, so that (s->history_len * 6)
432  // can't overflow on 32bit causing a too-small allocation.
433  s->history_mem = av_malloc(s->history_len * 6 * sizeof(*s->history_mem));
434  if (s->history_mem == NULL)
435  return AVERROR(ENOMEM);
436 
437  for (c = 0; c < 3; c++) {
438  s->min[c].history = s->history_mem + (c*2) * s->history_len;
439  s->max[c].history = s->history_mem + (c*2+1) * s->history_len;
440  s->sblackpt[c] = scale * s->blackpt[c] + (s->blackpt[c] & (1 << (s->depth - 8)));
441  s->swhitept[c] = scale * s->whitept[c] + (s->whitept[c] & (1 << (s->depth - 8)));
442  }
443 
445 
446  if (s->depth <= 8) {
447  s->find_min_max = planar ? find_min_max_planar : find_min_max;
448  s->process = planar? process_planar : process;
449  } else {
450  s->find_min_max = planar ? find_min_max_planar_16 : find_min_max_16;
451  s->process = planar? process_planar_16 : process_16;
452  }
453 
454  return 0;
455 }
456 
457 // Free any memory allocations here
459 {
460  NormalizeContext *s = ctx->priv;
461 
462  av_freep(&s->history_mem);
463 }
464 
465 // This function is pretty much standard from doc/writing_filters.txt. It
466 // tries to do in-place filtering where possible, only allocating a new output
467 // frame when absolutely necessary.
469 {
470  AVFilterContext *ctx = inlink->dst;
471  AVFilterLink *outlink = ctx->outputs[0];
472  NormalizeContext *s = ctx->priv;
473  AVFrame *out;
474  // Set 'direct' if we can modify the input frame in-place. Otherwise we
475  // need to retrieve a new frame from the output link.
476  int direct = av_frame_is_writable(in) && !ctx->is_disabled;
477 
478  if (direct) {
479  out = in;
480  } else {
481  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
482  if (!out) {
483  av_frame_free(&in);
484  return AVERROR(ENOMEM);
485  }
487  }
488 
489  // Now we've got the input and output frames (which may be the same frame)
490  // perform the filtering with our custom function.
491  normalize(s, in, out);
492 
493  if (ctx->is_disabled) {
494  av_frame_free(&out);
495  return ff_filter_frame(outlink, in);
496  }
497 
498  if (!direct)
499  av_frame_free(&in);
500 
501  return ff_filter_frame(outlink, out);
502 }
503 
504 static const AVFilterPad inputs[] = {
505  {
506  .name = "default",
507  .type = AVMEDIA_TYPE_VIDEO,
508  .filter_frame = filter_frame,
509  .config_props = config_input,
510  },
511 };
512 
514  .name = "normalize",
515  .description = NULL_IF_CONFIG_SMALL("Normalize RGB video."),
516  .priv_size = sizeof(NormalizeContext),
517  .priv_class = &normalize_class,
518  .uninit = uninit,
523  .process_command = ff_filter_process_command,
524 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
NormalizeContext
Definition: vf_normalize.c:95
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:501
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
NormalizeContext::depth
int depth
Definition: vf_normalize.c:106
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
NormalizeContext::lut
uint16_t lut[3][65536]
Definition: vf_normalize.c:118
out
FILE * out
Definition: movenc.c:55
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:162
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
AV_RN16
#define AV_RN16(p)
Definition: intreadwrite.h:358
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
NormalizeContext::history_len
int history_len
Definition: vf_normalize.c:111
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
pixdesc.h
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:279
AVFrame::width
int width
Definition: frame.h:447
find_min_max_planar_16
static void find_min_max_planar_16(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
Definition: vf_normalize.c:246
AVOption
AVOption.
Definition: opt.h:346
NormalizeContext::process
void(* process)(struct NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:121
NormalizeContext::whitept
uint8_t whitept[4]
Definition: vf_normalize.c:100
normalize_options
static const AVOption normalize_options[]
Definition: vf_normalize.c:128
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
find_min_max_planar
static void find_min_max_planar(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
Definition: vf_normalize.c:172
video.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:396
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
process_planar
static void process_planar(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:192
NormalizeContext::sblackpt
int sblackpt[4]
Definition: vf_normalize.c:107
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
NormalizeContext::max
NormalizeHistory max[3]
Definition: vf_normalize.c:115
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? 
in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
NormalizeLocal
Definition: vf_normalize.c:89
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
av_cold
#define av_cold
Definition: attributes.h:90
ff_video_default_filterpad
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
Definition: video.c:37
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:498
float
float
Definition: af_crystalizer.c:121
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:499
pixel_fmts
static enum AVPixelFormat pixel_fmts[]
Definition: vf_normalize.c:390
NormalizeContext::independence
float independence
Definition: vf_normalize.c:102
ctx
AVFormatContext * ctx
Definition: movenc.c:49
NormalizeContext::history_mem
uint16_t * history_mem
Definition: vf_normalize.c:116
FLAGSR
#define FLAGSR
Definition: vf_normalize.c:126
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
find_min_max
static void find_min_max(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
Definition: vf_normalize.c:139
if
if(ret)
Definition: filter_design.txt:179
NormalizeLocal::out
float out
Definition: vf_normalize.c:92
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:497
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:468
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:469
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
AV_OPT_TYPE_COLOR
@ AV_OPT_TYPE_COLOR
Definition: opt.h:250
inputs
static const AVFilterPad inputs[]
Definition: vf_normalize.c:504
NormalizeContext::step
int step
Definition: vf_normalize.c:110
NormalizeContext::blackpt
uint8_t blackpt[4]
Definition: vf_normalize.c:99
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:265
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
NormalizeLocal::in
uint16_t in
Definition: vf_normalize.c:90
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
process_planar_16
static void process_planar_16(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:266
OFFSET
#define OFFSET(x)
Definition: vf_normalize.c:124
f
f
Definition: af_crystalizer.c:121
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
av_get_padded_bits_per_pixel
int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel for the pixel format described by pixdesc, including any padding ...
Definition: pixdesc.c:2930
process_16
static void process_16(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:229
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
FLAGS
#define FLAGS
Definition: vf_normalize.c:125
NormalizeHistory::history_sum
uint64_t history_sum
Definition: vf_normalize.c:86
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:464
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_normalize.c:458
NormalizeContext::min
NormalizeHistory min[3]
Definition: vf_normalize.c:115
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:645
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_normalize.c:468
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:887
NormalizeContext::find_min_max
void(* find_min_max)(struct NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
Definition: vf_normalize.c:120
ff_vf_normalize
const AVFilter ff_vf_normalize
Definition: vf_normalize.c:513
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
NormalizeContext::co
uint8_t co[4]
Definition: vf_normalize.c:105
internal.h
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
normalize
Definition: normalize.py:1
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:473
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
NormalizeContext::smoothing
int smoothing
Definition: vf_normalize.c:101
config_input
static int config_input(AVFilterLink *inlink)
Definition: vf_normalize.c:412
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
find_min_max_16
static void find_min_max_16(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
Definition: vf_normalize.c:213
AVFilter
Filter definition.
Definition: avfilter.h:166
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:264
normalize
static void normalize(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:290
AVFrame::height
int height
Definition: frame.h:447
NormalizeContext::frame_num
int frame_num
Definition: vf_normalize.c:112
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
NormalizeContext::num_components
int num_components
Definition: vf_normalize.c:109
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
NormalizeContext::strength
float strength
Definition: vf_normalize.c:103
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mem.h
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:155
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:420
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:262
NormalizeHistory
Definition: vf_normalize.c:84
drawutils.h
NormalizeContext::swhitept
int swhitept[4]
Definition: vf_normalize.c:108
NormalizeHistory::history
uint16_t * history
Definition: vf_normalize.c:85
NormalizeLocal::smoothed
float smoothed
Definition: vf_normalize.c:91
min
float min
Definition: vorbis_enc_data.h:429
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(normalize)