FFmpeg
vf_normalize.c
/*
 * Copyright (c) 2017 Richard Ling
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Normalize RGB video (aka histogram stretching, contrast stretching).
 * See: https://en.wikipedia.org/wiki/Normalization_(image_processing)
 *
 * For each channel of each frame, the filter computes the input range and maps
 * it linearly to the user-specified output range. The output range defaults
 * to the full dynamic range from pure black to pure white.
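 *
 * As a worked example (illustrative numbers only): if a channel's 8-bit
 * input range for a frame is [48,208] and the output range is the default
 * [0,255], each input value x maps to
 *     (x - 48) * (255 - 0) / (208 - 48) + 0
 * so 48 -> 0, 208 -> 255, and a mid-grey 128 -> 127.5, which rounds to 128.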
 *
 * Naively maximising the dynamic range of each frame of video in isolation
 * may cause flickering (rapid changes in brightness of static objects in the
 * scene) when small dark or bright objects enter or leave the scene. This
 * filter can apply temporal smoothing to the input range to reduce flickering.
 * Temporal smoothing is similar to the auto-exposure (automatic gain control)
 * on a video camera, which performs the same function; and, like a video
 * camera, it may cause a period of over- or under-exposure of the video.
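 *
 * For example (illustrative numbers only), with smoothing=2 the input range
 * for each frame is averaged over that frame and the two before it: if a
 * channel's per-frame minima are 10, 40 and 10, the smoothed minimum used
 * for the third frame is (10 + 40 + 10) / 3 = 20, so a briefly-appearing
 * bright or dark object shifts the mapping gradually rather than abruptly.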
 *
 * The filter can normalize the R,G,B channels independently, which may cause
 * color shifting, or link them together as a single channel, which prevents
 * color shifting. More precisely, linked normalization preserves hue (as it's
 * defined in HSV/HSL color spaces) while independent normalization does not.
 * Independent normalization can be used to remove color casts, such as the
 * blue cast from underwater video, restoring more natural colors. The filter
 * can also combine independent and linked normalization in any ratio.
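 *
 * The mix works per channel (illustrative numbers only): with smoothed minima
 * {R=10, G=30, B=50}, the linked minimum is 10; at independence=0.5 the blue
 * channel's effective minimum becomes 0.5*50 + 0.5*10 = 30, halfway between
 * its own minimum and the linked one.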
 *
 * Finally the overall strength of the filter can be adjusted, from no effect
 * to full normalization.
 *
 * The 5 AVOptions are:
 *   blackpt,   Colors which define the output range. The minimum input value
 *   whitept    is mapped to the blackpt. The maximum input value is mapped to
 *              the whitept. The defaults are black and white respectively.
 *              Specifying white for blackpt and black for whitept will give
 *              color-inverted, normalized video. Shades of grey can be used
 *              to reduce the dynamic range (contrast). Specifying saturated
 *              colors here can create some interesting effects.
 *
 *   smoothing  The amount of temporal smoothing, expressed in frames (>=0).
 *              The minimum and maximum input values of each channel are
 *              smoothed using a rolling average over the current frame and
 *              that many previous frames of video. Defaults to 0 (no temporal
 *              smoothing).
 *
 *   independence
 *              Controls the ratio of independent (color shifting) channel
 *              normalization to linked (color preserving) normalization. 0.0
 *              is fully linked, 1.0 is fully independent. Defaults to fully
 *              independent.
 *
 *   strength   Overall strength of the filter. 1.0 is full strength. 0.0 is
 *              a rather expensive no-op. Values in between can give a gentle
 *              boost to low-contrast video without creating an artificial
 *              over-processed look. The default is full strength.
 */

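/*
 * A typical invocation (example only; file names are placeholders) stretches
 * contrast to the full range while averaging the measured input range over
 * one second of 25 fps video (the current frame plus 24 previous frames):
 *
 *     ffmpeg -i input.mp4 -vf normalize=smoothing=24 output.mp4
 *
 * The options above are ordinary AVOptions of this filter, so they can also
 * be combined as key=value pairs, e.g. normalize=independence=0:strength=0.5.
 */
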
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct NormalizeHistory {
    uint16_t *history;      // History entries.
    uint64_t history_sum;   // Sum of history entries.
} NormalizeHistory;

typedef struct NormalizeLocal {
    uint16_t in;    // Original input byte value for this frame.
    float smoothed; // Smoothed input value [0,255].
    float out;      // Output value [0,255]
} NormalizeLocal;

typedef struct NormalizeContext {
    const AVClass *class;

    // Storage for the corresponding AVOptions
    uint8_t blackpt[4];
    uint8_t whitept[4];
    int smoothing;
    float independence;
    float strength;

    uint8_t co[4];      // Offsets to R,G,B,A bytes respectively in each pixel
    int depth;
    int sblackpt[4];
    int swhitept[4];
    int num_components; // Number of components in the pixel format
    int step;
    int history_len;    // Number of frames to average; based on smoothing factor
    int frame_num;      // Increments on each frame, starting from 0.

    // Per-extremum, per-channel history, for temporal smoothing.
    NormalizeHistory min[3], max[3];    // Min and max for each channel in {R,G,B}.
    uint16_t *history_mem;  // Single allocation for above history entries

    uint16_t lut[3][65536]; // Lookup table

    void (*find_min_max)(struct NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3]);
    void (*process)(struct NormalizeContext *s, AVFrame *in, AVFrame *out);
} NormalizeContext;

#define OFFSET(x) offsetof(NormalizeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define FLAGSR AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption normalize_options[] = {
    { "blackpt",  "output color to which darkest input color is mapped",   OFFSET(blackpt), AV_OPT_TYPE_COLOR, { .str = "black" }, 0, 0, FLAGSR },
    { "whitept",  "output color to which brightest input color is mapped", OFFSET(whitept), AV_OPT_TYPE_COLOR, { .str = "white" }, 0, 0, FLAGSR },
    { "smoothing",  "amount of temporal smoothing of the input range, to reduce flicker", OFFSET(smoothing), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX/8, FLAGS },
    { "independence", "proportion of independent to linked channel normalization", OFFSET(independence), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 1.0, FLAGSR },
    { "strength", "strength of filter, from no effect to full normalization", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 1.0, FLAGSR },
    { NULL }
};

AVFILTER_DEFINE_CLASS(normalize);

static void find_min_max(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
{
    for (int c = 0; c < 3; c++)
        min[c].in = max[c].in = in->data[0][s->co[c]];
    for (int y = 0; y < in->height; y++) {
        uint8_t *inp = in->data[0] + y * in->linesize[0];
        for (int x = 0; x < in->width; x++) {
            for (int c = 0; c < 3; c++) {
                min[c].in = FFMIN(min[c].in, inp[s->co[c]]);
                max[c].in = FFMAX(max[c].in, inp[s->co[c]]);
            }
            inp += s->step;
        }
    }
}

static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
{
    for (int y = 0; y < in->height; y++) {
        uint8_t *inp = in->data[0] + y * in->linesize[0];
        uint8_t *outp = out->data[0] + y * out->linesize[0];
        for (int x = 0; x < in->width; x++) {
            for (int c = 0; c < 3; c++)
                outp[s->co[c]] = s->lut[c][inp[s->co[c]]];
            if (s->num_components == 4)
                // Copy alpha as-is.
                outp[s->co[3]] = inp[s->co[3]];
            inp += s->step;
            outp += s->step;
        }
    }
}

// Planar RGB (the GBRP family) stores G in plane 0, B in plane 1 and R in
// plane 2, hence the data[2]/data[0]/data[1] order for the R,G,B extrema.
static void find_min_max_planar(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
{
    min[0].in = max[0].in = in->data[2][0];
    min[1].in = max[1].in = in->data[0][0];
    min[2].in = max[2].in = in->data[1][0];
    for (int y = 0; y < in->height; y++) {
        uint8_t *inrp = in->data[2] + y * in->linesize[2];
        uint8_t *ingp = in->data[0] + y * in->linesize[0];
        uint8_t *inbp = in->data[1] + y * in->linesize[1];
        for (int x = 0; x < in->width; x++) {
            min[0].in = FFMIN(min[0].in, inrp[x]);
            max[0].in = FFMAX(max[0].in, inrp[x]);
            min[1].in = FFMIN(min[1].in, ingp[x]);
            max[1].in = FFMAX(max[1].in, ingp[x]);
            min[2].in = FFMIN(min[2].in, inbp[x]);
            max[2].in = FFMAX(max[2].in, inbp[x]);
        }
    }
}

static void process_planar(NormalizeContext *s, AVFrame *in, AVFrame *out)
{
    for (int y = 0; y < in->height; y++) {
        uint8_t *inrp = in->data[2] + y * in->linesize[2];
        uint8_t *ingp = in->data[0] + y * in->linesize[0];
        uint8_t *inbp = in->data[1] + y * in->linesize[1];
        uint8_t *inap = in->data[3] + y * in->linesize[3];
        uint8_t *outrp = out->data[2] + y * out->linesize[2];
        uint8_t *outgp = out->data[0] + y * out->linesize[0];
        uint8_t *outbp = out->data[1] + y * out->linesize[1];
        uint8_t *outap = out->data[3] + y * out->linesize[3];
        for (int x = 0; x < in->width; x++) {
            outrp[x] = s->lut[0][inrp[x]];
            outgp[x] = s->lut[1][ingp[x]];
            outbp[x] = s->lut[2][inbp[x]];
            if (s->num_components == 4)
                // Copy alpha as-is.
                outap[x] = inap[x];
        }
    }
}

static void find_min_max_16(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
{
    for (int c = 0; c < 3; c++)
        min[c].in = max[c].in = AV_RN16(in->data[0] + 2 * s->co[c]);
    for (int y = 0; y < in->height; y++) {
        uint16_t *inp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
        for (int x = 0; x < in->width; x++) {
            for (int c = 0; c < 3; c++) {
                min[c].in = FFMIN(min[c].in, inp[s->co[c]]);
                max[c].in = FFMAX(max[c].in, inp[s->co[c]]);
            }
            inp += s->step;
        }
    }
}

static void process_16(NormalizeContext *s, AVFrame *in, AVFrame *out)
{
    for (int y = 0; y < in->height; y++) {
        uint16_t *inp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
        uint16_t *outp = (uint16_t *)(out->data[0] + y * out->linesize[0]);
        for (int x = 0; x < in->width; x++) {
            for (int c = 0; c < 3; c++)
                outp[s->co[c]] = s->lut[c][inp[s->co[c]]];
            if (s->num_components == 4)
                // Copy alpha as-is.
                outp[s->co[3]] = inp[s->co[3]];
            inp += s->step;
            outp += s->step;
        }
    }
}

static void find_min_max_planar_16(NormalizeContext *s, AVFrame *in, NormalizeLocal min[3], NormalizeLocal max[3])
{
    min[0].in = max[0].in = AV_RN16(in->data[2]);
    min[1].in = max[1].in = AV_RN16(in->data[0]);
    min[2].in = max[2].in = AV_RN16(in->data[1]);
    for (int y = 0; y < in->height; y++) {
        uint16_t *inrp = (uint16_t *)(in->data[2] + y * in->linesize[2]);
        uint16_t *ingp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
        uint16_t *inbp = (uint16_t *)(in->data[1] + y * in->linesize[1]);
        for (int x = 0; x < in->width; x++) {
            min[0].in = FFMIN(min[0].in, inrp[x]);
            max[0].in = FFMAX(max[0].in, inrp[x]);
            min[1].in = FFMIN(min[1].in, ingp[x]);
            max[1].in = FFMAX(max[1].in, ingp[x]);
            min[2].in = FFMIN(min[2].in, inbp[x]);
            max[2].in = FFMAX(max[2].in, inbp[x]);
        }
    }
}

static void process_planar_16(NormalizeContext *s, AVFrame *in, AVFrame *out)
{
    for (int y = 0; y < in->height; y++) {
        uint16_t *inrp = (uint16_t *)(in->data[2] + y * in->linesize[2]);
        uint16_t *ingp = (uint16_t *)(in->data[0] + y * in->linesize[0]);
        uint16_t *inbp = (uint16_t *)(in->data[1] + y * in->linesize[1]);
        uint16_t *inap = (uint16_t *)(in->data[3] + y * in->linesize[3]);
        uint16_t *outrp = (uint16_t *)(out->data[2] + y * out->linesize[2]);
        uint16_t *outgp = (uint16_t *)(out->data[0] + y * out->linesize[0]);
        uint16_t *outbp = (uint16_t *)(out->data[1] + y * out->linesize[1]);
        uint16_t *outap = (uint16_t *)(out->data[3] + y * out->linesize[3]);
        for (int x = 0; x < in->width; x++) {
            outrp[x] = s->lut[0][inrp[x]];
            outgp[x] = s->lut[1][ingp[x]];
            outbp[x] = s->lut[2][inbp[x]];
            if (s->num_components == 4)
                // Copy alpha as-is.
                outap[x] = inap[x];
        }
    }
}

// This function is the main guts of the filter. Normalizes the input frame
// into the output frame. The frames are known to have the same dimensions
// and pixel format.
static void normalize(NormalizeContext *s, AVFrame *in, AVFrame *out)
{
    // Per-extremum, per-channel local variables.
    NormalizeLocal min[3], max[3];  // Min and max for each channel in {R,G,B}.

    float rgb_min_smoothed; // Min input range for linked normalization
    float rgb_max_smoothed; // Max input range for linked normalization
    int c;

    // First, scan the input frame to find, for each channel, the minimum
    // (min.in) and maximum (max.in) values present in the channel.
    s->find_min_max(s, in, min, max);

    // Next, for each channel, push min.in and max.in into their respective
    // histories, to determine the min.smoothed and max.smoothed for this frame.
    {
        int history_idx = s->frame_num % s->history_len;
        // Assume the history is not yet full; num_history_vals is the number
        // of frames received so far including the current frame.
        int num_history_vals = s->frame_num + 1;
        if (s->frame_num >= s->history_len) {
            // The history is full; drop oldest value and cap num_history_vals.
            for (c = 0; c < 3; c++) {
                s->min[c].history_sum -= s->min[c].history[history_idx];
                s->max[c].history_sum -= s->max[c].history[history_idx];
            }
            num_history_vals = s->history_len;
        }
        // For each extremum, update history_sum and calculate smoothed value
        // as the rolling average of the history entries.
        for (c = 0; c < 3; c++) {
            s->min[c].history_sum += (s->min[c].history[history_idx] = min[c].in);
            min[c].smoothed = s->min[c].history_sum / (float)num_history_vals;
            s->max[c].history_sum += (s->max[c].history[history_idx] = max[c].in);
            max[c].smoothed = s->max[c].history_sum / (float)num_history_vals;
        }
    }

    // Determine the input range for linked normalization. This is simply the
    // minimum of the per-channel minimums, and the maximum of the per-channel
    // maximums.
    rgb_min_smoothed = FFMIN3(min[0].smoothed, min[1].smoothed, min[2].smoothed);
    rgb_max_smoothed = FFMAX3(max[0].smoothed, max[1].smoothed, max[2].smoothed);
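    // e.g. (illustrative numbers): smoothed per-channel minima {30,12,45} and
    // maxima {200,240,210} give a linked input range of [12,240], which fully
    // linked normalization applies to all three channels alike.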

    // Now, process each channel to determine the input and output range and
    // build the lookup tables.
    for (c = 0; c < 3; c++) {
        int in_val;
        // Adjust the input range for this channel [min.smoothed,max.smoothed]
        // by mixing in the correct proportion of the linked normalization
        // input range [rgb_min_smoothed,rgb_max_smoothed].
        min[c].smoothed = (min[c].smoothed * s->independence)
                        + (rgb_min_smoothed * (1.0f - s->independence));
        max[c].smoothed = (max[c].smoothed * s->independence)
                        + (rgb_max_smoothed * (1.0f - s->independence));

        // Calculate the output range [min.out,max.out] as a ratio of the full-
        // strength output range [blackpt,whitept] and the original input range
        // [min.in,max.in], based on the user-specified filter strength.
        min[c].out = (s->sblackpt[c] * s->strength)
                   + (min[c].in * (1.0f - s->strength));
        max[c].out = (s->swhitept[c] * s->strength)
                   + (max[c].in * (1.0f - s->strength));

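        // e.g. (illustrative): with blackpt=0, an input minimum of 40 and
        // strength=0.5, min.out = 0*0.5 + 40*0.5 = 20, so the black level is
        // pulled only halfway down toward the requested black point.
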
        // Now, build a lookup table which linearly maps the adjusted input range
        // [min.smoothed,max.smoothed] to the output range [min.out,max.out].
        // Perform the linear interpolation for each x:
        //     lut[x] = (int)((float)(x - min.smoothed) * scale + min.out + 0.5)
        // where scale = (max.out - min.out) / (max.smoothed - min.smoothed)
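        // e.g. (illustrative): for a smoothed range [20,220] and output range
        // [0,255], scale = 255/200 = 1.275, so lut[20] = 0, lut[120] = 128
        // (127.5 rounded up) and lut[220] = 255.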
        if (min[c].smoothed == max[c].smoothed) {
            // There is no dynamic range to expand. No mapping for this channel.
            for (in_val = min[c].in; in_val <= max[c].in; in_val++)
                s->lut[c][in_val] = min[c].out;
        } else {
            // We must set lookup values for all values in the original input
            // range [min.in,max.in]. Since the original input range may be
            // larger than [min.smoothed,max.smoothed], some output values may
            // fall outside the output dynamic range ([0,255] at 8-bit depth,
            // [0,2^depth - 1] in general). We need to clamp them.
            float scale = (max[c].out - min[c].out) / (max[c].smoothed - min[c].smoothed);
            for (in_val = min[c].in; in_val <= max[c].in; in_val++) {
                int out_val = (in_val - min[c].smoothed) * scale + min[c].out + 0.5f;
                out_val = av_clip_uintp2_c(out_val, s->depth);
                s->lut[c][in_val] = out_val;
            }
        }
    }

    // Finally, process the pixels of the input frame using the lookup tables.
    s->process(s, in, out);

    s->frame_num++;
}

// Now we define all the functions accessible from the ff_vf_normalize class,
// which is ffmpeg's interface to our filter. See doc/filter_design.txt and
// doc/writing_filters.txt for descriptions of what these interface functions
// are expected to do.

// Set the pixel formats that our filter supports: any packed or planar RGB
// format with 8 to 16 bits per component.
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_RGB24,  AV_PIX_FMT_BGR24,
        AV_PIX_FMT_ARGB,   AV_PIX_FMT_RGBA,
        AV_PIX_FMT_ABGR,   AV_PIX_FMT_BGRA,
        AV_PIX_FMT_0RGB,   AV_PIX_FMT_RGB0,
        AV_PIX_FMT_0BGR,   AV_PIX_FMT_BGR0,
        AV_PIX_FMT_RGB48,  AV_PIX_FMT_BGR48,
        AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
        AV_PIX_FMT_GBRP,   AV_PIX_FMT_GBRP9,   AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,  AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP,  AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };
    // According to filter_design.txt, using ff_set_common_formats() this way
    // ensures the pixel formats of the input and output will be the same. That
    // saves a bit of effort possibly needing to handle format conversions.
    AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, formats);
}

// At this point we know the pixel format used for both input and output. We
// can also access the frame rate of the input video and allocate some memory
// appropriately
static int config_input(AVFilterLink *inlink)
{
    NormalizeContext *s = inlink->dst->priv;
    // Store offsets to R,G,B,A bytes respectively in each pixel
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int c, planar, scale;

    ff_fill_rgba_map(s->co, inlink->format);
    s->depth = desc->comp[0].depth;
    scale = 1 << (s->depth - 8);
    s->num_components = desc->nb_components;
    s->step = av_get_padded_bits_per_pixel(desc) >> (3 + (s->depth > 8));
    // Convert smoothing value to history_len (a count of frames to average,
    // must be at least 1). Currently this is a direct assignment, but the
    // smoothing value was originally envisaged as a number of seconds. In
    // future it would be nice to set history_len using a number of seconds,
    // but VFR video is currently an obstacle to doing so.
    s->history_len = s->smoothing + 1;
    // Allocate the history buffers -- there are 6 -- one for each extremum.
    // s->smoothing is limited to INT_MAX/8, so that (s->history_len * 6)
    // can't overflow on 32bit causing a too-small allocation.
    s->history_mem = av_malloc(s->history_len * 6 * sizeof(*s->history_mem));
    if (s->history_mem == NULL)
        return AVERROR(ENOMEM);

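    // Scale the 8-bit black/white points up to the frame's bit depth by bit
    // replication, e.g. (illustrative): at depth 10, an 8-bit 255 becomes
    // 255*4 + (255 >> 6) = 1020 + 3 = 1023, the 10-bit maximum.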
    for (c = 0; c < 3; c++) {
        s->min[c].history = s->history_mem + (c*2)   * s->history_len;
        s->max[c].history = s->history_mem + (c*2+1) * s->history_len;
        s->sblackpt[c] = scale * s->blackpt[c] + (s->blackpt[c] >> (16 - s->depth));
        s->swhitept[c] = scale * s->whitept[c] + (s->whitept[c] >> (16 - s->depth));
    }

    planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;

    if (s->depth <= 8) {
        s->find_min_max = planar ? find_min_max_planar : find_min_max;
        s->process = planar ? process_planar : process;
    } else {
        s->find_min_max = planar ? find_min_max_planar_16 : find_min_max_16;
        s->process = planar ? process_planar_16 : process_16;
    }

    return 0;
}

// Free any memory allocations here
static av_cold void uninit(AVFilterContext *ctx)
{
    NormalizeContext *s = ctx->priv;

    av_freep(&s->history_mem);
}

// This function is pretty much standard from doc/writing_filters.txt. It
// tries to do in-place filtering where possible, only allocating a new output
// frame when absolutely necessary.
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    NormalizeContext *s = ctx->priv;
    AVFrame *out;
    // Set 'direct' if we can modify the input frame in-place. Otherwise we
    // need to retrieve a new frame from the output link.
    int direct = av_frame_is_writable(in) && !ctx->is_disabled;

    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    // Now we've got the input and output frames (which may be the same frame)
    // perform the filtering with our custom function.
    normalize(s, in, out);

    // When the filter is disabled on the timeline, normalize() was run only
    // to keep the smoothing history up to date; deliver the untouched input.
    if (ctx->is_disabled) {
        av_frame_free(&out);
        return ff_filter_frame(outlink, in);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
514 
515 static const AVFilterPad inputs[] = {
516  {
517  .name = "default",
518  .type = AVMEDIA_TYPE_VIDEO,
519  .filter_frame = filter_frame,
520  .config_props = config_input,
521  },
522  { NULL }
523 };
524 
525 static const AVFilterPad outputs[] = {
526  {
527  .name = "default",
528  .type = AVMEDIA_TYPE_VIDEO,
529  },
530  { NULL }
531 };
532 
534  .name = "normalize",
535  .description = NULL_IF_CONFIG_SMALL("Normalize RGB video."),
536  .priv_size = sizeof(NormalizeContext),
537  .priv_class = &normalize_class,
538  .uninit = uninit,
540  .inputs = inputs,
541  .outputs = outputs,
544 };