FFmpeg
vf_gblur.c
/*
 * Copyright (c) 2011 Pascal Getreuer
 * Copyright (c) 2016 Paul B Mahol
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

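/**
 * @file
 * Gaussian blur video filter.
 *
 * Each selected plane is copied into a float work buffer and blurred with a
 * recursive (IIR) approximation of a Gaussian: 'steps' first-order passes in
 * the horizontal and vertical directions, followed by a postscale and clip
 * back to the original range (see gaussianiir2d() below; cf. the 2011
 * copyright by Pascal Getreuer for the original recursive Gaussian code).
 */
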
#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gblur.h"
#include "internal.h"
#include "video.h"

#define OFFSET(x) offsetof(GBlurContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption gblur_options[] = {
    { "sigma",  "set sigma",            OFFSET(sigma),  AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0.0, 1024, FLAGS },
    { "steps",  "set number of steps",  OFFSET(steps),  AV_OPT_TYPE_INT,   {.i64=1},     1,    6, FLAGS },
    { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   {.i64=0xF},   0,  0xF, FLAGS },
    { "sigmaV", "set vertical sigma",   OFFSET(sigmaV), AV_OPT_TYPE_FLOAT, {.dbl=-1},   -1, 1024, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(gblur);

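/*
 * Illustrative command-line use of the options declared above, e.g.:
 *
 *     ffmpeg -i input.mp4 -vf "gblur=sigma=3:steps=2" output.mp4
 *
 * sigmaV defaults to -1, which means "reuse the horizontal sigma" (resolved
 * in config_input() below). All options carry AV_OPT_FLAG_RUNTIME_PARAM, so
 * they can also be changed while the filter is running via process_command.
 */
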
typedef struct ThreadData {
    int height;
    int width;
} ThreadData;

static void postscale_c(float *buffer, int length,
                        float postscale, float min, float max)
{
    for (int i = 0; i < length; i++) {
        buffer[i] *= postscale;
        buffer[i] = av_clipf(buffer[i], min, max);
    }
}

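/*
 * One horizontal pass of the recursive blur over a band of rows. For each
 * row, 'steps' iterations of a causal first-order recursion
 *     ptr[x] += nu * ptr[x - 1]
 * and the matching anti-causal sweep
 *     ptr[x - 1] += nu * ptr[x]
 * are applied; the first and last samples of a row are scaled by 'bscale'
 * to account for the boundary. 'localbuf' is meant for the arch-specific
 * (x86) implementations and is ignored by this C version.
 */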
static void horiz_slice_c(float *buffer, int width, int height, int steps,
                          float nu, float bscale, float *localbuf)
{
    int step, x, y;
    float *ptr;
    for (y = 0; y < height; y++) {
        for (step = 0; step < steps; step++) {
            ptr = buffer + width * y;
            ptr[0] *= bscale;

            /* Filter rightwards */
            for (x = 1; x < width; x++)
                ptr[x] += nu * ptr[x - 1];
            ptr[x = width - 1] *= bscale;

            /* Filter leftwards */
            for (; x > 0; x--)
                ptr[x - 1] += nu * ptr[x];
        }
    }
}

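/*
 * Slice-threading worker: job jobnr of nb_jobs filters the band of rows
 * [slice_start, slice_end). Rows are independent of each other, so the
 * bands can be processed concurrently.
 */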
static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const float boundaryscale = s->boundaryscale;
    const int steps = s->steps;
    const float nu = s->nu;
    float *buffer = s->buffer;
    float *localbuf = NULL;

    if (s->localbuf)
        localbuf = s->localbuf + s->stride * width * slice_start;

    s->horiz_slice(buffer + width * slice_start, width, slice_end - slice_start,
                   steps, nu, boundaryscale, localbuf);
    emms_c();
    return 0;
}

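/*
 * Vertical counterpart of the horizontal pass, applied to the columns
 * [column_begin, column_end). Columns are handled 'column_step' at a time
 * (8 for the aligned bulk, 1 for the remainder) so the inner loops walk the
 * buffer row by row and stay cache friendly.
 */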
static void do_vertical_columns(float *buffer, int width, int height,
                                int column_begin, int column_end, int steps,
                                float nu, float boundaryscale, int column_step)
{
    const int numpixels = width * height;
    int i, x, k, step;
    float *ptr;
    for (x = column_begin; x < column_end;) {
        for (step = 0; step < steps; step++) {
            ptr = buffer + x;
            for (k = 0; k < column_step; k++) {
                ptr[k] *= boundaryscale;
            }
            /* Filter downwards */
            for (i = width; i < numpixels; i += width) {
                for (k = 0; k < column_step; k++) {
                    ptr[i + k] += nu * ptr[i - width + k];
                }
            }
            i = numpixels - width;

            for (k = 0; k < column_step; k++)
                ptr[i + k] *= boundaryscale;

            /* Filter upwards */
            for (; i > 0; i -= width) {
                for (k = 0; k < column_step; k++)
                    ptr[i - width + k] += nu * ptr[i + k];
            }
        }
        x += column_step;
    }
}

static void verti_slice_c(float *buffer, int width, int height,
                          int slice_start, int slice_end, int steps,
                          float nu, float boundaryscale)
{
    int aligned_end = slice_start + (((slice_end - slice_start) >> 3) << 3);
    /* Filter vertically along columns (process 8 columns in each step) */
    do_vertical_columns(buffer, width, height, slice_start, aligned_end,
                        steps, nu, boundaryscale, 8);
    /* Filter un-aligned columns one by one */
    do_vertical_columns(buffer, width, height, aligned_end, slice_end,
                        steps, nu, boundaryscale, 1);
}

static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int slice_start = (width * jobnr) / nb_jobs;
    const int slice_end   = (width * (jobnr+1)) / nb_jobs;
    const float boundaryscale = s->boundaryscaleV;
    const int steps = s->steps;
    const float nu = s->nuV;
    float *buffer = s->buffer;

    s->verti_slice(buffer, width, height, slice_start, slice_end,
                   steps, nu, boundaryscale);

    return 0;
}

static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const float max = s->flt ? FLT_MAX : (1 << s->depth) - 1;
    const float min = s->flt ? -FLT_MAX : 0.f;
    const int height = td->height;
    const int width = td->width;
    const int awidth = FFALIGN(width, 64);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const float postscale = s->postscale * s->postscaleV;
    const int slice_size = slice_end - slice_start;

    s->postscale_slice(s->buffer + slice_start * awidth,
                       slice_size * awidth, postscale, min, max);

    return 0;
}

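/*
 * Blur one plane that has already been copied into s->buffer: a threaded
 * horizontal pass, a threaded vertical pass, and a threaded postscale that
 * rescales the samples and clips them back into the valid range.
 */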
static void gaussianiir2d(AVFilterContext *ctx, int plane)
{
    GBlurContext *s = ctx->priv;
    const int width = s->planewidth[plane];
    const int height = s->planeheight[plane];
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    ThreadData td;

    if (s->sigma <= 0 || s->steps < 0)
        return;

    td.width = width;
    td.height = height;
    ff_filter_execute(ctx, filter_horizontally, &td,
                      NULL, FFMIN(height, nb_threads));
    ff_filter_execute(ctx, filter_vertically, &td,
                      NULL, FFMIN(width, nb_threads));
    ff_filter_execute(ctx, filter_postscale, &td,
                      NULL, FFMIN(width * height, nb_threads));
}

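/* Planar YUV, GBR and gray formats, 8-16 bit integer as well as 32-bit
 * float, are supported. */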
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats_from_list(ctx, pix_fmts);
}

void ff_gblur_init(GBlurContext *s)
{
    s->localbuf = NULL;
    s->horiz_slice = horiz_slice_c;
    s->verti_slice = verti_slice_c;
    s->postscale_slice = postscale_c;
    if (ARCH_X86)
        ff_gblur_init_x86(s);
}

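/*
 * Per-link setup: record the bit depth, the float flag and the per-plane
 * dimensions, allocate the intermediate float buffer (padded to multiples
 * of 64, matching the awidth stride used in filter_postscale()), and fall
 * back to the horizontal sigma when sigmaV is left at its -1 default.
 */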
static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    GBlurContext *s = inlink->dst->priv;

    s->depth = desc->comp[0].depth;
    s->flt = !!(desc->flags & AV_PIX_FMT_FLAG_FLOAT);
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->buffer = av_malloc_array(FFALIGN(inlink->w, 64), FFALIGN(inlink->h, 64) * sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (s->sigmaV < 0) {
        s->sigmaV = s->sigma;
    }
    ff_gblur_init(s);

    return 0;
}

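/*
 * Derive the per-pass coefficients from sigma and the number of steps,
 * using the usual first-order IIR approximation of a Gaussian:
 *
 *     lambda        = sigma^2 / (2 * steps)
 *     nu            = (1 + 2*lambda - sqrt(1 + 4*lambda)) / (2 * lambda)
 *     boundaryscale = 1 / (1 - nu)
 *     postscale     = (nu / lambda)^steps
 *
 * nu is the feedback coefficient of each pass; the horizontal and vertical
 * postscale factors are multiplied together and applied once at the end.
 */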
static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
{
    double dnu, lambda;

    lambda = (sigma * sigma) / (2.0 * steps);
    dnu = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) / (2.0 * lambda);
    *postscale = pow(dnu / lambda, steps);
    *boundaryscale = 1.0 / (1.0 - dnu);
    *nu = (float)dnu;
}

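/*
 * Main frame callback: recompute the coefficients (the options may have
 * changed at runtime), obtain a writable output frame, then for every
 * selected plane copy the samples into the float work buffer, run
 * gaussianiir2d() and write the blurred values back at the original depth.
 */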
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    GBlurContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int plane;

    set_params(s->sigma,  s->steps, &s->postscale,  &s->boundaryscale,  &s->nu);
    set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        float *bptr = s->buffer;
        const uint8_t *src = in->data[plane];
        const uint16_t *src16 = (const uint16_t *)in->data[plane];
        uint8_t *dst = out->data[plane];
        uint16_t *dst16 = (uint16_t *)out->data[plane];
        int y, x;

        if (!s->sigma || !(s->planes & (1 << plane))) {
            if (out != in)
                av_image_copy_plane(out->data[plane], out->linesize[plane],
                                    in->data[plane], in->linesize[plane],
                                    width * ((s->depth + 7) / 8), height);
            continue;
        }

        if (s->flt) {
            av_image_copy_plane((uint8_t *)bptr, width * sizeof(float),
                                in->data[plane], in->linesize[plane],
                                width * sizeof(float), height);
        } else if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src[x];
                }
                bptr += width;
                src += in->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src16[x];
                }
                bptr += width;
                src16 += in->linesize[plane] / 2;
            }
        }

        gaussianiir2d(ctx, plane);

        bptr = s->buffer;
        if (s->flt) {
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                (uint8_t *)bptr, width * sizeof(float),
                                width * sizeof(float), height);
        } else if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst[x] = bptr[x];
                }
                bptr += width;
                dst += out->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst16[x] = bptr[x];
                }
                bptr += width;
                dst16 += out->linesize[plane] / 2;
            }
        }
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GBlurContext *s = ctx->priv;

    av_freep(&s->buffer);
    if (s->localbuf)
        av_free(s->localbuf);
}

static const AVFilterPad gblur_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad gblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_gblur = {
    .name            = "gblur",
    .description     = NULL_IF_CONFIG_SMALL("Apply Gaussian Blur filter."),
    .priv_size       = sizeof(GBlurContext),
    .priv_class      = &gblur_class,
    .uninit          = uninit,
    FILTER_INPUTS(gblur_inputs),
    FILTER_OUTPUTS(gblur_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};