vf_edgedetect.c
/*
 * Copyright (c) 2012-2014 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Edge detection filter
 *
 * @see https://en.wikipedia.org/wiki/Canny_edge_detector
 */
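
/*
 * Illustrative usage from the ffmpeg command line (the threshold values are
 * arbitrary examples, not recommended settings):
 *
 *   ffmpeg -i input.mp4 -vf edgedetect wires.mp4
 *   ffmpeg -i input.mp4 -vf edgedetect=low=0.1:high=0.4 tuned.mp4
 *   ffmpeg -i input.mp4 -vf edgedetect=mode=colormix painted.mp4
 *
 * "low", "high" and "mode" are the AVOptions declared in edgedetect_options
 * below; the thresholds are fractions of the full 8-bit range.
 */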

#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

enum FilterMode {
    MODE_WIRES,
    MODE_COLORMIX,
    NB_MODE
};

struct plane_info {
    uint8_t  *tmpbuf;
    uint16_t *gradients;
    char     *directions;
};

typedef struct {
    const AVClass *class;
    struct plane_info planes[3];
    int nb_planes;
    double   low, high;
    uint8_t  low_u8, high_u8;
    int mode;
} EdgeDetectContext;

#define OFFSET(x) offsetof(EdgeDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption edgedetect_options[] = {
    { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
    { "low",  "set low threshold",  OFFSET(low),  AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_WIRES}, 0, NB_MODE-1, FLAGS, "mode" },
        { "wires",    "white/gray wires on black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_WIRES},    INT_MIN, INT_MAX, FLAGS, "mode" },
        { "colormix", "mix colors",                0, AV_OPT_TYPE_CONST, {.i64=MODE_COLORMIX}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { NULL }
};
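
/* With the defaults above, high = 50/255 (~0.196) and low = 20/255 (~0.078)
 * of the normalized [0,1] range; both can be overridden per filter instance
 * through the "high" and "low" options. */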

AVFILTER_DEFINE_CLASS(edgedetect);

static av_cold int init(AVFilterContext *ctx)
{
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->low_u8  = edgedetect->low  * 255. + .5;
    edgedetect->high_u8 = edgedetect->high * 255. + .5;
    return 0;
}
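
/* The scaling above converts the [0,1] option values to 8-bit thresholds,
 * with the +.5 rounding to nearest: the default low of 20/255 comes back as
 * low_u8 == 20 and the default high of 50/255 as high_u8 == 50. */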

static int query_formats(AVFilterContext *ctx)
{
    const EdgeDetectContext *edgedetect = ctx->priv;
    static const enum AVPixelFormat wires_pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
    static const enum AVPixelFormat colormix_pix_fmts[] = {AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
    AVFilterFormats *fmts_list;
    const enum AVPixelFormat *pix_fmts = NULL;

    if (edgedetect->mode == MODE_WIRES) {
        pix_fmts = wires_pix_fmts;
    } else if (edgedetect->mode == MODE_COLORMIX) {
        pix_fmts = colormix_pix_fmts;
    } else {
        av_assert0(0);
    }
    fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
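
/* Note on the format lists above: "wires" outputs a single gray plane, so
 * AV_PIX_FMT_GRAY8 is enough; "colormix" additionally accepts planar RGB
 * (AV_PIX_FMT_GBRP), presumably so that each color plane can be run through
 * the detector independently and then blended back by color_mix() below. */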

static int config_props(AVFilterLink *inlink)
{
    int p;
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->nb_planes = inlink->format == AV_PIX_FMT_GRAY8 ? 1 : 3;
    for (p = 0; p < edgedetect->nb_planes; p++) {
        struct plane_info *plane = &edgedetect->planes[p];

        plane->tmpbuf     = av_malloc(inlink->w * inlink->h);
        plane->gradients  = av_calloc(inlink->w * inlink->h, sizeof(*plane->gradients));
        plane->directions = av_malloc(inlink->w * inlink->h);
        if (!plane->tmpbuf || !plane->gradients || !plane->directions)
            return AVERROR(ENOMEM);
    }
    return 0;
}

static void gaussian_blur(AVFilterContext *ctx, int w, int h,
                                uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize)
{
    int i, j;

    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    for (j = 2; j < h - 2; j++) {
        dst[0] = src[0];
        dst[1] = src[1];
        for (i = 2; i < w - 2; i++) {
            /* Gaussian mask of size 5x5 with sigma = 1.4 */
            dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
                    + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
                    + (src[-2*src_linesize + i  ] + src[2*src_linesize + i  ]) * 5
                    + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
                    + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2

                    + (src[  -src_linesize + i-2] + src[  src_linesize + i-2]) *  4
                    + (src[  -src_linesize + i-1] + src[  src_linesize + i-1]) *  9
                    + (src[  -src_linesize + i  ] + src[  src_linesize + i  ]) * 12
                    + (src[  -src_linesize + i+1] + src[  src_linesize + i+1]) *  9
                    + (src[  -src_linesize + i+2] + src[  src_linesize + i+2]) *  4

                    + src[i-2] *  5
                    + src[i-1] * 12
                    + src[i  ] * 15
                    + src[i+1] * 12
                    + src[i+2] *  5) / 159;
        }
        dst[i    ] = src[i    ];
        dst[i + 1] = src[i + 1];

        dst += dst_linesize;
        src += src_linesize;
    }
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w);
}
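
/* The convolution above is the usual 5x5 Gaussian used for Canny
 * (sigma = 1.4), written out as integer weights that sum to 159, hence the
 * final division:
 *
 *     2   4   5   4   2
 *     4   9  12   9   4
 *     5  12  15  12   5
 *     4   9  12   9   4
 *     2   4   5   4   2
 *
 * The two outermost rows and columns are copied through unfiltered. */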

enum {
    DIRECTION_45UP,
    DIRECTION_45DOWN,
    DIRECTION_HORIZONTAL,
    DIRECTION_VERTICAL
};

static int get_rounded_direction(int gx, int gy)
{
    /* reference angles:
     *   tan( pi/8) = sqrt(2)-1
     *   tan(3pi/8) = sqrt(2)+1
     * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
     * <ref-angle>, or more simply Gy against <ref-angle>*Gx
     *
     * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
     *   round((sqrt(2)-1) * (1<<16)) =  27146
     *   round((sqrt(2)+1) * (1<<16)) = 158218
     */
    if (gx) {
        int tanpi8gx, tan3pi8gx;

        if (gx < 0)
            gx = -gx, gy = -gy;
        gy <<= 16;
        tanpi8gx  =  27146 * gx;
        tan3pi8gx = 158218 * gx;
        if (gy > -tan3pi8gx && gy < -tanpi8gx)  return DIRECTION_45UP;
        if (gy > -tanpi8gx  && gy <  tanpi8gx)  return DIRECTION_HORIZONTAL;
        if (gy >  tanpi8gx  && gy <  tan3pi8gx) return DIRECTION_45DOWN;
    }
    return DIRECTION_VERTICAL;
}
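
/* Worked example for the quantization above (input values picked
 * arbitrarily): gx = 1, gy = 1 gives gy << 16 = 65536 with
 * tanpi8gx = 27146 and tan3pi8gx = 158218; since 27146 < 65536 < 158218 the
 * function returns DIRECTION_45DOWN -- the "down" diagonal, because y grows
 * downwards in image coordinates. */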

static void sobel(int w, int h,
                        uint16_t *dst, int dst_linesize,
                          int8_t *dir, int dir_linesize,
                   const uint8_t *src, int src_linesize)
{
    int i, j;

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        dir += dir_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            const int gx =
                -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
                -2*src[                i-1] + 2*src[                i+1]
                -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
            const int gy =
                -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
                -2*src[-src_linesize + i  ] + 2*src[ src_linesize + i  ]
                -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];

            dst[i] = FFABS(gx) + FFABS(gy);
            dir[i] = get_rounded_direction(gx, gy);
        }
    }
}
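
/* The magnitude above uses the cheap L1 approximation |gx| + |gy| rather than
 * sqrt(gx*gx + gy*gy).  With 8-bit input each Sobel component is bounded by
 * 4*255 = 1020 -- the [-1020;1020] range assumed in get_rounded_direction() --
 * so the summed magnitude fits comfortably in the 16-bit gradients plane. */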

static void non_maximum_suppression(int w, int h,
                                          uint8_t *dst, int dst_linesize,
                                    const  int8_t *dir, int dir_linesize,
                                    const uint16_t *src, int src_linesize)
{
    int i, j;

#define COPY_MAXIMA(ay, ax, by, bx) do {                \
    if (src[i] > src[(ay)*src_linesize + i+(ax)] &&     \
        src[i] > src[(by)*src_linesize + i+(bx)])       \
        dst[i] = av_clip_uint8(src[i]);                 \
} while (0)

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        dir += dir_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            switch (dir[i]) {
            case DIRECTION_45UP:       COPY_MAXIMA( 1, -1, -1,  1); break;
            case DIRECTION_45DOWN:     COPY_MAXIMA(-1, -1,  1,  1); break;
            case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1,  0,  1); break;
            case DIRECTION_VERTICAL:   COPY_MAXIMA(-1,  0,  1,  0); break;
            }
        }
    }
}
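
/* In the pass above, each gradient sample is compared with its two neighbours
 * along the quantized gradient direction (left/right for DIRECTION_HORIZONTAL,
 * up/down for DIRECTION_VERTICAL, the two diagonals otherwise); only strict
 * local maxima are kept, clipped to 8 bits, while everything else stays at the
 * zero value the caller memset()s into the destination beforehand. */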

static void double_threshold(int low, int high, int w, int h,
                                   uint8_t *dst, int dst_linesize,
                             const uint8_t *src, int src_linesize)
{
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            if (src[i] > high) {
                dst[i] = src[i];
                continue;
            }

            if (!(!i || i == w - 1 || !j || j == h - 1) &&
                src[i] > low &&
                (src[-src_linesize + i-1] > high ||
                 src[-src_linesize + i  ] > high ||
                 src[-src_linesize + i+1] > high ||
                 src[                i-1] > high ||
                 src[                i+1] > high ||
                 src[ src_linesize + i-1] > high ||
                 src[ src_linesize + i  ] > high ||
                 src[ src_linesize + i+1] > high))
                dst[i] = src[i];
            else
                dst[i] = 0;
        }
        dst += dst_linesize;
        src += src_linesize;
    }
}
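
/* Hysteresis thresholding: pixels above the high threshold are kept as-is,
 * and non-border pixels above the low threshold survive only if at least one
 * of their eight neighbours exceeds the high threshold.  Unlike a full Canny
 * implementation, this is a single local pass: weak edges are not traced
 * recursively from strong ones. */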

static void color_mix(int w, int h,
                            uint8_t *dst, int dst_linesize,
                      const uint8_t *src, int src_linesize)
{
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++)
            dst[i] = (dst[i] + src[i]) >> 1;
        dst += dst_linesize;
        src += src_linesize;
    }
}
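
/* color_mix() simply averages each output sample with the corresponding input
 * sample ((dst + src) >> 1), which is what blends the detected edges back over
 * the original planes in MODE_COLORMIX. */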

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int p, direct = 0;
    AVFrame *out;

    if (edgedetect->mode != MODE_COLORMIX && av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (p = 0; p < edgedetect->nb_planes; p++) {
        struct plane_info *plane = &edgedetect->planes[p];
        uint8_t  *tmpbuf     = plane->tmpbuf;
        uint16_t *gradients  = plane->gradients;
        int8_t   *directions = plane->directions;

        /* gaussian filter to reduce noise */
        gaussian_blur(ctx, inlink->w, inlink->h,
                      tmpbuf,      inlink->w,
                      in->data[p], in->linesize[p]);

        /* compute the 16-bit gradients and directions for the next step */
        sobel(inlink->w, inlink->h,
              gradients,  inlink->w,
              directions, inlink->w,
              tmpbuf,     inlink->w);

        /* non_maximum_suppression() will actually keep & clip what's necessary and
         * ignore the rest, so we need a clean output buffer */
        memset(tmpbuf, 0, inlink->w * inlink->h);
        non_maximum_suppression(inlink->w, inlink->h,
                                tmpbuf,     inlink->w,
                                directions, inlink->w,
                                gradients,  inlink->w);

        /* keep high values, or low values surrounded by high values */
        double_threshold(edgedetect->low_u8, edgedetect->high_u8,
                         inlink->w, inlink->h,
                         out->data[p], out->linesize[p],
                         tmpbuf,       inlink->w);

        if (edgedetect->mode == MODE_COLORMIX) {
            color_mix(inlink->w, inlink->h,
                      out->data[p], out->linesize[p],
                      in->data[p], in->linesize[p]);
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int p;
    EdgeDetectContext *edgedetect = ctx->priv;

    for (p = 0; p < edgedetect->nb_planes; p++) {
        struct plane_info *plane = &edgedetect->planes[p];
        av_freep(&plane->tmpbuf);
        av_freep(&plane->gradients);
        av_freep(&plane->directions);
    }
}

static const AVFilterPad edgedetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad edgedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_edgedetect = {
    .name          = "edgedetect",
    .description   = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
    .priv_size     = sizeof(EdgeDetectContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = edgedetect_inputs,
    .outputs       = edgedetect_outputs,
    .priv_class    = &edgedetect_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};