FFmpeg
vf_edgedetect.c
/*
 * Copyright (c) 2012 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Edge detection filter
 *
 * @see https://en.wikipedia.org/wiki/Canny_edge_detector
 */

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct {
    const AVClass *class;
    uint8_t  *tmpbuf;
    uint16_t *gradients;
    char     *directions;
    double    low, high;
    uint8_t   low_u8, high_u8;
} EdgeDetectContext;

#define OFFSET(x) offsetof(EdgeDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption edgedetect_options[] = {
    { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
    { "low",  "set low threshold",  OFFSET(low),  AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(edgedetect);
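
/*
 * Example usage (illustrative; input/output names are placeholders):
 *
 *   ffmpeg -i input.mp4 -vf edgedetect=low=0.1:high=0.4 output.mp4
 *
 * Both thresholds are normalized to [0,1]; the defaults correspond to
 * 20/255 (low) and 50/255 (high).
 */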

static av_cold int init(AVFilterContext *ctx, const char *args)
{
    int ret;
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->class = &edgedetect_class;
    av_opt_set_defaults(edgedetect);

    if ((ret = av_set_options_string(edgedetect, args, "=", ":")) < 0)
        return ret;

    edgedetect->low_u8  = edgedetect->low  * 255. + .5;
    edgedetect->high_u8 = edgedetect->high * 255. + .5;
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->tmpbuf     = av_malloc(inlink->w * inlink->h);
    edgedetect->gradients  = av_calloc(inlink->w * inlink->h, sizeof(*edgedetect->gradients));
    edgedetect->directions = av_malloc(inlink->w * inlink->h);
    if (!edgedetect->tmpbuf || !edgedetect->gradients || !edgedetect->directions)
        return AVERROR(ENOMEM);
    return 0;
}

static void gaussian_blur(AVFilterContext *ctx, int w, int h,
                                uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize)
{
    int i, j;

    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    for (j = 2; j < h - 2; j++) {
        dst[0] = src[0];
        dst[1] = src[1];
        for (i = 2; i < w - 2; i++) {
            /* Gaussian mask of size 5x5 with sigma = 1.4 */
            dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
                    + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
                    + (src[-2*src_linesize + i  ] + src[2*src_linesize + i  ]) * 5
                    + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
                    + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2

                    + (src[  -src_linesize + i-2] + src[  src_linesize + i-2]) *  4
                    + (src[  -src_linesize + i-1] + src[  src_linesize + i-1]) *  9
                    + (src[  -src_linesize + i  ] + src[  src_linesize + i  ]) * 12
                    + (src[  -src_linesize + i+1] + src[  src_linesize + i+1]) *  9
                    + (src[  -src_linesize + i+2] + src[  src_linesize + i+2]) *  4

                    + src[i-2] *  5
                    + src[i-1] * 12
                    + src[i  ] * 15
                    + src[i+1] * 12
                    + src[i+2] *  5) / 159;
        }
        dst[i    ] = src[i    ];
        dst[i + 1] = src[i + 1];

        dst += dst_linesize;
        src += src_linesize;
    }
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w);
}
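
/*
 * Note on the blur above: the 25 kernel coefficients (2,4,5,4,2 / 4,9,12,9,4 /
 * 5,12,15,12,5 / ...) sum to 159, hence the final division, so the kernel is
 * properly normalized. This is the 5x5, sigma = 1.4 Gaussian mask commonly
 * quoted in descriptions of the Canny edge detector.
 */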

enum {
    DIRECTION_45UP,
    DIRECTION_45DOWN,
    DIRECTION_HORIZONTAL,
    DIRECTION_VERTICAL,
};

static int get_rounded_direction(int gx, int gy)
{
    /* reference angles:
     *   tan( pi/8) = sqrt(2)-1
     *   tan(3pi/8) = sqrt(2)+1
     * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
     * <ref-angle>, or more simply Gy against <ref-angle>*Gx
     *
     * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
     *   round((sqrt(2)-1) * (1<<16)) =  27146
     *   round((sqrt(2)+1) * (1<<16)) = 158218
     */
    if (gx) {
        int tanpi8gx, tan3pi8gx;

        if (gx < 0)
            gx = -gx, gy = -gy;
        gy <<= 16;
        tanpi8gx  =  27146 * gx;
        tan3pi8gx = 158218 * gx;
        if (gy > -tan3pi8gx && gy < -tanpi8gx)  return DIRECTION_45UP;
        if (gy > -tanpi8gx  && gy <  tanpi8gx)  return DIRECTION_HORIZONTAL;
        if (gy >  tanpi8gx  && gy <  tan3pi8gx) return DIRECTION_45DOWN;
    }
    return DIRECTION_VERTICAL;
}
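
/*
 * Worked example for the rounding above (values chosen for illustration):
 * with gx = 10 and gy = 3, gy/gx = 0.3 lies between -tan(pi/8) and tan(pi/8)
 * (~0.414), so the gradient is rounded to DIRECTION_HORIZONTAL. In the
 * fixed-point comparison, gy<<16 = 196608 indeed falls inside
 * (-tanpi8gx, tanpi8gx) = (-271460, 271460).
 */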

static void sobel(AVFilterContext *ctx, int w, int h,
                        uint16_t *dst, int dst_linesize,
                  const uint8_t  *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            const int gx =
                -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
                -2*src[                i-1] + 2*src[                i+1]
                -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
            const int gy =
                -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
                -2*src[-src_linesize + i  ] + 2*src[ src_linesize + i  ]
                -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];

            dst[i] = FFABS(gx) + FFABS(gy);
            edgedetect->directions[j*w + i] = get_rounded_direction(gx, gy);
        }
    }
}
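
/*
 * The gradient magnitude stored above is the cheap |Gx| + |Gy| approximation
 * of sqrt(Gx^2 + Gy^2). With 8-bit input, |Gx| and |Gy| are each at most
 * 4*255 = 1020, so the sum fits comfortably in the uint16_t gradient plane;
 * it is clipped back to 8 bits by av_clip_uint8() during non-maximum
 * suppression.
 */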

static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
                                          uint8_t  *dst, int dst_linesize,
                                    const uint16_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

#define COPY_MAXIMA(ay, ax, by, bx) do {                \
    if (src[i] > src[(ay)*src_linesize + i+(ax)] &&     \
        src[i] > src[(by)*src_linesize + i+(bx)])       \
        dst[i] = av_clip_uint8(src[i]);                 \
} while (0)

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            switch (edgedetect->directions[j*w + i]) {
            case DIRECTION_45UP:       COPY_MAXIMA( 1, -1, -1,  1); break;
            case DIRECTION_45DOWN:     COPY_MAXIMA(-1, -1,  1,  1); break;
            case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1,  0,  1); break;
            case DIRECTION_VERTICAL:   COPY_MAXIMA(-1,  0,  1,  0); break;
            }
        }
    }
}

static void double_threshold(AVFilterContext *ctx, int w, int h,
                                   uint8_t *dst, int dst_linesize,
                             const uint8_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;
    const int low  = edgedetect->low_u8;
    const int high = edgedetect->high_u8;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            if (src[i] > high) {
                dst[i] = src[i];
                continue;
            }

            /* border pixels are excluded since all 8 of their neighbours can
             * not be addressed safely */
            if (!(!i || i == w - 1 || !j || j == h - 1) &&
                src[i] > low &&
                (src[-src_linesize + i-1] > high ||
                 src[-src_linesize + i  ] > high ||
                 src[-src_linesize + i+1] > high ||
                 src[                i-1] > high ||
                 src[                i+1] > high ||
                 src[ src_linesize + i-1] > high ||
                 src[ src_linesize + i  ] > high ||
                 src[ src_linesize + i+1] > high))
                dst[i] = src[i];
            else
                dst[i] = 0;
        }
        dst += dst_linesize;
        src += src_linesize;
    }
}
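
/*
 * The pass above keeps a pixel if it is above the high threshold, or above
 * the low threshold with at least one 8-connected neighbour above the high
 * threshold. It is a single-pass approximation of Canny hysteresis: weak
 * edges are not tracked iteratively from strong ones.
 */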

static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    uint8_t  *tmpbuf    = edgedetect->tmpbuf;
    uint16_t *gradients = edgedetect->gradients;
    AVFilterBufferRef *out;

    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    if (!out) {
        avfilter_unref_bufferp(&in);
        return AVERROR(ENOMEM);
    }
    avfilter_copy_buffer_ref_props(out, in);

    /* gaussian filter to reduce noise  */
    gaussian_blur(ctx, inlink->w, inlink->h,
                  tmpbuf,      inlink->w,
                  in->data[0], in->linesize[0]);

    /* compute the 16-bit gradients and directions for the next step */
    sobel(ctx, inlink->w, inlink->h,
          gradients, inlink->w,
          tmpbuf,    inlink->w);

    /* non_maximum_suppression() will actually keep & clip what's necessary and
     * ignore the rest, so we need a clean output buffer */
    memset(tmpbuf, 0, inlink->w * inlink->h);
    non_maximum_suppression(ctx, inlink->w, inlink->h,
                            tmpbuf,    inlink->w,
                            gradients, inlink->w);

    /* keep high values, or low values surrounded by high values */
    double_threshold(ctx, inlink->w, inlink->h,
                     out->data[0], out->linesize[0],
                     tmpbuf,       inlink->w);

    avfilter_unref_bufferp(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    EdgeDetectContext *edgedetect = ctx->priv;
    av_freep(&edgedetect->tmpbuf);
    av_freep(&edgedetect->gradients);
    av_freep(&edgedetect->directions);
}

static const AVFilterPad edgedetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
        .min_perms    = AV_PERM_READ,
    },
    { NULL }
};

static const AVFilterPad edgedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_edgedetect = {
    .name          = "edgedetect",
    .description   = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
    .priv_size     = sizeof(EdgeDetectContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = edgedetect_inputs,
    .outputs       = edgedetect_outputs,
    .priv_class    = &edgedetect_class,
};