/* FFmpeg — libavfilter/vf_edgedetect.c (source listing) */
1 /*
2  * Copyright (c) 2012 Clément Bœsch
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Edge detection filter
24  *
25  * @see https://en.wikipedia.org/wiki/Canny_edge_detector
26  */
27 
28 #include "libavutil/opt.h"
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "video.h"
33 
34 typedef struct {
35  const AVClass *class;
37  uint16_t *gradients;
38  char *directions;
39  double low, high;
40  uint8_t low_u8, high_u8;
42 
#define OFFSET(x) offsetof(EdgeDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* User options: the two hysteresis thresholds, expressed as ratios in
 * [0;1] and quantized to 8-bit in init().  Defaults match 50/255 and
 * 20/255 respectively. */
static const AVOption edgedetect_options[] = {
    { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
    { "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(edgedetect);
52 
53 static av_cold int init(AVFilterContext *ctx)
54 {
55  EdgeDetectContext *edgedetect = ctx->priv;
56 
57  edgedetect->low_u8 = edgedetect->low * 255. + .5;
58  edgedetect->high_u8 = edgedetect->high * 255. + .5;
59  return 0;
60 }
61 
63 {
64  static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
66  return 0;
67 }
68 
69 static int config_props(AVFilterLink *inlink)
70 {
71  AVFilterContext *ctx = inlink->dst;
72  EdgeDetectContext *edgedetect = ctx->priv;
73 
74  edgedetect->tmpbuf = av_malloc(inlink->w * inlink->h);
75  edgedetect->gradients = av_calloc(inlink->w * inlink->h, sizeof(*edgedetect->gradients));
76  edgedetect->directions = av_malloc(inlink->w * inlink->h);
77  if (!edgedetect->tmpbuf || !edgedetect->gradients || !edgedetect->directions)
78  return AVERROR(ENOMEM);
79  return 0;
80 }
81 
/**
 * 5x5 integer Gaussian convolution (sigma = 1.4) to reduce noise before
 * the gradient pass; the kernel is normalized by the sum of its
 * coefficients (159).  The two outermost rows and columns are copied
 * unfiltered since the kernel would read outside the frame there.
 *
 * NOTE(review): assumes w >= 4 and h >= 4 — for smaller frames the
 * unconditional border memcpy pairs would overrun the planes; confirm
 * the framework's minimum frame size.
 */
static void gaussian_blur(AVFilterContext *ctx, int w, int h,
                          uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize)
{
    int i, j;

    /* top two rows: copied as-is */
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    for (j = 2; j < h - 2; j++) {
        /* left two columns: copied as-is */
        dst[0] = src[0];
        dst[1] = src[1];
        for (i = 2; i < w - 2; i++) {
            /* Gaussian mask of size 5x5 with sigma = 1.4 */
            dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
                    + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
                    + (src[-2*src_linesize + i  ] + src[2*src_linesize + i  ]) * 5
                    + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
                    + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2

                    + (src[  -src_linesize + i-2] + src[  src_linesize + i-2]) *  4
                    + (src[  -src_linesize + i-1] + src[  src_linesize + i-1]) *  9
                    + (src[  -src_linesize + i  ] + src[  src_linesize + i  ]) * 12
                    + (src[  -src_linesize + i+1] + src[  src_linesize + i+1]) *  9
                    + (src[  -src_linesize + i+2] + src[  src_linesize + i+2]) *  4

                    + src[i-2] *  5
                    + src[i-1] * 12
                    + src[i  ] * 15
                    + src[i+1] * 12
                    + src[i+2] *  5) / 159;
        }
        /* right two columns (i == w-2 after the loop): copied as-is */
        dst[i    ] = src[i    ];
        dst[i + 1] = src[i + 1];

        dst += dst_linesize;
        src += src_linesize;
    }
    /* bottom two rows: copied as-is */
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w);
}
122 
/* Gradient directions quantized to multiples of pi/4, as produced by
 * get_rounded_direction() and consumed by non_maximum_suppression().
 * (Enumerator list restored from the switch cases below.) */
enum {
    DIRECTION_45UP,
    DIRECTION_45DOWN,
    DIRECTION_HORIZONTAL,
    DIRECTION_VERTICAL,
};
129 
130 static int get_rounded_direction(int gx, int gy)
131 {
132  /* reference angles:
133  * tan( pi/8) = sqrt(2)-1
134  * tan(3pi/8) = sqrt(2)+1
135  * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
136  * <ref-angle>, or more simply Gy against <ref-angle>*Gx
137  *
138  * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
139  * round((sqrt(2)-1) * (1<<16)) = 27146
140  * round((sqrt(2)+1) * (1<<16)) = 158218
141  */
142  if (gx) {
143  int tanpi8gx, tan3pi8gx;
144 
145  if (gx < 0)
146  gx = -gx, gy = -gy;
147  gy <<= 16;
148  tanpi8gx = 27146 * gx;
149  tan3pi8gx = 158218 * gx;
150  if (gy > -tan3pi8gx && gy < -tanpi8gx) return DIRECTION_45UP;
151  if (gy > -tanpi8gx && gy < tanpi8gx) return DIRECTION_HORIZONTAL;
152  if (gy > tanpi8gx && gy < tan3pi8gx) return DIRECTION_45DOWN;
153  }
154  return DIRECTION_VERTICAL;
155 }
156 
/**
 * 3x3 Sobel operator: stores the gradient magnitude approximation
 * |Gx| + |Gy| in dst (16-bit, bounded by 2040) and the rounded gradient
 * direction of each pixel in edgedetect->directions for the following
 * non-maximum-suppression pass.  The one-pixel frame border is left
 * untouched (dst was zeroed by av_calloc in config_props()).
 */
static void sobel(AVFilterContext *ctx, int w, int h,
                  uint16_t *dst, int dst_linesize,
                  const uint8_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

    for (j = 1; j < h - 1; j++) {
        /* advance first: row 0 is skipped, pointers track row j */
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            /* horizontal derivative (Sobel x kernel) */
            const int gx =
                -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
                -2*src[                i-1] + 2*src[                i+1]
                -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
            /* vertical derivative (Sobel y kernel) */
            const int gy =
                -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
                -2*src[-src_linesize + i  ] + 2*src[ src_linesize + i  ]
                -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];

            dst[i] = FFABS(gx) + FFABS(gy);
            edgedetect->directions[j*w + i] = get_rounded_direction(gx, gy);
        }
    }
}
182 
/**
 * Thin the edges: keep a pixel only if its gradient magnitude is a
 * local maximum along its gradient direction (compared against the two
 * neighbours on that axis), clipping the 16-bit magnitude to 8 bits.
 * Non-maxima are simply not written, so dst must be pre-zeroed by the
 * caller.  The one-pixel frame border is skipped.
 */
static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
                                    uint8_t *dst, int dst_linesize,
                                    const uint16_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

/* Write src[i] (clipped to 8 bits) iff it is strictly greater than both
 * neighbours at row/col offsets (ay,ax) and (by,bx). */
#define COPY_MAXIMA(ay, ax, by, bx) do {                \
    if (src[i] > src[(ay)*src_linesize + i+(ax)] &&     \
        src[i] > src[(by)*src_linesize + i+(bx)])       \
        dst[i] = av_clip_uint8(src[i]);                 \
} while (0)

    for (j = 1; j < h - 1; j++) {
        /* advance first: row 0 is skipped, pointers track row j */
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            /* neighbours are taken along the gradient direction */
            switch (edgedetect->directions[j*w + i]) {
            case DIRECTION_45UP:        COPY_MAXIMA( 1, -1, -1,  1); break;
            case DIRECTION_45DOWN:      COPY_MAXIMA(-1, -1,  1,  1); break;
            case DIRECTION_HORIZONTAL:  COPY_MAXIMA( 0, -1,  0,  1); break;
            case DIRECTION_VERTICAL:    COPY_MAXIMA(-1,  0,  1,  0); break;
            }
        }
    }
}
209 
210 static void double_threshold(AVFilterContext *ctx, int w, int h,
211  uint8_t *dst, int dst_linesize,
212  const uint8_t *src, int src_linesize)
213 {
214  int i, j;
215  EdgeDetectContext *edgedetect = ctx->priv;
216  const int low = edgedetect->low_u8;
217  const int high = edgedetect->high_u8;
218 
219  for (j = 0; j < h; j++) {
220  for (i = 0; i < w; i++) {
221  if (src[i] > high) {
222  dst[i] = src[i];
223  continue;
224  }
225 
226  if ((!i || i == w - 1 || !j || j == h - 1) &&
227  src[i] > low &&
228  (src[-src_linesize + i-1] > high ||
229  src[-src_linesize + i ] > high ||
230  src[-src_linesize + i+1] > high ||
231  src[ i-1] > high ||
232  src[ i+1] > high ||
233  src[ src_linesize + i-1] > high ||
234  src[ src_linesize + i ] > high ||
235  src[ src_linesize + i+1] > high))
236  dst[i] = src[i];
237  else
238  dst[i] = 0;
239  }
240  dst += dst_linesize;
241  src += src_linesize;
242  }
243 }
244 
/**
 * Run the full Canny pipeline on one gray8 frame:
 * blur -> sobel -> non-maximum suppression -> hysteresis thresholding.
 * Writes in-place when the input frame is writable, otherwise into a
 * fresh output buffer; takes ownership of @p in in both cases.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    uint8_t  *tmpbuf    = edgedetect->tmpbuf;
    uint16_t *gradients = edgedetect->gradients;
    int direct = 0;     /* 1 when filtering in-place into "in" */
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* gaussian filter to reduce noise */
    gaussian_blur(ctx, inlink->w, inlink->h,
                  tmpbuf,      inlink->w,
                  in->data[0], in->linesize[0]);

    /* compute the 16-bits gradients and directions for the next step */
    sobel(ctx, inlink->w, inlink->h,
          gradients, inlink->w,
          tmpbuf,    inlink->w);

    /* non_maximum_suppression() will actually keep & clip what's necessary and
     * ignore the rest, so we need a clean output buffer */
    memset(tmpbuf, 0, inlink->w * inlink->h);
    non_maximum_suppression(ctx, inlink->w, inlink->h,
                            tmpbuf,    inlink->w,
                            gradients, inlink->w);

    /* keep high values, or low values surrounded by high values */
    double_threshold(ctx, inlink->w, inlink->h,
                     out->data[0], out->linesize[0],
                     tmpbuf,       inlink->w);

    /* in the copy case the input frame is no longer needed */
    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
293 
294 static av_cold void uninit(AVFilterContext *ctx)
295 {
296  EdgeDetectContext *edgedetect = ctx->priv;
297  av_freep(&edgedetect->tmpbuf);
298  av_freep(&edgedetect->gradients);
299  av_freep(&edgedetect->directions);
300 }
301 
/* Single video input: dimensions trigger buffer allocation via
 * config_props, frames are processed by filter_frame. */
static const AVFilterPad edgedetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};
311 
/* Single video output, same dimensions/format as the input. */
static const AVFilterPad edgedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
319 
321  .name = "edgedetect",
322  .description = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
323  .priv_size = sizeof(EdgeDetectContext),
324  .init = init,
325  .uninit = uninit,
327  .inputs = edgedetect_inputs,
328  .outputs = edgedetect_outputs,
329  .priv_class = &edgedetect_class,
331 };