FFmpeg
vf_deshake.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
3  * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * fast deshake / depan video filter
25  *
26  * SAD block-matching motion compensation to fix small changes in
27  * horizontal and/or vertical shift. This filter helps remove camera shake
28  * from hand-holding a camera, bumping a tripod, moving on a vehicle, etc.
29  *
30  * Algorithm:
31  * - For each frame with one previous reference frame
32  * - For each block in the frame
33  * - If contrast > threshold then find likely motion vector
34  * - For all found motion vectors
35  * - Find most common, store as global motion vector
36  * - Find most likely rotation angle
37  * - Transform image along global motion
38  *
39  * TODO:
40  * - Fill frame edges based on previous/next reference frames
41  * - Fill frame edges by stretching image near the edges?
42  * - Can this be done quickly and look decent?
43  *
44  * Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2
45  * for an algorithm similar to what could be used here to get the gmv
46  * It requires only a couple diamond searches + fast downscaling
47  *
48  * Special thanks to Jason Kotenko for his help with the algorithm and my
49  * inability to see simple errors in C code.
50  */
51 
52 #include "avfilter.h"
53 #include "formats.h"
54 #include "internal.h"
55 #include "video.h"
56 #include "libavutil/common.h"
57 #include "libavutil/mem.h"
58 #include "libavutil/opt.h"
59 #include "libavutil/pixdesc.h"
60 #include "libavutil/qsort.h"
61 
62 #include "deshake.h"
63 
#define OFFSET(x) offsetof(DeshakeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

// User-visible filter options. cx/cy/cw/ch default to -1, meaning
// "search the whole frame"; rx/ry bound the per-block motion search.
static const AVOption deshake_options[] = {
    { "x", "set x for the rectangular search area", OFFSET(cx), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
    { "y", "set y for the rectangular search area", OFFSET(cy), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
    { "w", "set width for the rectangular search area", OFFSET(cw), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
    { "h", "set height for the rectangular search area", OFFSET(ch), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
    { "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
    { "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
    // How pixels exposed by the compensating shift are filled in.
    { "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=FILL_MIRROR}, FILL_BLANK, FILL_COUNT-1, FLAGS, "edge"},
    { "blank", "fill zeroes at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_BLANK}, INT_MIN, INT_MAX, FLAGS, "edge" },
    { "original", "original image at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_ORIGINAL}, INT_MIN, INT_MAX, FLAGS, "edge" },
    { "clamp", "extruded edge value at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_CLAMP}, INT_MIN, INT_MAX, FLAGS, "edge" },
    { "mirror", "mirrored edge at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_MIRROR}, INT_MIN, INT_MAX, FLAGS, "edge" },
    { "blocksize", "set motion search blocksize", OFFSET(blocksize), AV_OPT_TYPE_INT, {.i64=8}, 4, 128, .flags = FLAGS },
    { "contrast", "set contrast threshold for blocks", OFFSET(contrast), AV_OPT_TYPE_INT, {.i64=125}, 1, 255, .flags = FLAGS },
    { "search", "set search strategy", OFFSET(search), AV_OPT_TYPE_INT, {.i64=EXHAUSTIVE}, EXHAUSTIVE, SEARCH_COUNT-1, FLAGS, "smode" },
    { "exhaustive", "exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
    { "less", "less exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=SMART_EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
    { "filename", "set motion search detailed log file name", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    // Kept for command-line compatibility; OpenCL path was removed.
    { "opencl", "ignored", OFFSET(opencl), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(deshake);
90 
/**
 * qsort comparator for doubles: negative, zero, or positive according to
 * whether *a is less than, equal to, or greater than *b.
 */
static int cmp(const void *a, const void *b)
{
    const double lhs = *(const double *)a;
    const double rhs = *(const double *)b;

    /* Same result as FFDIFFSIGN(lhs, rhs). */
    return (lhs > rhs) - (lhs < rhs);
}
95 
96 /**
97  * Cleaned mean (cuts off 20% of values to remove outliers and then averages)
98  */
99 static double clean_mean(double *values, int count)
100 {
101  double mean = 0;
102  int cut = count / 5;
103  int x;
104 
105  AV_QSORT(values, count, double, cmp);
106 
107  for (x = cut; x < count - cut; x++) {
108  mean += values[x];
109  }
110 
111  return mean / (count - cut * 2);
112 }
113 
114 /**
115  * Find the most likely shift in motion between two frames for a given
116  * macroblock. Test each block against several shifts given by the rx
117  * and ry attributes. Searches using a simple matrix of those shifts and
118  * chooses the most likely shift by the smallest difference in blocks.
119  */
120 static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
121  uint8_t *src2, int cx, int cy, int stride,
123 {
124  int x, y;
125  int diff;
126  int smallest = INT_MAX;
127  int tmp, tmp2;
128 
129  #define CMP(i, j) deshake->sad(src1 + cy * stride + cx, stride,\
130  src2 + (j) * stride + (i), stride)
131 
132  if (deshake->search == EXHAUSTIVE) {
133  // Compare every possible position - this is sloooow!
134  for (y = -deshake->ry; y <= deshake->ry; y++) {
135  for (x = -deshake->rx; x <= deshake->rx; x++) {
136  diff = CMP(cx - x, cy - y);
137  if (diff < smallest) {
138  smallest = diff;
139  mv->x = x;
140  mv->y = y;
141  }
142  }
143  }
144  } else if (deshake->search == SMART_EXHAUSTIVE) {
145  // Compare every other possible position and find the best match
146  for (y = -deshake->ry + 1; y < deshake->ry; y += 2) {
147  for (x = -deshake->rx + 1; x < deshake->rx; x += 2) {
148  diff = CMP(cx - x, cy - y);
149  if (diff < smallest) {
150  smallest = diff;
151  mv->x = x;
152  mv->y = y;
153  }
154  }
155  }
156 
157  // Hone in on the specific best match around the match we found above
158  tmp = mv->x;
159  tmp2 = mv->y;
160 
161  for (y = tmp2 - 1; y <= tmp2 + 1; y++) {
162  for (x = tmp - 1; x <= tmp + 1; x++) {
163  if (x == tmp && y == tmp2)
164  continue;
165 
166  diff = CMP(cx - x, cy - y);
167  if (diff < smallest) {
168  smallest = diff;
169  mv->x = x;
170  mv->y = y;
171  }
172  }
173  }
174  }
175 
176  if (smallest > 512) {
177  mv->x = -1;
178  mv->y = -1;
179  }
180  emms_c();
181  //av_log(NULL, AV_LOG_ERROR, "%d\n", smallest);
182  //av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y);
183 }
184 
/**
 * Find the contrast of a given block. When searching for global motion we
 * really only care about the high contrast blocks, so using this method we
 * can actually skip blocks we don't care much about.
 *
 * Scans blocksize * 2 + 1 rows and a fixed 16-pixel width (to match the
 * 16x16 SAD function) and returns max - min pixel value.
 */
static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
{
    int highest = 0;
    int lowest = 255;
    int i, j, pos;

    for (i = 0; i <= blocksize * 2; i++) {
        // We use a width of 16 here to match the sad function
        for (j = 0; j <= 15; j++) {
            pos = (y + i) * stride + (x + j);
            // Track min and max independently. The previous "else if" form
            // skipped the max test whenever a pixel lowered the minimum, so
            // a block whose brightest pixel appeared before the running
            // minimum settled could report a wrong (even negative) contrast.
            if (src[pos] < lowest)
                lowest = src[pos];
            if (src[pos] > highest)
                highest = src[pos];
        }
    }

    return highest - lowest;
}
210 
211 /**
212  * Find the rotation for a given block.
213  */
214 static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
215 {
216  double a1, a2, diff;
217 
218  a1 = atan2(y - cy, x - cx);
219  a2 = atan2(y - cy + shift->y, x - cx + shift->x);
220 
221  diff = a2 - a1;
222 
223  return (diff > M_PI) ? diff - 2 * M_PI :
224  (diff < -M_PI) ? diff + 2 * M_PI :
225  diff;
226 }
227 
/**
 * Find the estimated global motion for a scene given the most likely shift
 * for each block in the frame. The global motion is estimated to be the
 * same as the motion from most blocks in the frame, so if most blocks
 * move one pixel to the right and two pixels down, this would yield a
 * motion vector (1, -2).
 *
 * Fills t with the global translation vector, the averaged rotation angle,
 * and leaves t->zoom untouched. src1 is the reference (previous) frame,
 * src2 the current frame; both are luma planes with the given stride.
 */
static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
                        int width, int height, int stride, Transform *t)
{
    int x, y;
    IntMotionVector mv = {0, 0};
    int count_max_value = 0;
    int contrast;

    int pos;                          // number of per-block angles collected
    int center_x = 0, center_y = 0;   // accumulated (then averaged) block motion
    double p_x, p_y;

    // Scratch space for one angle per searched block.
    // NOTE(review): the result of av_fast_malloc() is not checked; on
    // allocation failure deshake->angles is NULL and the stores below would
    // crash — worth guarding. TODO confirm intended policy.
    av_fast_malloc(&deshake->angles, &deshake->angles_size, width * height / (16 * deshake->blocksize) * sizeof(*deshake->angles));

    // Reset counts to zero
    for (x = 0; x < deshake->rx * 2 + 1; x++) {
        for (y = 0; y < deshake->ry * 2 + 1; y++) {
            deshake->counts[x][y] = 0;
        }
    }

    pos = 0;
    // Find motion for every block and store the motion vector in the counts
    for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
        // We use a width of 16 here to match the sad function
        for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
            // If the contrast is too low, just skip this block as it probably
            // won't be very useful to us.
            contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
            if (contrast > deshake->contrast) {
                //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
                find_block_motion(deshake, src1, src2, x, y, stride, &mv);
                // (-1, -1) is find_block_motion()'s "no usable match" marker.
                if (mv.x != -1 && mv.y != -1) {
                    deshake->counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
                    if (x > deshake->rx && y > deshake->ry)
                        deshake->angles[pos++] = block_angle(x, y, 0, 0, &mv);

                    center_x += mv.x;
                    center_y += mv.y;
                }
            }
        }
    }

    if (pos) {
        // Average motion center and outlier-trimmed mean of block angles.
        center_x /= pos;
        center_y /= pos;
        t->angle = clean_mean(deshake->angles, pos);
        // NOTE(review): this snaps only small *positive* angles to zero;
        // looks like it should be fabs(t->angle) < 0.001 — confirm upstream.
        if (t->angle < 0.001)
            t->angle = 0;
    } else {
        t->angle = 0;
    }

    // Find the most common motion vector in the frame and use it as the gmv
    for (y = deshake->ry * 2; y >= 0; y--) {
        for (x = 0; x < deshake->rx * 2 + 1; x++) {
            //av_log(NULL, AV_LOG_ERROR, "%5d ", deshake->counts[x][y]);
            if (deshake->counts[x][y] > count_max_value) {
                t->vec.x = x - deshake->rx;
                t->vec.y = y - deshake->ry;
                count_max_value = deshake->counts[x][y];
            }
        }
        //av_log(NULL, AV_LOG_ERROR, "\n");
    }

    // Compensate the translation for the rotation around the frame center:
    // rotate the (center-relative) average motion point by t->angle and add
    // the displacement that rotation introduces.
    p_x = (center_x - width / 2.0);
    p_y = (center_y - height / 2.0);
    t->vec.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
    t->vec.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;

    // Clamp max shift & rotation?
    t->vec.x = av_clipf(t->vec.x, -deshake->rx * 2, deshake->rx * 2);
    t->vec.y = av_clipf(t->vec.y, -deshake->ry * 2, deshake->ry * 2);
    t->angle = av_clipf(t->angle, -0.1, 0.1);

    //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
}
314 
316  int width, int height, int cw, int ch,
317  const float *matrix_y, const float *matrix_uv,
319  enum FillMethod fill, AVFrame *in, AVFrame *out)
320 {
321  int i = 0, ret = 0;
322  const float *matrixs[3];
323  int plane_w[3], plane_h[3];
324  matrixs[0] = matrix_y;
325  matrixs[1] = matrixs[2] = matrix_uv;
326  plane_w[0] = width;
327  plane_w[1] = plane_w[2] = cw;
328  plane_h[0] = height;
329  plane_h[1] = plane_h[2] = ch;
330 
331  for (i = 0; i < 3; i++) {
332  // Transform the luma and chroma planes
333  ret = ff_affine_transform(in->data[i], out->data[i], in->linesize[i],
334  out->linesize[i], plane_w[i], plane_h[i],
335  matrixs[i], interpolate, fill);
336  if (ret < 0)
337  return ret;
338  }
339  return ret;
340 }
341 
343 {
344  DeshakeContext *deshake = ctx->priv;
345 
346  deshake->refcount = 20; // XXX: add to options?
347  deshake->blocksize /= 2;
348  deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
349 
350  if (deshake->rx % 16) {
351  av_log(ctx, AV_LOG_ERROR, "rx must be a multiple of 16\n");
352  return AVERROR_PATCHWELCOME;
353  }
354 
355  if (deshake->filename)
356  deshake->fp = fopen(deshake->filename, "w");
357  if (deshake->fp)
358  fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", 1, 104, deshake->fp);
359 
360  // Quadword align left edge of box for MMX code, adjust width if necessary
361  // to keep right margin
362  if (deshake->cx > 0) {
363  deshake->cw += deshake->cx - (deshake->cx & ~15);
364  deshake->cx &= ~15;
365  }
366  deshake->transform = deshake_transform_c;
367 
368  av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
369  deshake->cx, deshake->cy, deshake->cw, deshake->ch,
370  deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
371 
372  return 0;
373 }
374 
376 {
377  static const enum AVPixelFormat pix_fmts[] = {
381  };
383 }
384 
386 {
387  DeshakeContext *deshake = link->dst->priv;
388 
389  deshake->ref = NULL;
390  deshake->last.vec.x = 0;
391  deshake->last.vec.y = 0;
392  deshake->last.angle = 0;
393  deshake->last.zoom = 0;
394 
395  return 0;
396 }
397 
399 {
400  DeshakeContext *deshake = ctx->priv;
401  av_frame_free(&deshake->ref);
402  av_freep(&deshake->angles);
403  deshake->angles_size = 0;
404  if (deshake->fp)
405  fclose(deshake->fp);
406 }
407 
409 {
410  DeshakeContext *deshake = link->dst->priv;
411  AVFilterLink *outlink = link->dst->outputs[0];
412  AVFrame *out;
413  Transform t = {{0},0}, orig = {{0},0};
414  float matrix_y[9], matrix_uv[9];
415  float alpha = 2.0 / deshake->refcount;
416  char tmp[256];
417  int ret = 0;
419  const int chroma_width = AV_CEIL_RSHIFT(link->w, desc->log2_chroma_w);
420  const int chroma_height = AV_CEIL_RSHIFT(link->h, desc->log2_chroma_h);
421  int aligned;
422  float transform_zoom;
423 
424  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
425  if (!out) {
426  av_frame_free(&in);
427  return AVERROR(ENOMEM);
428  }
430 
431  aligned = !((intptr_t)in->data[0] & 15 | in->linesize[0] & 15);
432  deshake->sad = av_pixelutils_get_sad_fn(4, 4, aligned, deshake); // 16x16, 2nd source unaligned
433  if (!deshake->sad)
434  return AVERROR(EINVAL);
435 
436  if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
437  // Find the most likely global motion for the current frame
438  find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
439  } else {
440  uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
441  uint8_t *src2 = in->data[0];
442 
443  deshake->cx = FFMIN(deshake->cx, link->w);
444  deshake->cy = FFMIN(deshake->cy, link->h);
445 
446  if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
447  if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;
448 
449  // Quadword align right margin
450  deshake->cw &= ~15;
451 
452  src1 += deshake->cy * in->linesize[0] + deshake->cx;
453  src2 += deshake->cy * in->linesize[0] + deshake->cx;
454 
455  find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
456  }
457 
458 
459  // Copy transform so we can output it later to compare to the smoothed value
460  orig.vec.x = t.vec.x;
461  orig.vec.y = t.vec.y;
462  orig.angle = t.angle;
463  orig.zoom = t.zoom;
464 
465  // Generate a one-sided moving exponential average
466  deshake->avg.vec.x = alpha * t.vec.x + (1.0 - alpha) * deshake->avg.vec.x;
467  deshake->avg.vec.y = alpha * t.vec.y + (1.0 - alpha) * deshake->avg.vec.y;
468  deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
469  deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;
470 
471  // Remove the average from the current motion to detect the motion that
472  // is not on purpose, just as jitter from bumping the camera
473  t.vec.x -= deshake->avg.vec.x;
474  t.vec.y -= deshake->avg.vec.y;
475  t.angle -= deshake->avg.angle;
476  t.zoom -= deshake->avg.zoom;
477 
478  // Invert the motion to undo it
479  t.vec.x *= -1;
480  t.vec.y *= -1;
481  t.angle *= -1;
482 
483  // Write statistics to file
484  if (deshake->fp) {
485  snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vec.x, deshake->avg.vec.x, t.vec.x, orig.vec.y, deshake->avg.vec.y, t.vec.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
486  fwrite(tmp, 1, strlen(tmp), deshake->fp);
487  }
488 
489  // Turn relative current frame motion into absolute by adding it to the
490  // last absolute motion
491  t.vec.x += deshake->last.vec.x;
492  t.vec.y += deshake->last.vec.y;
493  t.angle += deshake->last.angle;
494  t.zoom += deshake->last.zoom;
495 
496  // Shrink motion by 10% to keep things centered in the camera frame
497  t.vec.x *= 0.9;
498  t.vec.y *= 0.9;
499  t.angle *= 0.9;
500 
501  // Store the last absolute motion information
502  deshake->last.vec.x = t.vec.x;
503  deshake->last.vec.y = t.vec.y;
504  deshake->last.angle = t.angle;
505  deshake->last.zoom = t.zoom;
506 
507  transform_zoom = 1.0 + t.zoom / 100.0;
508 
509  // Generate a luma transformation matrix
510  ff_get_matrix(t.vec.x, t.vec.y, t.angle, transform_zoom, transform_zoom, matrix_y);
511  // Generate a chroma transformation matrix
512  ff_get_matrix(t.vec.x / (link->w / chroma_width), t.vec.y / (link->h / chroma_height), t.angle, transform_zoom, transform_zoom, matrix_uv);
513  // Transform the luma and chroma planes
514  ret = deshake->transform(link->dst, link->w, link->h, chroma_width, chroma_height,
515  matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);
516 
517  // Cleanup the old reference frame
518  av_frame_free(&deshake->ref);
519 
520  if (ret < 0)
521  goto fail;
522 
523  // Store the current frame as the reference frame for calculating the
524  // motion of the next frame
525  deshake->ref = in;
526 
527  return ff_filter_frame(outlink, out);
528 fail:
529  av_frame_free(&out);
530  return ret;
531 }
532 
// Input pad: receives video frames; per-frame work happens in filter_frame,
// and config_props resets the motion state when the link is configured.
static const AVFilterPad deshake_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
};
541 
// Output pad: plain video output, no extra callbacks needed.
static const AVFilterPad deshake_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};
548 
550  .name = "deshake",
551  .description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
552  .priv_size = sizeof(DeshakeContext),
553  .init = init,
554  .uninit = uninit,
558  .priv_class = &deshake_class,
559 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:98
stride
int stride
Definition: mace.c:144
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
MAX_R
#define MAX_R
Definition: deshake.h:53
av_clip
#define av_clip
Definition: common.h:96
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
SMART_EXHAUSTIVE
@ SMART_EXHAUSTIVE
Search most possible positions (faster)
Definition: deshake.h:33
DeshakeContext::ry
int ry
Maximum vertical shift.
Definition: deshake.h:62
MotionVector::y
int16_t y
Definition: agm.c:57
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1017
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2540
FILL_CLAMP
@ FILL_CLAMP
Definition: transform.h:54
mv
static const int8_t mv[256][2]
Definition: 4xm.c:79
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
pixdesc.h
DeshakeContext::angles_size
unsigned angles_size
Definition: deshake.h:59
deshake_options
static const AVOption deshake_options[]
Definition: vf_deshake.c:67
AVOption
AVOption.
Definition: opt.h:247
b
#define b
Definition: input.c:40
DeshakeContext::transform
int(* transform)(AVFilterContext *ctx, int width, int height, int cw, int ch, const float *matrix_y, const float *matrix_uv, enum InterpolateMethod interpolate, enum FillMethod fill, AVFrame *in, AVFrame *out)
Definition: deshake.h:78
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_deshake.c:375
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
deshake_inputs
static const AVFilterPad deshake_inputs[]
Definition: vf_deshake.c:533
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:153
InterpolateMethod
InterpolateMethod
Definition: transform.h:39
FLAGS
#define FLAGS
Definition: vf_deshake.c:65
video.h
block_contrast
static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
Find the contrast of a given block.
Definition: vf_deshake.c:190
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
DeshakeContext::blocksize
int blocksize
Size of blocks to compare.
Definition: deshake.h:64
FILL_ORIGINAL
@ FILL_ORIGINAL
Definition: transform.h:53
formats.h
FILL_BLANK
@ FILL_BLANK
Definition: transform.h:52
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(deshake)
MotionVector::x
int16_t x
Definition: agm.c:57
DeshakeContext::last
Transform last
Transform from last frame.
Definition: deshake.h:68
ff_vf_deshake
const AVFilter ff_vf_deshake
Definition: vf_deshake.c:549
fail
#define fail()
Definition: checkasm.h:127
DeshakeContext::ch
int ch
Definition: deshake.h:73
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_deshake.c:342
cmp
static int cmp(const void *a, const void *b)
Definition: vf_deshake.c:91
CMP
#define CMP(i, j)
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
FFDIFFSIGN
#define FFDIFFSIGN(x, y)
Comparator.
Definition: macros.h:45
a1
#define a1
Definition: regdef.h:47
aligned
static int aligned(int val)
Definition: dashdec.c:169
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
OFFSET
#define OFFSET(x)
Definition: vf_deshake.c:64
Transform::vec
MotionVector vec
Motion vector.
Definition: deshake.h:48
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
FILL_COUNT
@ FILL_COUNT
Definition: transform.h:56
width
#define width
DeshakeContext::refcount
int refcount
Number of reference frames (defines averaging window)
Definition: deshake.h:69
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
DeshakeContext::avg
Transform avg
Definition: deshake.h:71
Transform::zoom
double zoom
Zoom percentage.
Definition: deshake.h:50
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:703
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
DeshakeContext::ref
AVFrame * ref
Previous frame.
Definition: deshake.h:60
ctx
AVFormatContext * ctx
Definition: movenc.c:48
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:152
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
DeshakeContext::filename
char * filename
Motion search detailed log filename.
Definition: deshake.h:76
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
av_clipf
#define av_clipf
Definition: common.h:144
src
#define src
Definition: vp8dsp.c:255
block_angle
static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
Find the rotation for a given block.
Definition: vf_deshake.c:214
DeshakeContext::cx
int cx
Definition: deshake.h:74
DeshakeContext::cw
int cw
Crop motion search to this box.
Definition: deshake.h:72
find_motion
static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2, int width, int height, int stride, Transform *t)
Find the estimated global motion for a scene given the most likely shift for each block in the frame.
Definition: vf_deshake.c:235
qsort.h
clean_mean
static double clean_mean(double *values, int count)
Cleaned mean (cuts off 20% of values to remove outliers and then averages)
Definition: vf_deshake.c:99
find_block_motion
static void find_block_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2, int cx, int cy, int stride, IntMotionVector *mv)
Find the most likely shift in motion between two frames for a given macroblock.
Definition: vf_deshake.c:120
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
DeshakeContext::counts
int counts[2 *MAX_R+1][2 *MAX_R+1]
Definition: deshake.h:57
FILL_MIRROR
@ FILL_MIRROR
Definition: transform.h:55
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:376
DeshakeContext::angles
double * angles
< Scratch buffer for motion search
Definition: deshake.h:58
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
INTERPOLATE_BILINEAR
@ INTERPOLATE_BILINEAR
Definition: transform.h:41
interpolate
static void interpolate(float *out, float v1, float v2, int size)
Definition: twinvq.c:84
M_PI
#define M_PI
Definition: mathematics.h:52
internal.h
src1
#define src1
Definition: h264pred.c:140
i
int i
Definition: input.c:406
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_deshake.c:408
ff_get_matrix
void ff_get_matrix(float x_shift, float y_shift, float angle, float scale_x, float scale_y, float *matrix)
Get an affine transformation matrix from given translation, rotation, and zoom factors.
Definition: transform.c:106
config_props
static int config_props(AVFilterLink *link)
Definition: vf_deshake.c:385
AV_QSORT
#define AV_QSORT(p, num, type, cmp)
Quicksort This sort is fast, and fully inplace but not stable and it is possible to construct input t...
Definition: qsort.h:33
SEARCH_COUNT
@ SEARCH_COUNT
Definition: deshake.h:34
a2
#define a2
Definition: regdef.h:48
common.h
DeshakeContext::contrast
int contrast
Contrast threshold.
Definition: deshake.h:65
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
Transform::angle
double angle
Angle of rotation.
Definition: deshake.h:49
DeshakeContext::search
int search
Motion search method.
Definition: deshake.h:66
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
AVFilter
Filter definition.
Definition: avfilter.h:149
ret
ret
Definition: filter_design.txt:187
search
static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax, int ymin, int ymax, int *best_x, int *best_y, float best_score)
Definition: vf_find_rect.c:157
EXHAUSTIVE
@ EXHAUSTIVE
Search all possible positions.
Definition: deshake.h:32
pos
unsigned int pos
Definition: spdifenc.c:412
deshake_transform_c
static int deshake_transform_c(AVFilterContext *ctx, int width, int height, int cw, int ch, const float *matrix_y, const float *matrix_uv, enum InterpolateMethod interpolate, enum FillMethod fill, AVFrame *in, AVFrame *out)
Definition: vf_deshake.c:315
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_deshake.c:398
ff_affine_transform
int ff_affine_transform(const uint8_t *src, uint8_t *dst, int src_stride, int dst_stride, int width, int height, const float *matrix, enum InterpolateMethod interpolate, enum FillMethod fill)
Do an affine transformation with the given interpolation method.
Definition: transform.c:125
DeshakeContext::edge
int edge
Edge fill method.
Definition: deshake.h:63
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
Transform
Definition: deshake.h:47
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
deshake.h
av_pixelutils_get_sad_fn
av_pixelutils_sad_fn av_pixelutils_get_sad_fn(int w_bits, int h_bits, int aligned, void *log_ctx)
Get a potentially optimized pointer to a Sum-of-absolute-differences function (see the av_pixelutils_...
Definition: pixelutils.c:66
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:263
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:860
DeshakeContext::fp
FILE * fp
Definition: deshake.h:70
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:346
shift
static int shift(int a, int b)
Definition: sonic.c:83
deshake_outputs
static const AVFilterPad deshake_outputs[]
Definition: vf_deshake.c:542
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
mem.h
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
DeshakeContext::rx
int rx
Maximum horizontal shift.
Definition: deshake.h:61
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:153
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:558
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
FillMethod
FillMethod
Definition: transform.h:51
DeshakeContext
Definition: deshake.h:55
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:334
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
DeshakeContext::cy
int cy
Definition: deshake.h:75
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
snprintf
#define snprintf
Definition: snprintf.h:34
IntMotionVector
Definition: deshake.h:37
DeshakeContext::sad
av_pixelutils_sad_fn sad
Sum of the absolute difference function.
Definition: deshake.h:67