vf_lensfun.c
/*
 * Copyright (C) 2007 by Andrew Zabolotny (author of lensfun, from which this filter derives)
 * Copyright (C) 2018 Stephen Seo
 *
 * This file is part of FFmpeg.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

/**
 * @file
 * Lensfun filter, applies lens correction with parameters from the lensfun database
 *
 * @see https://lensfun.sourceforge.net/
 */
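
/*
 * Example invocation (illustrative values only; the make, model and
 * lens_model strings must match entries in the local lensfun database):
 *
 *   ffmpeg -i input.mov -vf lensfun=make=Canon:model="Canon EOS 100D":\
 *       lens_model="Canon EF-S 18-55mm f/3.5-5.6 IS STM":focal_length=18:aperture=8 output.mov
 */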

#include <float.h>
#include <math.h>

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libswscale/swscale.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#include <lensfun.h>

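/* Granularity of the precomputed Lanczos kernel look-up table built in config_props(). */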
#define LANCZOS_RESOLUTION 256

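/* Correction steps; the "mode" option combines them as a bitmask. */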
enum Mode {
    VIGNETTING = 0x1,
    GEOMETRY_DISTORTION = 0x2,
    SUBPIXEL_DISTORTION = 0x4
};

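/* Resampling methods used when remapping pixels through the precomputed distortion coordinates. */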
enum InterpolationType {
    NEAREST,
    LINEAR,
    LANCZOS
};

typedef struct VignettingThreadData {
    int width, height;
    uint8_t *data_in;
    int linesize_in;
    int pixel_composition;
    lfModifier *modifier;
} VignettingThreadData;

typedef struct DistortionCorrectionThreadData {
    int width, height;
    const float *distortion_coords;
    const uint8_t *data_in;
    uint8_t *data_out;
    int linesize_in, linesize_out;
    const float *interpolation;
    int mode;
    int interpolation_type;
} DistortionCorrectionThreadData;

typedef struct LensfunContext {
    const AVClass *class;
    const char *make, *model, *lens_model;
    int mode;
    float focal_length;
    float aperture;
    float focus_distance;
    float scale;
    int target_geometry;
    int reverse;
    int interpolation_type;

    float *distortion_coords;
    float *interpolation;

    lfLens *lens;
    lfCamera *camera;
    lfModifier *modifier;
} LensfunContext;

#define OFFSET(x) offsetof(LensfunContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption lensfun_options[] = {
    { "make", "set camera maker", OFFSET(make), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "model", "set camera model", OFFSET(model), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "lens_model", "set lens model", OFFSET(lens_model), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=GEOMETRY_DISTORTION}, 0, VIGNETTING | GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION, FLAGS, "mode" },
    { "vignetting", "fix lens vignetting", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING}, 0, 0, FLAGS, "mode" },
    { "geometry", "correct geometry distortion", 0, AV_OPT_TYPE_CONST, {.i64=GEOMETRY_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "subpixel", "fix chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "vig_geo", "fix lens vignetting and correct geometry distortion", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | GEOMETRY_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "vig_subpixel", "fix lens vignetting and chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "distortion", "correct geometry distortion and chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "all", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "focal_length", "focal length of video (zoom; constant for the duration of the use of this filter)", OFFSET(focal_length), AV_OPT_TYPE_FLOAT, {.dbl=18}, 0.0, DBL_MAX, FLAGS },
    { "aperture", "aperture (constant for the duration of the use of this filter)", OFFSET(aperture), AV_OPT_TYPE_FLOAT, {.dbl=3.5}, 0.0, DBL_MAX, FLAGS },
    { "focus_distance", "focus distance (constant for the duration of the use of this filter)", OFFSET(focus_distance), AV_OPT_TYPE_FLOAT, {.dbl=1000.0f}, 0.0, DBL_MAX, FLAGS },
    { "scale", "scale factor applied after corrections (0.0 means automatic scaling)", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, DBL_MAX, FLAGS },
    { "target_geometry", "target geometry of the lens correction (only when geometry correction is enabled)", OFFSET(target_geometry), AV_OPT_TYPE_INT, {.i64=LF_RECTILINEAR}, 0, INT_MAX, FLAGS, "lens_geometry" },
    { "rectilinear", "rectilinear lens (default)", 0, AV_OPT_TYPE_CONST, {.i64=LF_RECTILINEAR}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye", "fisheye lens", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE}, 0, 0, FLAGS, "lens_geometry" },
    { "panoramic", "panoramic (cylindrical)", 0, AV_OPT_TYPE_CONST, {.i64=LF_PANORAMIC}, 0, 0, FLAGS, "lens_geometry" },
    { "equirectangular", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=LF_EQUIRECTANGULAR}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_orthographic", "orthographic fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_ORTHOGRAPHIC}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_stereographic", "stereographic fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_STEREOGRAPHIC}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_equisolid", "equisolid fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_EQUISOLID}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_thoby", "fisheye as measured by thoby", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_THOBY}, 0, 0, FLAGS, "lens_geometry" },
    { "reverse", "Does reverse correction (regular image to lens distorted)", OFFSET(reverse), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "interpolation", "Type of interpolation", OFFSET(interpolation_type), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, LANCZOS, FLAGS, "interpolation" },
    { "nearest", NULL, 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, "interpolation" },
    { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
    { "lanczos", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LANCZOS}, 0, 0, FLAGS, "interpolation" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(lensfun);

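/*
 * Look up the requested camera and lens in the lensfun database. If the
 * "make"/"model" or "lens_model" options are missing, the available database
 * entries are logged to help the user pick valid values.
 */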
static av_cold int init(AVFilterContext *ctx)
{
    LensfunContext *lensfun = ctx->priv;
    lfDatabase *db;
    const lfCamera **cameras;
    const lfLens **lenses;

    db = lf_db_create();
    if (lf_db_load(db) != LF_NO_ERROR) {
        lf_db_destroy(db);
        av_log(ctx, AV_LOG_FATAL, "Failed to load lensfun database\n");
        return AVERROR_INVALIDDATA;
    }

    if (!lensfun->make || !lensfun->model) {
        const lfCamera *const *cameras = lf_db_get_cameras(db);

        av_log(ctx, AV_LOG_FATAL, "Option \"make\" or option \"model\" not specified\n");
        av_log(ctx, AV_LOG_INFO, "Available values for \"make\" and \"model\":\n");
        for (int i = 0; cameras && cameras[i]; i++)
            av_log(ctx, AV_LOG_INFO, "\t%s\t%s\n", cameras[i]->Maker, cameras[i]->Model);
        lf_db_destroy(db);
        return AVERROR(EINVAL);
    } else if (!lensfun->lens_model) {
        const lfLens *const *lenses = lf_db_get_lenses(db);

        av_log(ctx, AV_LOG_FATAL, "Option \"lens_model\" not specified\n");
        av_log(ctx, AV_LOG_INFO, "Available values for \"lens_model\":\n");
        for (int i = 0; lenses && lenses[i]; i++)
            av_log(ctx, AV_LOG_INFO, "\t%s\t(make %s)\n", lenses[i]->Model, lenses[i]->Maker);
        lf_db_destroy(db);
        return AVERROR(EINVAL);
    }

    lensfun->lens = lf_lens_create();
    lensfun->camera = lf_camera_create();

    cameras = lf_db_find_cameras(db, lensfun->make, lensfun->model);
    if (cameras && *cameras) {
        lf_camera_copy(lensfun->camera, *cameras);
        av_log(ctx, AV_LOG_INFO, "Using camera %s\n", lensfun->camera->Model);
    } else {
        lf_free(cameras);
        lf_db_destroy(db);
        av_log(ctx, AV_LOG_FATAL, "Failed to find camera in lensfun database\n");
        return AVERROR_INVALIDDATA;
    }
    lf_free(cameras);

    lenses = lf_db_find_lenses(db, lensfun->camera, NULL, lensfun->lens_model, 0);
    if (lenses && *lenses) {
        lf_lens_copy(lensfun->lens, *lenses);
        av_log(ctx, AV_LOG_INFO, "Using lens %s\n", lensfun->lens->Model);
    } else {
        lf_free(lenses);
        lf_db_destroy(db);
        av_log(ctx, AV_LOG_FATAL, "Failed to find lens in lensfun database\n");
        return AVERROR_INVALIDDATA;
    }
    lf_free(lenses);

    lf_db_destroy(db);
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    // Some of the functions provided by lensfun require pixels in RGB format
    static const enum AVPixelFormat fmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};
    AVFilterFormats *fmts_list = ff_make_format_list(fmts);
    return ff_set_common_formats(ctx, fmts_list);
}

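/*
 * Lanczos kernel with support a = 2: sinc(x) * sinc(x / 2) for |x| < 2,
 * written as 2 * sin(pi*x) * sin(pi*x/2) / (pi*pi*x*x); 1 at x = 0 and 0 elsewhere.
 */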
static float lanczos_kernel(float x)
{
    if (x == 0.0f) {
        return 1.0f;
    } else if (x > -2.0f && x < 2.0f) {
        return (2.0f * sin(M_PI * x) * sin(M_PI / 2.0f * x)) / (M_PI * M_PI * x * x);
    } else {
        return 0.0f;
    }
}

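/*
 * Per-link setup: create the lfModifier for the negotiated frame size, then
 * precompute the per-pixel source coordinates and, for Lanczos interpolation,
 * the kernel look-up table. These buffers are computed once and reused for
 * every frame.
 */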
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LensfunContext *lensfun = ctx->priv;
    int index;
    float a;

    if (!lensfun->modifier) {
        if (lensfun->camera && lensfun->lens) {
            lensfun->modifier = lf_modifier_create(lensfun->lens,
                                                   lensfun->focal_length,
                                                   lensfun->camera->CropFactor,
                                                   inlink->w,
                                                   inlink->h, LF_PF_U8, lensfun->reverse);
            if (lensfun->mode & VIGNETTING)
                lf_modifier_enable_vignetting_correction(lensfun->modifier, lensfun->aperture, lensfun->focus_distance);
            if (lensfun->mode & GEOMETRY_DISTORTION) {
                lf_modifier_enable_distortion_correction(lensfun->modifier);
                lf_modifier_enable_projection_transform(lensfun->modifier, lensfun->target_geometry);
                lf_modifier_enable_scaling(lensfun->modifier, lensfun->scale);
            }
            if (lensfun->mode & SUBPIXEL_DISTORTION)
                lf_modifier_enable_tca_correction(lensfun->modifier);
        } else {
            // lensfun->camera and lensfun->lens should have been initialized
            return AVERROR_BUG;
        }
    }

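    /*
     * The coordinate buffer stores, for each output pixel, the source
     * position(s) to sample from: two floats per pixel for geometry-only
     * correction, and two floats per colour channel (six per pixel) when
     * subpixel (TCA) correction is enabled.
     */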
    if (!lensfun->distortion_coords) {
        if (lensfun->mode & SUBPIXEL_DISTORTION) {
            lensfun->distortion_coords = av_malloc_array(inlink->w * inlink->h, sizeof(float) * 2 * 3);
            if (!lensfun->distortion_coords)
                return AVERROR(ENOMEM);
            if (lensfun->mode & GEOMETRY_DISTORTION) {
                // apply both geometry and subpixel distortion
                lf_modifier_apply_subpixel_geometry_distortion(lensfun->modifier,
                                                               0, 0,
                                                               inlink->w, inlink->h,
                                                               lensfun->distortion_coords);
            } else {
                // apply only subpixel distortion
                lf_modifier_apply_subpixel_distortion(lensfun->modifier,
                                                      0, 0,
                                                      inlink->w, inlink->h,
                                                      lensfun->distortion_coords);
            }
        } else if (lensfun->mode & GEOMETRY_DISTORTION) {
            lensfun->distortion_coords = av_malloc_array(inlink->w * inlink->h, sizeof(float) * 2);
            if (!lensfun->distortion_coords)
                return AVERROR(ENOMEM);
            // apply only geometry distortion
            lf_modifier_apply_geometry_distortion(lensfun->modifier,
                                                  0, 0,
                                                  inlink->w, inlink->h,
                                                  lensfun->distortion_coords);
        }
    }

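    /*
     * For Lanczos interpolation, precompute kernel values in a look-up table:
     * entry i holds lanczos_kernel(sqrtf(i / (float)LANCZOS_RESOLUTION)), so
     * the 4 * LANCZOS_RESOLUTION entries cover squared distances in [0, 4).
     * The slice workers index it with (int)(d * LANCZOS_RESOLUTION).
     */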
    if (!lensfun->interpolation)
        if (lensfun->interpolation_type == LANCZOS) {
            lensfun->interpolation = av_malloc_array(LANCZOS_RESOLUTION, sizeof(float) * 4);
            if (!lensfun->interpolation)
                return AVERROR(ENOMEM);
            for (index = 0; index < 4 * LANCZOS_RESOLUTION; ++index) {
                if (index == 0) {
                    lensfun->interpolation[index] = 1.0f;
                } else {
                    a = sqrtf((float)index / LANCZOS_RESOLUTION);
                    lensfun->interpolation[index] = lanczos_kernel(a);
                }
            }
        }

    return 0;
}

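/* Slice worker: applies lensfun's colour modification (vignetting correction) in place on a band of rows. */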
static int vignetting_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const VignettingThreadData *thread_data = arg;
    const int slice_start = thread_data->height * jobnr / nb_jobs;
    const int slice_end = thread_data->height * (jobnr + 1) / nb_jobs;

    lf_modifier_apply_color_modification(thread_data->modifier,
                                         thread_data->data_in + slice_start * thread_data->linesize_in,
                                         0,
                                         slice_start,
                                         thread_data->width,
                                         slice_end - slice_start,
                                         thread_data->pixel_composition,
                                         thread_data->linesize_in);

    return 0;
}

static float square(float x)
{
    return x * x;
}

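/*
 * Slice worker: remaps each output pixel through the precomputed distortion
 * coordinates using nearest-neighbour, bilinear or Lanczos (4x4 taps)
 * resampling. Pixels that map outside the source frame are set to 0.
 */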
static int distortion_correction_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const DistortionCorrectionThreadData *thread_data = arg;
    const int slice_start = thread_data->height * jobnr / nb_jobs;
    const int slice_end = thread_data->height * (jobnr + 1) / nb_jobs;

    int x, y, i, j, rgb_index;
    float interpolated, new_x, new_y, d, norm;
    int new_x_int, new_y_int;
    for (y = slice_start; y < slice_end; ++y)
        for (x = 0; x < thread_data->width; ++x)
            for (rgb_index = 0; rgb_index < 3; ++rgb_index) {
                if (thread_data->mode & SUBPIXEL_DISTORTION) {
                    // subpixel (and possibly geometry) distortion correction was applied, correct distortion
                    switch(thread_data->interpolation_type) {
                    case NEAREST:
                        new_x_int = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2] + 0.5f;
                        new_y_int = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1] + 0.5f;
                        if (new_x_int < 0 || new_x_int >= thread_data->width || new_y_int < 0 || new_y_int >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in];
                        }
                        break;
                    case LINEAR:
                        interpolated = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1];
                        new_y_int = new_y;
                        if (new_x_int < 0 || new_x_int + 1 >= thread_data->width || new_y_int < 0 || new_y_int + 1 >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] =
                                  thread_data->data_in[ new_x_int      * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y_int + 1 - new_y)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y_int + 1 - new_y)
                                + thread_data->data_in[ new_x_int      * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y - new_y_int)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y - new_y_int);
                        }
                        break;
                    case LANCZOS:
                        interpolated = 0.0f;
                        norm = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1];
                        new_y_int = new_y;
                        for (j = 0; j < 4; ++j)
                            for (i = 0; i < 4; ++i) {
                                if (new_x_int + i - 2 < 0 || new_x_int + i - 2 >= thread_data->width || new_y_int + j - 2 < 0 || new_y_int + j - 2 >= thread_data->height)
                                    continue;
                                d = square(new_x - (new_x_int + i - 2)) * square(new_y - (new_y_int + j - 2));
                                if (d >= 4.0f)
                                    continue;
                                d = thread_data->interpolation[(int)(d * LANCZOS_RESOLUTION)];
                                norm += d;
                                interpolated += thread_data->data_in[(new_x_int + i - 2) * 3 + rgb_index + (new_y_int + j - 2) * thread_data->linesize_in] * d;
                            }
                        if (norm == 0.0f) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            interpolated /= norm;
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = interpolated < 0.0f ? 0.0f : interpolated > 255.0f ? 255.0f : interpolated;
                        }
                        break;
                    }
                } else if (thread_data->mode & GEOMETRY_DISTORTION) {
                    // geometry distortion correction was applied, correct distortion
                    switch(thread_data->interpolation_type) {
                    case NEAREST:
                        new_x_int = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2] + 0.5f;
                        new_y_int = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2 + 1] + 0.5f;
                        if (new_x_int < 0 || new_x_int >= thread_data->width || new_y_int < 0 || new_y_int >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in];
                        }
                        break;
                    case LINEAR:
                        interpolated = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2 + 1];
                        new_y_int = new_y;
                        if (new_x_int < 0 || new_x_int + 1 >= thread_data->width || new_y_int < 0 || new_y_int + 1 >= thread_data->height) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] =
                                  thread_data->data_in[ new_x_int      * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y_int + 1 - new_y)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index +  new_y_int      * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y_int + 1 - new_y)
                                + thread_data->data_in[ new_x_int      * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y - new_y_int)
                                + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x - new_x_int)     * (new_y - new_y_int);
                        }
                        break;
                    case LANCZOS:
                        interpolated = 0.0f;
                        norm = 0.0f;
                        new_x = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2];
                        new_x_int = new_x;
                        new_y = thread_data->distortion_coords[x * 2 + 1 + y * thread_data->width * 2];
                        new_y_int = new_y;
                        for (j = 0; j < 4; ++j)
                            for (i = 0; i < 4; ++i) {
                                if (new_x_int + i - 2 < 0 || new_x_int + i - 2 >= thread_data->width || new_y_int + j - 2 < 0 || new_y_int + j - 2 >= thread_data->height)
                                    continue;
                                d = square(new_x - (new_x_int + i - 2)) * square(new_y - (new_y_int + j - 2));
                                if (d >= 4.0f)
                                    continue;
                                d = thread_data->interpolation[(int)(d * LANCZOS_RESOLUTION)];
                                norm += d;
                                interpolated += thread_data->data_in[(new_x_int + i - 2) * 3 + rgb_index + (new_y_int + j - 2) * thread_data->linesize_in] * d;
                            }
                        if (norm == 0.0f) {
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
                        } else {
                            interpolated /= norm;
                            thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = interpolated < 0.0f ? 0.0f : interpolated > 255.0f ? 255.0f : interpolated;
                        }
                        break;
                    }
                } else {
                    // no distortion correction was applied
                    thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[x * 3 + rgb_index + y * thread_data->linesize_in];
                }
            }

    return 0;
}

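/*
 * Vignetting is corrected in place on the input frame; geometry and subpixel
 * corrections remap into a newly allocated output frame. If neither geometry
 * nor subpixel correction is enabled, the input frame is passed through.
 */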
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LensfunContext *lensfun = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    VignettingThreadData vignetting_thread_data;
    DistortionCorrectionThreadData distortion_correction_thread_data;

    if (lensfun->mode & VIGNETTING) {
        av_frame_make_writable(in);

        vignetting_thread_data = (VignettingThreadData) {
            .width = inlink->w,
            .height = inlink->h,
            .data_in = in->data[0],
            .linesize_in = in->linesize[0],
            .pixel_composition = LF_CR_3(RED, GREEN, BLUE),
            .modifier = lensfun->modifier
        };

        ctx->internal->execute(ctx,
                               vignetting_filter_slice,
                               &vignetting_thread_data,
                               NULL,
                               FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
    }

    if (lensfun->mode & (GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION)) {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);

        distortion_correction_thread_data = (DistortionCorrectionThreadData) {
            .width = inlink->w,
            .height = inlink->h,
            .distortion_coords = lensfun->distortion_coords,
            .data_in = in->data[0],
            .data_out = out->data[0],
            .linesize_in = in->linesize[0],
            .linesize_out = out->linesize[0],
            .interpolation = lensfun->interpolation,
            .mode = lensfun->mode,
            .interpolation_type = lensfun->interpolation_type
        };

        ctx->internal->execute(ctx,
                               distortion_correction_filter_slice,
                               &distortion_correction_thread_data,
                               NULL,
                               FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

        av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    } else {
        return ff_filter_frame(outlink, in);
    }
}

static av_cold void uninit(AVFilterContext *ctx)
{
    LensfunContext *lensfun = ctx->priv;

    if (lensfun->camera)
        lf_camera_destroy(lensfun->camera);
    if (lensfun->lens)
        lf_lens_destroy(lensfun->lens);
    if (lensfun->modifier)
        lf_modifier_destroy(lensfun->modifier);
    av_freep(&lensfun->distortion_coords);
    av_freep(&lensfun->interpolation);
}

static const AVFilterPad lensfun_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad lensfun_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_lensfun = {
    .name          = "lensfun",
    .description   = NULL_IF_CONFIG_SMALL("Apply correction to an image based on info derived from the lensfun database."),
    .priv_size     = sizeof(LensfunContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = lensfun_inputs,
    .outputs       = lensfun_outputs,
    .priv_class    = &lensfun_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};