FFmpeg
vf_perspective.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (c) 2013 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/avassert.h"
23 #include "libavutil/eval.h"
24 #include "libavutil/imgutils.h"
25 #include "libavutil/pixdesc.h"
26 #include "libavutil/opt.h"
27 #include "avfilter.h"
28 #include "internal.h"
29 #include "video.h"
30 
31 #define SUB_PIXEL_BITS 8
32 #define SUB_PIXELS (1 << SUB_PIXEL_BITS)
33 #define COEFF_BITS 11
34 
35 #define LINEAR 0
36 #define CUBIC 1
37 
38 typedef struct PerspectiveContext {
39  const AVClass *class;
40  char *expr_str[4][2];
41  double ref[4][2];
42  int32_t (*pv)[2];
45  int linesize[4];
46  int height[4];
47  int hsub, vsub;
48  int nb_planes;
49  int sense;
50  int eval_mode;
51 
53  void *arg, int job, int nb_jobs);
55 
56 #define OFFSET(x) offsetof(PerspectiveContext, x)
57 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
58 
/** Interpretation of the four configured corner coordinates. */
enum PERSPECTIVESense {
    PERSPECTIVE_SENSE_SOURCE      = 0, ///< coordinates give locations in source of corners of destination.
    PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
};
63 
/** When the corner-coordinate expressions are (re-)evaluated. */
enum EvalMode {
    EVAL_MODE_INIT,  ///< evaluate expressions once during configuration
    EVAL_MODE_FRAME, ///< re-evaluate expressions for every incoming frame
    EVAL_MODE_NB
};
69 
static const AVOption perspective_options[] = {
    /* Corner coordinate expressions; "W"/"H" in the expressions are the input
     * width/height, "in"/"on" the input/output frame counts (see var_names). */
    { "x0", "set top left x coordinate", OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y0", "set top left y coordinate", OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x1", "set top right x coordinate", OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y1", "set top right y coordinate", OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x2", "set bottom left x coordinate", OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y2", "set bottom left y coordinate", OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    /* Interpolation kernel used when resampling (LINEAR default). */
    { "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, .unit = "interpolation" },
    { "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, .unit = "interpolation" },
    { "cubic", "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC}, 0, 0, FLAGS, .unit = "interpolation" },
    /* Whether the corners describe positions in the source or destination. */
    { "sense", "specify the sense of the coordinates", OFFSET(sense), AV_OPT_TYPE_INT, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 1, FLAGS, .unit = "sense"},
    { "source", "specify locations in source to send to corners in destination",
        0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, .unit = "sense"},
    { "destination", "specify locations in destination to send corners of source",
        0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, .unit = "sense"},
    /* Expression evaluation cadence: once at init, or per frame. */
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
    { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
    { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },

    { NULL }
};
93 
94 AVFILTER_DEFINE_CLASS(perspective);
95 
96 static const enum AVPixelFormat pix_fmts[] = {
101 };
102 
/**
 * Cubic convolution kernel weight for a tap at (signed) distance d from the
 * sample position, with sharpness parameter A = -0.60.
 * Piecewise cubic: one polynomial for |d| < 1, another for 1 <= |d| < 2,
 * zero beyond. Evaluated here in Horner form.
 */
static inline double get_coeff(double d)
{
    static const double A = -0.60;
    const double x = fabs(d);

    if (x >= 2.0)
        return 0.0;
    if (x >= 1.0)                         /* A*x^3 - 5A*x^2 + 8A*x - 4A */
        return A * (((x - 5.0) * x + 8.0) * x - 4.0);
    /* (A+2)*x^3 - (A+3)*x^2 + 1 */
    return ((A + 2.0) * x - (A + 3.0)) * x * x + 1.0;
}
118 
119 static const char *const var_names[] = { "W", "H", "in", "on", NULL };
121 
123 {
124  PerspectiveContext *s = ctx->priv;
125  AVFilterLink *outlink = ctx->outputs[0];
126  double (*ref)[2] = s->ref;
127 
128  double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
129  [VAR_IN] = inlink->frame_count_out + 1,
130  [VAR_ON] = outlink->frame_count_in + 1 };
131  const int h = values[VAR_H];
132  const int w = values[VAR_W];
133  double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
134  double t0, t1, t2, t3;
135  int x, y, i, j, ret;
136 
137  for (i = 0; i < 4; i++) {
138  for (j = 0; j < 2; j++) {
139  if (!s->expr_str[i][j])
140  return AVERROR(EINVAL);
141  ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
142  var_names, &values[0],
143  NULL, NULL, NULL, NULL,
144  0, 0, ctx);
145  if (ret < 0)
146  return ret;
147  }
148  }
149 
150  switch (s->sense) {
152  x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
153  (ref[2][1] - ref[3][1]) -
154  ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
155  (ref[2][0] - ref[3][0])) * h;
156  x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
157  (ref[1][0] - ref[3][0]) -
158  ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
159  (ref[1][1] - ref[3][1])) * w;
160  q = ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
161  ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);
162 
163  x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
164  x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
165  x2 = q * ref[0][0] * w * h;
166  x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
167  x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
168  x5 = q * ref[0][1] * w * h;
169  x8 = q * w * h;
170  break;
172  t0 = ref[0][0] * (ref[3][1] - ref[1][1]) +
173  ref[1][0] * (ref[0][1] - ref[3][1]) +
174  ref[3][0] * (ref[1][1] - ref[0][1]);
175  t1 = ref[1][0] * (ref[2][1] - ref[3][1]) +
176  ref[2][0] * (ref[3][1] - ref[1][1]) +
177  ref[3][0] * (ref[1][1] - ref[2][1]);
178  t2 = ref[0][0] * (ref[3][1] - ref[2][1]) +
179  ref[2][0] * (ref[0][1] - ref[3][1]) +
180  ref[3][0] * (ref[2][1] - ref[0][1]);
181  t3 = ref[0][0] * (ref[1][1] - ref[2][1]) +
182  ref[1][0] * (ref[2][1] - ref[0][1]) +
183  ref[2][0] * (ref[0][1] - ref[1][1]);
184 
185  x0 = t0 * t1 * w * (ref[2][1] - ref[0][1]);
186  x1 = t0 * t1 * w * (ref[0][0] - ref[2][0]);
187  x2 = t0 * t1 * w * (ref[0][1] * ref[2][0] - ref[0][0] * ref[2][1]);
188  x3 = t1 * t2 * h * (ref[1][1] - ref[0][1]);
189  x4 = t1 * t2 * h * (ref[0][0] - ref[1][0]);
190  x5 = t1 * t2 * h * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]);
191  x6 = t1 * t2 * (ref[1][1] - ref[0][1]) +
192  t0 * t3 * (ref[2][1] - ref[3][1]);
193  x7 = t1 * t2 * (ref[0][0] - ref[1][0]) +
194  t0 * t3 * (ref[3][0] - ref[2][0]);
195  x8 = t1 * t2 * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]) +
196  t0 * t3 * (ref[2][0] * ref[3][1] - ref[2][1] * ref[3][0]);
197  break;
198  default:
199  av_assert0(0);
200  }
201 
202  for (y = 0; y < h; y++){
203  for (x = 0; x < w; x++){
204  int u, v;
205 
206  u = lrint(SUB_PIXELS * (x0 * x + x1 * y + x2) /
207  (x6 * x + x7 * y + x8));
208  v = lrint(SUB_PIXELS * (x3 * x + x4 * y + x5) /
209  (x6 * x + x7 * y + x8));
210 
211  s->pv[x + y * w][0] = u;
212  s->pv[x + y * w][1] = v;
213  }
214  }
215 
216  return 0;
217 }
218 
220 {
221  AVFilterContext *ctx = inlink->dst;
222  PerspectiveContext *s = ctx->priv;
224  int h = inlink->h;
225  int w = inlink->w;
226  int i, j, ret;
227  s->hsub = desc->log2_chroma_w;
228  s->vsub = desc->log2_chroma_h;
229  s->nb_planes = av_pix_fmt_count_planes(inlink->format);
230  if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
231  return ret;
232 
233  s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
234  s->height[0] = s->height[3] = inlink->h;
235 
236  s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
237  if (!s->pv)
238  return AVERROR(ENOMEM);
239 
240  if (s->eval_mode == EVAL_MODE_INIT) {
241  if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
242  return ret;
243  }
244  }
245 
246  for (i = 0; i < SUB_PIXELS; i++){
247  double d = i / (double)SUB_PIXELS;
248  double temp[4];
249  double sum = 0;
250 
251  for (j = 0; j < 4; j++)
252  temp[j] = get_coeff(j - d - 1);
253 
254  for (j = 0; j < 4; j++)
255  sum += temp[j];
256 
257  for (j = 0; j < 4; j++)
258  s->coeff[i][j] = lrint((1 << COEFF_BITS) * temp[j] / sum);
259  }
260 
261  return 0;
262 }
263 
/* Per-plane job description handed to the slice-threaded resamplers. */
typedef struct ThreadData {
    uint8_t *dst;     // destination plane base pointer
    int dst_linesize; // destination plane stride
    uint8_t *src;     // source plane base pointer
    int src_linesize; // source plane stride
    int w, h;         // plane width (bytes) and height (rows)
    int hsub, vsub;   // chroma shift of this plane (0 for luma/alpha)
} ThreadData;
272 
274  int job, int nb_jobs)
275 {
276  PerspectiveContext *s = ctx->priv;
277  ThreadData *td = arg;
278  uint8_t *dst = td->dst;
279  int dst_linesize = td->dst_linesize;
280  uint8_t *src = td->src;
281  int src_linesize = td->src_linesize;
282  int w = td->w;
283  int h = td->h;
284  int hsub = td->hsub;
285  int vsub = td->vsub;
286  int start = (h * job) / nb_jobs;
287  int end = (h * (job+1)) / nb_jobs;
288  const int linesize = s->linesize[0];
289  int x, y;
290 
291  for (y = start; y < end; y++) {
292  int sy = y << vsub;
293  for (x = 0; x < w; x++) {
294  int u, v, subU, subV, sum, sx;
295 
296  sx = x << hsub;
297  u = s->pv[sx + sy * linesize][0] >> hsub;
298  v = s->pv[sx + sy * linesize][1] >> vsub;
299  subU = u & (SUB_PIXELS - 1);
300  subV = v & (SUB_PIXELS - 1);
301  u >>= SUB_PIXEL_BITS;
302  v >>= SUB_PIXEL_BITS;
303 
304  if (u > 0 && v > 0 && u < w - 2 && v < h - 2){
305  const int index = u + v*src_linesize;
306  const int a = s->coeff[subU][0];
307  const int b = s->coeff[subU][1];
308  const int c = s->coeff[subU][2];
309  const int d = s->coeff[subU][3];
310 
311  sum = s->coeff[subV][0] * (a * src[index - 1 - src_linesize] + b * src[index - 0 - src_linesize] +
312  c * src[index + 1 - src_linesize] + d * src[index + 2 - src_linesize]) +
313  s->coeff[subV][1] * (a * src[index - 1 ] + b * src[index - 0 ] +
314  c * src[index + 1 ] + d * src[index + 2 ]) +
315  s->coeff[subV][2] * (a * src[index - 1 + src_linesize] + b * src[index - 0 + src_linesize] +
316  c * src[index + 1 + src_linesize] + d * src[index + 2 + src_linesize]) +
317  s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
318  c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
319  } else {
320  int dx, dy;
321 
322  sum = 0;
323 
324  for (dy = 0; dy < 4; dy++) {
325  int iy = v + dy - 1;
326 
327  if (iy < 0)
328  iy = 0;
329  else if (iy >= h)
330  iy = h-1;
331  for (dx = 0; dx < 4; dx++) {
332  int ix = u + dx - 1;
333 
334  if (ix < 0)
335  ix = 0;
336  else if (ix >= w)
337  ix = w - 1;
338 
339  sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ ix + iy * src_linesize];
340  }
341  }
342  }
343 
344  sum = (sum + (1<<(COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
345  sum = av_clip_uint8(sum);
346  dst[x + y * dst_linesize] = sum;
347  }
348  }
349  return 0;
350 }
351 
353  int job, int nb_jobs)
354 {
355  PerspectiveContext *s = ctx->priv;
356  ThreadData *td = arg;
357  uint8_t *dst = td->dst;
358  int dst_linesize = td->dst_linesize;
359  uint8_t *src = td->src;
360  int src_linesize = td->src_linesize;
361  int w = td->w;
362  int h = td->h;
363  int hsub = td->hsub;
364  int vsub = td->vsub;
365  int start = (h * job) / nb_jobs;
366  int end = (h * (job+1)) / nb_jobs;
367  const int linesize = s->linesize[0];
368  int x, y;
369 
370  for (y = start; y < end; y++){
371  int sy = y << vsub;
372  for (x = 0; x < w; x++){
373  int u, v, subU, subV, sum, sx, index, subUI, subVI;
374 
375  sx = x << hsub;
376  u = s->pv[sx + sy * linesize][0] >> hsub;
377  v = s->pv[sx + sy * linesize][1] >> vsub;
378  subU = u & (SUB_PIXELS - 1);
379  subV = v & (SUB_PIXELS - 1);
380  u >>= SUB_PIXEL_BITS;
381  v >>= SUB_PIXEL_BITS;
382 
383  index = u + v * src_linesize;
384  subUI = SUB_PIXELS - subU;
385  subVI = SUB_PIXELS - subV;
386 
387  if ((unsigned)u < (unsigned)(w - 1)){
388  if((unsigned)v < (unsigned)(h - 1)){
389  sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
390  subV * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
391  sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1)))>> (SUB_PIXEL_BITS * 2);
392  } else {
393  if (v < 0)
394  v = 0;
395  else
396  v = h - 1;
397  index = u + v * src_linesize;
398  sum = subUI * src[index] + subU * src[index + 1];
399  sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
400  }
401  } else {
402  if (u < 0)
403  u = 0;
404  else
405  u = w - 1;
406  if ((unsigned)v < (unsigned)(h - 1)){
407  index = u + v * src_linesize;
408  sum = subVI * src[index] + subV * src[index + src_linesize];
409  sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
410  } else {
411  if (v < 0)
412  v = 0;
413  else
414  v = h - 1;
415  index = u + v * src_linesize;
416  sum = src[index];
417  }
418  }
419 
420  sum = av_clip_uint8(sum);
421  dst[x + y * dst_linesize] = sum;
422  }
423  }
424  return 0;
425 }
426 
428 {
429  PerspectiveContext *s = ctx->priv;
430 
431  switch (s->interpolation) {
432  case LINEAR: s->perspective = resample_linear; break;
433  case CUBIC: s->perspective = resample_cubic; break;
434  }
435 
436  return 0;
437 }
438 
440 {
441  AVFilterContext *ctx = inlink->dst;
442  AVFilterLink *outlink = ctx->outputs[0];
443  PerspectiveContext *s = ctx->priv;
444  AVFrame *out;
445  int plane;
446  int ret;
447 
448  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
449  if (!out) {
451  return AVERROR(ENOMEM);
452  }
454 
455  if (s->eval_mode == EVAL_MODE_FRAME) {
456  if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
457  av_frame_free(&out);
458  return ret;
459  }
460  }
461 
462  for (plane = 0; plane < s->nb_planes; plane++) {
463  int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
464  int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
465  ThreadData td = {.dst = out->data[plane],
466  .dst_linesize = out->linesize[plane],
467  .src = frame->data[plane],
468  .src_linesize = frame->linesize[plane],
469  .w = s->linesize[plane],
470  .h = s->height[plane],
471  .hsub = hsub,
472  .vsub = vsub };
473  ff_filter_execute(ctx, s->perspective, &td, NULL,
475  }
476 
478  return ff_filter_frame(outlink, out);
479 }
480 
482 {
483  PerspectiveContext *s = ctx->priv;
484 
485  av_freep(&s->pv);
486 }
487 
/* Single video input; geometry is (re)configured via config_input. */
static const AVFilterPad perspective_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};
496 
498  .name = "perspective",
499  .description = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
500  .priv_size = sizeof(PerspectiveContext),
501  .init = init,
502  .uninit = uninit,
506  .priv_class = &perspective_class,
508 };
get_coeff
static double get_coeff(double d)
Definition: vf_perspective.c:103
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
A
#define A(x)
Definition: vpx_arith.h:28
PerspectiveContext::sense
int sense
Definition: vf_perspective.c:49
ThreadData::hsub
int hsub
Definition: vf_perspective.c:270
PerspectiveContext::coeff
int32_t coeff[SUB_PIXELS][4]
Definition: vf_perspective.c:43
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: vf_perspective.c:96
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:54
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
PerspectiveContext::height
int height[4]
Definition: vf_perspective.c:46
PerspectiveContext::expr_str
char * expr_str[4][2]
Definition: vf_perspective.c:40
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:162
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
PerspectiveContext::interpolation
int interpolation
Definition: vf_perspective.c:44
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:88
PerspectiveContext::perspective
int(* perspective)(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
Definition: vf_perspective.c:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
EVAL_MODE_FRAME
@ EVAL_MODE_FRAME
Definition: vf_perspective.c:66
w
uint8_t w
Definition: llviddspenc.c:38
calc_persp_luts
static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
Definition: vf_perspective.c:122
AVOption
AVOption.
Definition: opt.h:346
t0
#define t0
Definition: regdef.h:28
b
#define b
Definition: input.c:41
PerspectiveContext::nb_planes
int nb_planes
Definition: vf_perspective.c:48
PERSPECTIVESense
PERSPECTIVESense
Definition: vf_perspective.c:59
t1
#define t1
Definition: regdef.h:29
ThreadData::w
int w
Definition: vf_blend.c:59
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
video.h
PERSPECTIVE_SENSE_SOURCE
@ PERSPECTIVE_SENSE_SOURCE
coordinates give locations in source of corners of destination.
Definition: vf_perspective.c:60
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
hsub
static void hsub(htype *dst, const htype *src, int bins)
Definition: vf_median.c:73
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3002
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
PerspectiveContext::ref
double ref[4][2]
Definition: vf_perspective.c:41
VAR_IN
@ VAR_IN
Definition: vf_perspective.c:120
COEFF_BITS
#define COEFF_BITS
Definition: vf_perspective.c:33
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
EVAL_MODE_NB
@ EVAL_MODE_NB
Definition: vf_perspective.c:67
avassert.h
lrint
#define lrint
Definition: tablegen.h:53
av_cold
#define av_cold
Definition: attributes.h:90
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_perspective.c:427
ff_video_default_filterpad
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
Definition: video.c:37
SUB_PIXEL_BITS
#define SUB_PIXEL_BITS
Definition: vf_perspective.c:31
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
resample_linear
static int resample_linear(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
Definition: vf_perspective.c:352
av_image_fill_linesizes
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
PerspectiveContext::linesize
int linesize[4]
Definition: vf_perspective.c:45
LINEAR
#define LINEAR
Definition: vf_perspective.c:35
ThreadData::src
uint8_t * src
Definition: vf_perspective.c:267
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
ctx
AVFormatContext * ctx
Definition: movenc.c:48
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
ThreadData::h
int h
Definition: vf_blend.c:59
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
frame
static AVFrame * frame
Definition: demux_decode.c:54
arg
const char * arg
Definition: jacosubdec.c:67
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:32
perspective_options
static const AVOption perspective_options[]
Definition: vf_perspective.c:70
perspective_inputs
static const AVFilterPad perspective_inputs[]
Definition: vf_perspective.c:488
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
VAR_ON
@ VAR_ON
Definition: vf_perspective.c:120
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:637
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
VAR_VARS_NB
@ VAR_VARS_NB
Definition: vf_perspective.c:120
double
double
Definition: af_crystalizer.c:131
VAR_W
@ VAR_W
Definition: vf_perspective.c:120
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
eval.h
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_expr_parse_and_eval
int av_expr_parse_and_eval(double *d, const char *s, const char *const *const_names, const double *const_values, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), void *opaque, int log_offset, void *log_ctx)
Parse and evaluate an expression.
Definition: eval.c:804
OFFSET
#define OFFSET(x)
Definition: vf_perspective.c:56
ff_vf_perspective
const AVFilter ff_vf_perspective
Definition: vf_perspective.c:497
PERSPECTIVE_SENSE_DESTINATION
@ PERSPECTIVE_SENSE_DESTINATION
coordinates give locations in destination of corners of source.
Definition: vf_perspective.c:61
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(perspective)
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
interpolation
static int interpolation(DeclickChannel *c, const double *src, int ar_order, double *acoefficients, int *index, int nb_errors, double *auxiliary, double *interpolated)
Definition: af_adeclick.c:389
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:147
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
t3
#define t3
Definition: regdef.h:31
ThreadData::dst
uint8_t * dst
Definition: vf_perspective.c:265
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:825
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
EvalMode
EvalMode
Definition: af_volume.h:39
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
config_input
static int config_input(AVFilterLink *inlink)
Definition: vf_perspective.c:219
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_perspective.c:481
VAR_H
@ VAR_H
Definition: vf_perspective.c:120
AVFilter
Filter definition.
Definition: avfilter.h:166
ret
ret
Definition: filter_design.txt:187
PerspectiveContext
Definition: vf_perspective.c:38
ThreadData::vsub
int vsub
Definition: vf_perspective.c:270
EVAL_MODE_INIT
@ EVAL_MODE_INIT
Definition: vf_perspective.c:65
PerspectiveContext::vsub
int vsub
Definition: vf_perspective.c:47
ThreadData::src_linesize
int src_linesize
Definition: vf_bm3d.c:55
resample_cubic
static int resample_cubic(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
Definition: vf_perspective.c:273
t2
#define t2
Definition: regdef.h:30
PerspectiveContext::hsub
int hsub
Definition: vf_perspective.c:47
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
var_names
static const char *const var_names[]
Definition: vf_perspective.c:119
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:263
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:263
SUB_PIXELS
#define SUB_PIXELS
Definition: vf_perspective.c:32
av_clip_uint8
#define av_clip_uint8
Definition: common.h:104
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:73
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
ThreadData::dst_linesize
int dst_linesize
Definition: vf_colorlevels.c:91
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
PerspectiveContext::eval_mode
int eval_mode
Definition: vf_perspective.c:50
d
d
Definition: ffmpeg_filter.c:425
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: vf_perspective.c:439
h
h
Definition: vp9dsp_template.c:2038
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
int
int
Definition: ffmpeg_filter.c:425
PerspectiveContext::pv
int32_t(* pv)[2]
Definition: vf_perspective.c:42
FLAGS
#define FLAGS
Definition: vf_perspective.c:57
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
CUBIC
#define CUBIC
Definition: vf_perspective.c:36