FFmpeg
vf_convolve.c
/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/tx.h"

#include "avfilter.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

#define MAX_THREADS 16

typedef struct ConvolveContext {
    const AVClass *class;
    FFFrameSync fs;

    AVTXContext *fft[4][MAX_THREADS];
    AVTXContext *ifft[4][MAX_THREADS];

    av_tx_fn tx_fn[4];
    av_tx_fn itx_fn[4];

    int fft_len[4];
    int planewidth[4];
    int planeheight[4];

    AVComplexFloat *fft_hdata_in[4];
    AVComplexFloat *fft_vdata_in[4];
    AVComplexFloat *fft_hdata_out[4];
    AVComplexFloat *fft_vdata_out[4];
    AVComplexFloat *fft_hdata_impulse_in[4];
    AVComplexFloat *fft_vdata_impulse_in[4];
    AVComplexFloat *fft_hdata_impulse_out[4];
    AVComplexFloat *fft_vdata_impulse_out[4];

    int depth;
    int planes;
    int impulse;
    float noise;
    int nb_planes;
    int got_impulse[4];

    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolveContext;

#define OFFSET(x) offsetof(ConvolveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption convolve_options[] = {
    { "planes", "set planes to convolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

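/*
 * Illustrative usage sketch (not part of the original source): both filters
 * take two inputs, the stream to process and the impulse (kernel) video or
 * image.  Assuming a kernel image "kernel.png", something along these lines
 * should work:
 *
 *   ffmpeg -i input.mkv -loop 1 -i kernel.png \
 *          -filter_complex "[0:v][1:v]convolve=planes=7:impulse=first" out.mkv
 */
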
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats_from_list(ctx, pixel_fmts_fftfilt);
}

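/*
 * Per-plane setup: pick a power-of-two transform size at least as large as the
 * bigger of the plane's width and height, and allocate square fft_len x fft_len
 * complex buffers for the input, the impulse and the horizontal/vertical
 * intermediate passes.
 */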
static int config_input_main(AVFilterLink *inlink)
{
    ConvolveContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = desc->nb_components;
    s->depth = desc->comp[0].depth;

    for (i = 0; i < s->nb_planes; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];
        int n = FFMAX(w, h);

        s->fft_len[i] = 1 << (av_log2(2 * n - 1));

        if (!(s->fft_hdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int config_input_impulse(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

typedef struct ThreadData {
    AVComplexFloat *hdata_in, *vdata_in;
    AVComplexFloat *hdata_out, *vdata_out;
    int plane, n;
} ThreadData;

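/* Slice-threaded row pass: each job runs the 1-D forward transform over its
 * share of the rows, using the per-thread transform context of this plane. */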
static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata_in = td->hdata_in;
    AVComplexFloat *hdata_out = td->hdata_out;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        s->tx_fn[plane](s->fft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(float));
    }

    return 0;
}

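/*
 * Copy one plane into the centre of the n x n complex buffer and pad the
 * borders by replicating the edge samples, so the transform does not see a
 * hard discontinuity at the plane boundaries.
 */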
static void get_input(ConvolveContext *s, AVComplexFloat *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale)
{
    const int iw = (n - w) / 2, ih = (n - h) / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}

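/* Column pass: transpose the row-transformed data and reuse the same 1-D
 * transform, which together with fft_horizontal yields the full 2-D FFT. */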
static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata = td->hdata_out;
    AVComplexFloat *vdata_in = td->vdata_in;
    AVComplexFloat *vdata_out = td->vdata_out;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        for (x = 0; x < n; x++) {
            vdata_in[y * n + x].re = hdata[x * n + y].re;
            vdata_in[y * n + x].im = hdata[x * n + y].im;
        }

        s->tx_fn[plane](s->fft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(float));
    }

    return 0;
}

static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata = td->hdata_out;
    AVComplexFloat *vdata_out = td->vdata_out;
    AVComplexFloat *vdata_in = td->vdata_in;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        s->itx_fn[plane](s->ifft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(float));

        for (x = 0; x < n; x++) {
            hdata[x * n + y].re = vdata_out[y * n + x].re;
            hdata[x * n + y].im = vdata_out[y * n + x].im;
        }
    }

    return 0;
}

static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata_out = td->hdata_out;
    AVComplexFloat *hdata_in = td->hdata_in;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        s->itx_fn[plane](s->ifft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(float));
    }

    return 0;
}

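/*
 * Write the real part of the inverse transform back into the frame.  The four
 * quadrants are swapped while copying, which compensates for the centred
 * placement of the image and impulse in the FFT buffers (see get_input).
 */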
static void get_output(ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale)
{
    const int max = (1 << s->depth) - 1;
    const int hh = h / 2;
    const int hw = w / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
        }
    } else {
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
        }
    }
}

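/* Per-pixel kernel for "convolve": pointwise complex multiplication of the two
 * spectra, i.e. circular convolution in the spatial domain; the noise option
 * is added to the real part of the filter spectrum. */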
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            float re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re + noise;
            iim = filter[yn + x].im;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}

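/* Per-pixel kernel for "deconvolve": pointwise complex division of the input
 * spectrum by the filter spectrum, regularized by adding the noise option to
 * the squared magnitude in the denominator so the result does not blow up
 * where the filter response is close to zero. */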
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            float re, im, ire, iim, div;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re;
            iim = filter[yn + x].im;
            div = ire * ire + iim * iim + noise;

            input[yn + x].re = (ire * re + iim * im) / div;
            input[yn + x].im = (ire * im - iim * re) / div;
        }
    }

    return 0;
}

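/*
 * Per frame-pair processing: for every selected plane, forward-transform the
 * main frame, compute the normalized impulse spectrum (once, or per frame,
 * depending on the impulse option), apply the multiply/divide kernel,
 * inverse-transform and write the result back into the main frame scaled by
 * 1/(n*n).
 */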
static int do_convolve(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    ConvolveContext *s = ctx->priv;
    AVFrame *mainpic = NULL, *impulsepic = NULL;
    int ret, y, x, plane;

    ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
    if (ret < 0)
        return ret;
    if (!impulsepic)
        return ff_filter_frame(outlink, mainpic);

    for (plane = 0; plane < s->nb_planes; plane++) {
        AVComplexFloat *filter = s->fft_vdata_impulse_out[plane];
        AVComplexFloat *input = s->fft_vdata_out[plane];
        const int n = s->fft_len[plane];
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        float total = 0;
        ThreadData td;

        if (!(s->planes & (1 << plane))) {
            continue;
        }

        td.plane = plane, td.n = n;
        get_input(s, s->fft_hdata_in[plane], mainpic, w, h, n, plane, 1.f);

        td.hdata_in = s->fft_hdata_in[plane];
        td.vdata_in = s->fft_vdata_in[plane];
        td.hdata_out = s->fft_hdata_out[plane];
        td.vdata_out = s->fft_vdata_out[plane];

        ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ff_filter_execute(ctx, fft_vertical, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
            if (s->depth == 8) {
                for (y = 0; y < h; y++) {
                    const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
                    for (x = 0; x < w; x++) {
                        total += src[x];
                    }
                }
            } else {
                for (y = 0; y < h; y++) {
                    const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
                    for (x = 0; x < w; x++) {
                        total += src[x];
                    }
                }
            }
            total = FFMAX(1, total);

            get_input(s, s->fft_hdata_impulse_in[plane], impulsepic, w, h, n, plane, 1.f / total);

            td.hdata_in = s->fft_hdata_impulse_in[plane];
            td.vdata_in = s->fft_vdata_impulse_in[plane];
            td.hdata_out = s->fft_hdata_impulse_out[plane];
            td.vdata_out = s->fft_vdata_impulse_out[plane];

            ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                              FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
            ff_filter_execute(ctx, fft_vertical, &td, NULL,
                              FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

            s->got_impulse[plane] = 1;
        }

        td.hdata_in = input;
        td.vdata_in = filter;

        ff_filter_execute(ctx, s->filter, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata_in = s->fft_hdata_out[plane];
        td.vdata_in = s->fft_vdata_out[plane];
        td.hdata_out = s->fft_hdata_in[plane];
        td.vdata_out = s->fft_vdata_in[plane];

        ff_filter_execute(ctx, ifft_vertical, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata_out = s->fft_hdata_out[plane];
        td.hdata_in = s->fft_hdata_in[plane];

        ff_filter_execute(ctx, ifft_horizontal, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        get_output(s, s->fft_hdata_out[plane], mainpic, w, h, n, plane, 1.f / (n * n));
    }

    return ff_filter_frame(outlink, mainpic);
}

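/* Output/link setup: propagate the main link properties, configure the frame
 * sync, and create one forward and one inverse AV_TX_FLOAT_FFT context per
 * plane and per worker thread (up to MAX_THREADS). */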
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConvolveContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret, i, j;

    s->fs.on_event = do_convolve;
    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    for (i = 0; i < s->nb_planes; i++) {
        for (j = 0; j < MAX_THREADS; j++) {
            float scale;

            ret = av_tx_init(&s->fft[i][j], &s->tx_fn[i], AV_TX_FLOAT_FFT, 0, s->fft_len[i], &scale, 0);
            if (ret < 0)
                return ret;
            ret = av_tx_init(&s->ifft[i][j], &s->itx_fn[i], AV_TX_FLOAT_FFT, 1, s->fft_len[i], &scale, 0);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold int init(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;

    if (!strcmp(ctx->filter->name, "convolve")) {
        s->filter = complex_multiply;
    } else if (!strcmp(ctx->filter->name, "deconvolve")) {
        s->filter = complex_divide;
    } else {
        return AVERROR_BUG;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    int i, j;

    for (i = 0; i < 4; i++) {
        av_freep(&s->fft_hdata_in[i]);
        av_freep(&s->fft_vdata_in[i]);
        av_freep(&s->fft_hdata_out[i]);
        av_freep(&s->fft_vdata_out[i]);
        av_freep(&s->fft_hdata_impulse_in[i]);
        av_freep(&s->fft_vdata_impulse_in[i]);
        av_freep(&s->fft_hdata_impulse_out[i]);
        av_freep(&s->fft_vdata_impulse_out[i]);

        for (j = 0; j < MAX_THREADS; j++) {
            av_tx_uninit(&s->fft[i][j]);
            av_tx_uninit(&s->ifft[i][j]);
        }
    }

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad convolve_inputs[] = {
    {
        .name          = "main",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_main,
    },{
        .name          = "impulse",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_impulse,
    },
};

static const AVFilterPad convolve_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};

#if CONFIG_CONVOLVE_FILTER

FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);

const AVFilter ff_vf_convolve = {
    .name          = "convolve",
    .description   = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
    .preinit       = convolve_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &convolve_class,
    FILTER_INPUTS(convolve_inputs),
    FILTER_OUTPUTS(convolve_outputs),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_CONVOLVE_FILTER */

#if CONFIG_DECONVOLVE_FILTER

static const AVOption deconvolve_options[] = {
    { "planes", "set planes to deconvolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

FRAMESYNC_DEFINE_CLASS(deconvolve, ConvolveContext, fs);

const AVFilter ff_vf_deconvolve = {
    .name          = "deconvolve",
    .description   = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
    .preinit       = deconvolve_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &deconvolve_class,
    FILTER_INPUTS(convolve_inputs),
    FILTER_OUTPUTS(convolve_outputs),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_DECONVOLVE_FILTER */