/*
 * Copyright (C) 2010-2011 Kevin Stone
 * Copyright (C) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <float.h>

#include "libavutil/common.h"
#include "libavutil/file_open.h"
#include "libavutil/float_dsp.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "filters.h"
#include "video.h"

static const size_t NNEDI_WEIGHTS_SIZE = 13574928;
static const uint8_t NNEDI_XDIM[] = { 8, 16, 32, 48, 8, 16, 32 };
static const uint8_t NNEDI_YDIM[] = { 6, 6, 6, 6, 4, 4, 4 };
static const uint16_t NNEDI_NNS[] = { 16, 32, 64, 128, 256 };

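/*
 * Layout note (added): the predictor models are indexed as
 * coeffs[etype][nns][nsize], i.e. 2 training error types x 5 neuron counts
 * x 7 window sizes. NNEDI_XDIM/NNEDI_YDIM give the local window for each
 * nsize index (e.g. index 6 is the 32x4 window behind the "s32x4" option),
 * and NNEDI_NNS the neuron count for each nns index.
 */
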
typedef struct PrescreenerCoefficients {
    DECLARE_ALIGNED(32, float, kernel_l0)[4][16 * 4];
    DECLARE_ALIGNED(32, float, bias_l0)[4];

    DECLARE_ALIGNED(32, float, kernel_l1)[4][4];
    DECLARE_ALIGNED(32, float, bias_l1)[4];

    DECLARE_ALIGNED(32, float, kernel_l2)[4][8];
    DECLARE_ALIGNED(32, float, bias_l2)[4];
} PrescreenerCoefficients;

typedef struct PredictorCoefficients {
    int xdim, ydim, nns, nsize;
    float *data;
    float *softmax_q1;
    float *elliott_q1;
    float *softmax_bias_q1;
    float *elliott_bias_q1;
    float *softmax_q2;
    float *elliott_q2;
    float *softmax_bias_q2;
    float *elliott_bias_q2;
} PredictorCoefficients;

typedef struct NNEDIContext {
    const AVClass *class;

    char *weights_file;

    AVFrame *prev;
    int eof;
    int64_t pts;

    AVFloatDSPContext *fdsp;
    int depth;
    int nb_planes;
    int nb_threads;
    int linesize[4];
    int planewidth[4];
    int planeheight[4];
    int field_n;

    PrescreenerCoefficients prescreener[4];
    PredictorCoefficients coeffs[2][5][7];

    float half;
    float in_scale;
    float out_scale;

    // Parameters
    int deint;
    int field;
    int process_plane;
    int nsize;
    int nnsparam;
    int qual;
    int etype;
    int pscrn;

    int input_size;
    uint8_t **prescreen_buf;
    float **input_buf;
    float **output_buf;

    void (*read)(const uint8_t *src, float *dst,
                 int src_stride, int dst_stride,
                 int width, int height, float scale);
    void (*write)(const float *src, uint8_t *dst,
                  int src_stride, int dst_stride,
                  int width, int height, int depth, float scale);
    void (*prescreen[2])(AVFilterContext *ctx,
                         const void *src, ptrdiff_t src_stride,
                         uint8_t *prescreen, int N,
                         const PrescreenerCoefficients *const coeffs);
} NNEDIContext;

#define OFFSET(x) offsetof(NNEDIContext, x)
#define RFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption nnedi_options[] = {
    {"weights", "set weights file", OFFSET(weights_file), AV_OPT_TYPE_STRING, {.str="nnedi3_weights.bin"}, 0, 0, FLAGS },
    {"deint", "set which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, RFLAGS, .unit = "deint" },
    {"all", "deinterlace all frames", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "deint" },
    {"interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "deint" },
    {"field", "set mode of operation", OFFSET(field), AV_OPT_TYPE_INT, {.i64=-1}, -2, 3, RFLAGS, .unit = "field" },
    {"af", "use frame flags, both fields", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, RFLAGS, .unit = "field" },
    {"a", "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, RFLAGS, .unit = "field" },
    {"t", "use top field only", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "field" },
    {"b", "use bottom field only", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "field" },
    {"tf", "use both fields, top first", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "field" },
    {"bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "field" },
    {"planes", "set which planes to process", OFFSET(process_plane), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, RFLAGS },
    {"nsize", "set size of local neighborhood around each pixel, used by the predictor neural network", OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, RFLAGS, .unit = "nsize" },
    {"s8x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s16x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s32x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s48x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s8x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s16x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s32x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, RFLAGS, .unit = "nsize" },
    {"nns", "set number of neurons in predictor neural network", OFFSET(nnsparam), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, RFLAGS, .unit = "nns" },
    {"n16", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "nns" },
    {"n32", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "nns" },
    {"n64", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "nns" },
    {"n128", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "nns" },
    {"n256", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, RFLAGS, .unit = "nns" },
    {"qual", "set quality", OFFSET(qual), AV_OPT_TYPE_INT, {.i64=1}, 1, 2, RFLAGS, .unit = "qual" },
    {"fast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "qual" },
    {"slow", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "qual" },
    {"etype", "set which set of weights to use in the predictor", OFFSET(etype), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, RFLAGS, .unit = "etype" },
    {"a", "weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "etype" },
    {"abs","weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "etype" },
    {"s", "weights trained to minimize squared error", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "etype" },
    {"mse","weights trained to minimize squared error", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "etype" },
    {"pscrn", "set prescreening", OFFSET(pscrn), AV_OPT_TYPE_INT, {.i64=2}, 0, 4, RFLAGS, .unit = "pscrn" },
    {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"original", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"new", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"new2", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"new3", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, RFLAGS, .unit = "pscrn" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(nnedi);

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    const NNEDIContext *const s = ctx->priv;

    outlink->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;

    if (s->field == -2 || s->field > 1) {
        FilterLink *il = ff_filter_link(ctx->inputs[0]);
        FilterLink *ol = ff_filter_link(outlink);
        ol->frame_rate = av_mul_q(il->frame_rate, (AVRational){2, 1});
    }

    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};

static float dot_dsp(const NNEDIContext *const s, const float *kernel, const float *input,
                     int n, float scale, float bias)
{
    float sum, y;

    sum = s->fdsp->scalarproduct_float(kernel, input, n);

    y = sum * scale + bias + 1e-20f;

    return y;
}

static float elliott(float x)
{
    return x / (1.0f + fabsf(x));
}

static void transform_elliott(float *input, int size)
{
    for (int i = 0; i < size; i++)
        input[i] = elliott(input[i]);
}

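/*
 * Note (added): elliott() is the Elliott sigmoid f(x) = x / (1 + |x|), a
 * cheap tanh-like activation that maps any real input into (-1, 1), e.g.
 * f(1) = 0.5 and f(-3) = -0.75.
 */
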
static void process_old(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)
{
    NNEDIContext *s = ctx->priv;
    const float *src_p = src;

    // Adjust source pointer to point to top-left of filter window.
    const float *window = src_p - 2 * src_stride - 5;

    for (int j = 0; j < N; j++) {
        LOCAL_ALIGNED_32(float, input, [48]);
        float state[12];

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 12, window + i * src_stride + j, 12 * sizeof(float));

        // Layer 0.
        for (int n = 0; n < 4; n++)
            state[n] = dot_dsp(s, m_data->kernel_l0[n], input, 48, 1.0f, m_data->bias_l0[n]);
        transform_elliott(state + 1, 3);

        // Layer 1.
        for (int n = 0; n < 4; n++)
            state[n + 4] = dot_dsp(s, m_data->kernel_l1[n], state, 4, 1.0f, m_data->bias_l1[n]);
        transform_elliott(state + 4, 3);

        // Layer 2.
        for (int n = 0; n < 4; n++)
            state[n + 8] = dot_dsp(s, m_data->kernel_l2[n], state, 8, 1.0f, m_data->bias_l2[n]);

        prescreen[j] = FFMAX(state[10], state[11]) <= FFMAX(state[8], state[9]) ? 255 : 0;
    }
}

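/*
 * Note (added): the "original" prescreener above is a small 48-4-4-4 MLP run
 * per output pixel over a 12x4 window of the source field; the final
 * comparison of the two output pairs decides whether cheap cubic
 * interpolation suffices (prescreen[j] = 255) or the full predictor network
 * must be used (prescreen[j] = 0).
 */
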
static void process_new(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)
{
    NNEDIContext *s = ctx->priv;
    const float *src_p = src;

    // Adjust source pointer to point to top-left of filter window.
    const float *window = src_p - 2 * src_stride - 6;

    for (int j = 0; j < N; j += 4) {
        LOCAL_ALIGNED_32(float, input, [64]);
        float state[8];

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 16, window + i * src_stride + j, 16 * sizeof(float));

        for (int n = 0; n < 4; n++)
            state[n] = dot_dsp(s, m_data->kernel_l0[n], input, 64, 1.0f, m_data->bias_l0[n]);
        transform_elliott(state, 4);

        for (int n = 0; n < 4; n++)
            state[n + 4] = dot_dsp(s, m_data->kernel_l1[n], state, 4, 1.0f, m_data->bias_l1[n]);

        for (int n = 0; n < 4; n++)
            prescreen[j + n] = state[n + 4] > 0.f;
    }
}

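/*
 * Note (added): the "new" prescreener is a smaller 64-4-4 network over a
 * 16x4 window that classifies four adjacent pixels per iteration, which is
 * why its layer-1 outputs map one-to-one onto prescreen[j]..prescreen[j+3].
 */
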
static int filter_offset(int nn, const PredictorCoefficients *const model)
{
    return nn * model->nsize;
}

static const float *softmax_q1_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->softmax_q1 + filter_offset(nn, model);
}

static const float *elliott_q1_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->elliott_q1 + filter_offset(nn, model);
}

static const float *softmax_q2_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->softmax_q2 + filter_offset(nn, model);
}

static const float *elliott_q2_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->elliott_q2 + filter_offset(nn, model);
}

static void gather_input(const float *src, ptrdiff_t src_stride,
                         float *buf, float mstd[4],
                         const PredictorCoefficients *const model)
{
    const float scale = 1.f / model->nsize;
    float sum = 0.f;
    float sum_sq = 0.f;
    float tmp;

    for (int i = 0; i < model->ydim; i++) {
        memcpy(buf, src, model->xdim * sizeof(float));

        for (int j = 0; j < model->xdim; j++) {
            const float val = src[j];

            sum += val;
            sum_sq += val * val;
        }

        src += src_stride;
        buf += model->xdim;
    }

    mstd[0] = sum * scale;
    mstd[3] = 0.f;

    tmp = sum_sq * scale - mstd[0] * mstd[0];
    if (tmp < FLT_EPSILON) {
        mstd[1] = 0.0f;
        mstd[2] = 0.0f;
    } else {
        mstd[1] = sqrtf(tmp);
        mstd[2] = 1.0f / mstd[1];
    }
}

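/*
 * Note (added): gather_input() fills mstd[] with the window statistics used
 * to normalize the predictor input: mstd[0] is the mean, mstd[1] the
 * standard deviation computed as sqrt(E[x^2] - E[x]^2), mstd[2] its
 * reciprocal (0 when the window is flat), and mstd[3] is the output
 * accumulator consumed by wae5().
 */
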
static float softmax_exp(float x)
{
    return expf(av_clipf(x, -80.f, 80.f));
}

static void transform_softmax_exp(float *input, int size)
{
    for (int i = 0; i < size; i++)
        input[i] = softmax_exp(input[i]);
}

static void wae5(const float *softmax, const float *el,
                 int n, float mstd[4])
{
    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {
        vsum += softmax[i] * elliott(el[i]);
        wsum += softmax[i];
    }

    if (wsum > 1e-10f)
        mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
    else
        mstd[3] += mstd[0];
}

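/*
 * Note (added): wae5() turns the two neuron banks into one pixel value.
 * With weights w_i = softmax[i] it accumulates
 *   mstd[3] += 5 * (sum_i w_i * elliott(el[i])) / (sum_i w_i) * mstd[1] + mstd[0],
 * i.e. a softmax-weighted average of the elliott outputs, rescaled from the
 * normalized domain back to the window's mean and standard deviation.
 */
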
static void predictor(AVFilterContext *ctx,
                      const void *src, ptrdiff_t src_stride, void *dst,
                      const uint8_t *prescreen, int N,
                      const PredictorCoefficients *const model, int use_q2)
{
    const NNEDIContext *const s = ctx->priv;
    const float *src_p = src;
    float *dst_p = dst;

    // Adjust source pointer to point to top-left of filter window.
    const float *window = src_p - (model->ydim / 2) * src_stride - (model->xdim / 2 - 1);
    const int filter_size = model->nsize;
    const int nns = model->nns;

    for (int i = 0; i < N; i++) {
        LOCAL_ALIGNED_32(float, input, [48 * 6]);
        float activation[256 * 2];
        float mstd[4];
        float scale;

        if (prescreen[i])
            continue;

        gather_input(window + i, src_stride, input, mstd, model);
        scale = mstd[2];

        for (int nn = 0; nn < nns; nn++)
            activation[nn] = dot_dsp(s, softmax_q1_filter(nn, model), input, filter_size, scale, model->softmax_bias_q1[nn]);

        for (int nn = 0; nn < nns; nn++)
            activation[nns + nn] = dot_dsp(s, elliott_q1_filter(nn, model), input, filter_size, scale, model->elliott_bias_q1[nn]);

        transform_softmax_exp(activation, nns);
        wae5(activation, activation + nns, nns, mstd);

        if (use_q2) {
            for (int nn = 0; nn < nns; nn++)
                activation[nn] = dot_dsp(s, softmax_q2_filter(nn, model), input, filter_size, scale, model->softmax_bias_q2[nn]);

            for (int nn = 0; nn < nns; nn++)
                activation[nns + nn] = dot_dsp(s, elliott_q2_filter(nn, model), input, filter_size, scale, model->elliott_bias_q2[nn]);

            transform_softmax_exp(activation, nns);
            wae5(activation, activation + nns, nns, mstd);
        }

        dst_p[i] = mstd[3] * (use_q2 ? 0.5f : 1.f);
    }
}

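/*
 * Note (added): with qual=slow (use_q2), a second, independently trained set
 * of weights is evaluated and the two wae5() results accumulated in mstd[3]
 * are averaged by the final 0.5f factor; pixels the prescreener accepted
 * (prescreen[i] != 0) are skipped here and filled by interpolation() instead.
 */
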
static void read_bytes(const uint8_t *src, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x];

        for (int x = 0; x < width; x++)
            dst[x] = src[x];

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1];

        dst += dst_stride;
        src += src_stride;
    }
}

static void read_words(const uint8_t *srcp, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    const uint16_t *src = (const uint16_t *)srcp;

    src_stride /= 2;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x] * scale;

        for (int x = 0; x < width; x++)
            dst[x] = src[x] * scale;

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1] * scale;

        dst += dst_stride;
        src += src_stride;
    }
}

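/*
 * Note (added): both readers mirror 32 extra samples on each side of every
 * line (the staging rows are width + 64 floats wide), so the widest 48x6
 * predictor window and the prescreeners never read out of bounds at the
 * left/right edges. The 16-bit path also scales samples down to the 8-bit
 * range the networks were trained on.
 */
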
static void write_bytes(const float *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, int depth,
                        float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uint8(src[x]);

        dst += dst_stride;
        src += src_stride;
    }
}

static void write_words(const float *src, uint8_t *dstp,
                        int src_stride, int dst_stride,
                        int width, int height, int depth,
                        float scale)
{
    uint16_t *dst = (uint16_t *)dstp;

    dst_stride /= 2;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uintp2_c(src[x] * scale, depth);

        dst += dst_stride;
        src += src_stride;
    }
}

static void interpolation(const void *src, ptrdiff_t src_stride,
                          void *dst, const uint8_t *prescreen, int n)
{
    const float *src_p = src;
    float *dst_p = dst;
    const float *window = src_p - 2 * src_stride;

    for (int i = 0; i < n; i++) {
        float accum = 0.0f;

        if (!prescreen[i])
            continue;

        accum += (-3.0f / 32.0f) * window[0 * src_stride + i];
        accum += (19.0f / 32.0f) * window[1 * src_stride + i];
        accum += (19.0f / 32.0f) * window[2 * src_stride + i];
        accum += (-3.0f / 32.0f) * window[3 * src_stride + i];

        dst_p[i] = accum;
    }
}

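/*
 * Note (added): (-3/32, 19/32, 19/32, -3/32) is a symmetric 4-tap cubic
 * interpolation kernel; the taps sum to 32/32 = 1, so flat areas pass
 * through unchanged. It is only applied where the prescreener marked the
 * pixel as easy (prescreen[i] != 0).
 */
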
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const NNEDIContext *const s = ctx->priv;
    AVFrame *out = arg;
    AVFrame *in = s->prev;
    const float in_scale = s->in_scale;
    const float out_scale = s->out_scale;
    const int depth = s->depth;
    const int interlaced = !!(in->flags & AV_FRAME_FLAG_INTERLACED);
    const int tff = s->field_n == (s->field < 0 ? interlaced ? !!(in->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) : 1 :
                                   (s->field & 1) ^ 1);

    for (int p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);
        const int slice_end = 2 * ((height / 2 * (jobnr + 1)) / nb_jobs);
        const uint8_t *src_data = in->data[p];
        uint8_t *dst_data = out->data[p];
        uint8_t *dst = out->data[p] + slice_start * out->linesize[p];
        const int src_linesize = in->linesize[p];
        const int dst_linesize = out->linesize[p];
        uint8_t *prescreen_buf = s->prescreen_buf[jobnr];
        float *srcbuf = s->input_buf[jobnr];
        const int srcbuf_stride = width + 64;
        float *dstbuf = s->output_buf[jobnr];
        const int dstbuf_stride = width;
        const int slice_height = (slice_end - slice_start) / 2;
        const int last_slice = slice_end == height;
        const uint8_t *in_line;
        uint8_t *out_line;
        int y_out;

        if (!(s->process_plane & (1 << p))) {
            av_image_copy_plane(dst, out->linesize[p],
                                in->data[p] + slice_start * in->linesize[p],
                                in->linesize[p],
                                s->linesize[p], slice_end - slice_start);
            continue;
        }

        y_out = slice_start + (tff ^ (slice_start & 1));
        in_line  = src_data + (y_out * src_linesize);
        out_line = dst_data + (y_out * dst_linesize);

        while (y_out < slice_end) {
            memcpy(out_line, in_line, s->linesize[p]);
            y_out += 2;
            in_line  += src_linesize * 2;
            out_line += dst_linesize * 2;
        }

        y_out = slice_start + ((!tff) ^ (slice_start & 1));

        s->read(src_data + FFMAX(y_out - 5, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 3, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 1, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        in_line  = src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize;
        out_line = dst_data + (y_out * dst_linesize);

        s->read(in_line, srcbuf + 32, src_linesize * 2, srcbuf_stride,
                width, slice_height - last_slice, in_scale);

        y_out += (slice_height - last_slice) * 2;

        s->read(src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        s->read(src_data + FFMIN(y_out + 3, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 1 - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        s->read(src_data + FFMIN(y_out + 5, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 2 - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        for (int y = 0; y < slice_end - slice_start; y += 2) {
            if (s->pscrn > 0)
                s->prescreen[s->pscrn > 1](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
                                           srcbuf_stride, prescreen_buf, width,
                                           &s->prescreener[s->pscrn - 1]);

            predictor(ctx,
                      srcbuf + (y / 2) * srcbuf_stride + 32,
                      srcbuf_stride,
                      dstbuf + (y / 2) * dstbuf_stride,
                      prescreen_buf, width,
                      &s->coeffs[s->etype][s->nnsparam][s->nsize], s->qual == 2);

            if (s->pscrn > 0)
                interpolation(srcbuf + (y / 2) * srcbuf_stride + 32,
                              srcbuf_stride,
                              dstbuf + (y / 2) * dstbuf_stride,
                              prescreen_buf, width);
        }

        s->write(dstbuf, out_line, dstbuf_stride, dst_linesize * 2,
                 width, slice_height, depth, out_scale);
    }

    return 0;
}

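/*
 * Note (added): each slice job works on a band of frame lines aligned to a
 * multiple of two: existing field lines are memcpy'd straight to the output,
 * while the missing field is reconstructed from a float staging buffer that
 * holds the band's source field rows plus three edge-clamped guard rows
 * above and below.
 */
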
static int get_frame(AVFilterContext *ctx, int is_second)
{
    NNEDIContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *dst;

    dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!dst)
        return AVERROR(ENOMEM);
    av_frame_copy_props(dst, s->prev);
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    dst->interlaced_frame = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->flags &= ~AV_FRAME_FLAG_INTERLACED;
    dst->pts = s->pts;

    ff_filter_execute(ctx, filter_slice, dst, NULL,
                      FFMIN(s->planeheight[1] / 2, s->nb_threads));

    if (s->field == -2 || s->field > 1)
        s->field_n = !s->field_n;

    return ff_filter_frame(outlink, dst);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    NNEDIContext *s = ctx->priv;
    int ret;

    if (!s->prev) {
        s->prev = in;
        return 0;
    }

    if ((s->deint && !(s->prev->flags & AV_FRAME_FLAG_INTERLACED)) || ctx->is_disabled) {
        s->prev->pts *= 2;
        ret = ff_filter_frame(ctx->outputs[0], s->prev);
        s->prev = in;
        return ret;
    }

    s->pts = s->prev->pts * 2;
    ret = get_frame(ctx, 0);
    if (ret < 0 || (s->field > -2 && s->field < 2)) {
        av_frame_free(&s->prev);
        s->prev = in;
        return ret;
    }

    s->pts = s->prev->pts + in->pts;
    ret = get_frame(ctx, 1);
    av_frame_free(&s->prev);
    s->prev = in;
    return ret;
}

static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    NNEDIContext *s = ctx->priv;
    int ret;

    if (s->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->prev) {
        AVFrame *next = av_frame_clone(s->prev);
        FilterLink *l = ff_filter_link(ctx->outputs[0]);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = s->prev->pts + av_rescale_q(1, av_inv_q(l->frame_rate),
                                                ctx->outputs[0]->time_base);
        s->eof = 1;

        ret = filter_frame(ctx->inputs[0], next);
    } else if (ret < 0) {
        return ret;
    }

    return ret;
}

static void copy_weights(float *dst, int n, const float **data)
{
    memcpy(dst, *data, n * sizeof(float));
    *data += n;
}

static float *allocate(float **ptr, int size)
{
    float *ret = *ptr;

    *ptr += size;

    return ret;
}

static int allocate_model(PredictorCoefficients *coeffs, int xdim, int ydim, int nns)
{
    int filter_size = nns * xdim * ydim;
    int bias_size = nns;
    float *data;

    data = av_calloc(filter_size + bias_size, 4 * sizeof(float));
    if (!data)
        return AVERROR(ENOMEM);

    coeffs->data = data;
    coeffs->xdim = xdim;
    coeffs->ydim = ydim;
    coeffs->nsize = xdim * ydim;
    coeffs->nns = nns;

    coeffs->softmax_q1 = allocate(&data, filter_size);
    coeffs->elliott_q1 = allocate(&data, filter_size);
    coeffs->softmax_bias_q1 = allocate(&data, bias_size);
    coeffs->elliott_bias_q1 = allocate(&data, bias_size);

    coeffs->softmax_q2 = allocate(&data, filter_size);
    coeffs->elliott_q2 = allocate(&data, filter_size);
    coeffs->softmax_bias_q2 = allocate(&data, bias_size);
    coeffs->elliott_bias_q2 = allocate(&data, bias_size);

    return 0;
}

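/*
 * Note (added): one av_calloc() backs all eight arrays of a model; the
 * "4 * sizeof(float)" element size covers the four filter/bias groups
 * (softmax/elliott x q1/q2), and allocate() just slices the block into
 * consecutive sub-arrays, so freeing coeffs->data releases everything.
 */
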
static int read_weights(AVFilterContext *ctx, const float *bdata)
{
    NNEDIContext *s = ctx->priv;
    int ret;

    copy_weights(&s->prescreener[0].kernel_l0[0][0], 4 * 48, &bdata);
    copy_weights(s->prescreener[0].bias_l0, 4, &bdata);

    copy_weights(&s->prescreener[0].kernel_l1[0][0], 4 * 4, &bdata);
    copy_weights(s->prescreener[0].bias_l1, 4, &bdata);

    copy_weights(&s->prescreener[0].kernel_l2[0][0], 4 * 8, &bdata);
    copy_weights(s->prescreener[0].bias_l2, 4, &bdata);

    for (int i = 0; i < 3; i++) {
        PrescreenerCoefficients *data = &s->prescreener[i + 1];
        float kernel_l0_shuffled[4 * 64];
        float kernel_l1_shuffled[4 * 4];

        copy_weights(kernel_l0_shuffled, 4 * 64, &bdata);
        copy_weights(data->bias_l0, 4, &bdata);

        copy_weights(kernel_l1_shuffled, 4 * 4, &bdata);
        copy_weights(data->bias_l1, 4, &bdata);

        for (int n = 0; n < 4; n++) {
            for (int k = 0; k < 64; k++)
                data->kernel_l0[n][k] = kernel_l0_shuffled[(k / 8) * 32 + n * 8 + k % 8];
            for (int k = 0; k < 4; k++)
                data->kernel_l1[n][k] = kernel_l1_shuffled[k * 4 + n];
        }
    }

    for (int m = 0; m < 2; m++) {
        // Grouping by neuron count.
        for (int i = 0; i < 5; i++) {
            const int nns = NNEDI_NNS[i];

            // Grouping by window size.
            for (int j = 0; j < 7; j++) {
                PredictorCoefficients *model = &s->coeffs[m][i][j];
                const int xdim = NNEDI_XDIM[j];
                const int ydim = NNEDI_YDIM[j];
                const int filter_size = xdim * ydim;

                ret = allocate_model(model, xdim, ydim, nns);
                if (ret < 0)
                    return ret;

                // Quality 1 model. NNS[i] * (XDIM[j] * YDIM[j]) * 2 coefficients.
                copy_weights(model->softmax_q1, nns * filter_size, &bdata);
                copy_weights(model->elliott_q1, nns * filter_size, &bdata);

                // Quality 1 model bias. NNS[i] * 2 coefficients.
                copy_weights(model->softmax_bias_q1, nns, &bdata);
                copy_weights(model->elliott_bias_q1, nns, &bdata);

                // Quality 2 model. NNS[i] * (XDIM[j] * YDIM[j]) * 2 coefficients.
                copy_weights(model->softmax_q2, nns * filter_size, &bdata);
                copy_weights(model->elliott_q2, nns * filter_size, &bdata);

                // Quality 2 model bias. NNS[i] * 2 coefficients.
                copy_weights(model->softmax_bias_q2, nns, &bdata);
                copy_weights(model->elliott_bias_q2, nns, &bdata);
            }
        }
    }

    return 0;
}

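/*
 * Note (added): the sizes above account exactly for NNEDI_WEIGHTS_SIZE.
 * Prescreeners: (4*48 + 4 + 4*4 + 4 + 4*8 + 4) + 3 * (4*64 + 4 + 4*4 + 4)
 * = 252 + 840 = 1092 floats. Predictors: for each of the 2 error types,
 * summing 4 * nns * (xdim*ydim + 1) over nns in {16,32,64,128,256} and the
 * 7 windows gives 4 * 496 * 855 = 1696320 floats per type, so 3392640 in
 * total. (1092 + 3392640) * 4 bytes = 13574928.
 */
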
static float mean(const float *input, int size)
{
    float sum = 0.f;

    for (int i = 0; i < size; i++)
        sum += input[i];

    return sum / size;
}

static void transform(float *input, int size, float mean, float half)
{
    for (int i = 0; i < size; i++)
        input[i] = (input[i] - mean) / half;
}

static void subtract_mean_old(PrescreenerCoefficients *coeffs, float half)
{
    for (int n = 0; n < 4; n++) {
        float m = mean(coeffs->kernel_l0[n], 48);

        transform(coeffs->kernel_l0[n], 48, m, half);
    }
}

static void subtract_mean_new(PrescreenerCoefficients *coeffs, float half)
{
    for (int n = 0; n < 4; n++) {
        float m = mean(coeffs->kernel_l0[n], 64);

        transform(coeffs->kernel_l0[n], 64, m, half);
    }
}

static void subtract_mean_predictor(PredictorCoefficients *model)
{
    const int filter_size = model->nsize;
    const int nns = model->nns;
    const float scale = 1.f / nns;

    double softmax_means[256]; // Average of individual softmax filters.
    double elliott_means[256]; // Average of individual elliott filters.
    double mean_filter[48 * 6] = { 0 }; // Pointwise average of all softmax filters.
    double mean_bias;

    // Quality 1.
    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q1 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q1 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++)
            mean_filter[k] += model->softmax_q1[nn * filter_size + k] - softmax_means[nn];
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    mean_bias = mean(model->softmax_bias_q1, nns);

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q1[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q1[nn * filter_size + k] -= elliott_means[nn];
        }
        model->softmax_bias_q1[nn] -= mean_bias;
    }

    // Quality 2.
    memset(mean_filter, 0, sizeof(mean_filter));

    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q2 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q2 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++) {
            mean_filter[k] += model->softmax_q2[nn * filter_size + k] - softmax_means[nn];
        }
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    mean_bias = mean(model->softmax_bias_q2, nns);

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q2[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q2[nn * filter_size + k] -= elliott_means[nn];
        }

        model->softmax_bias_q2[nn] -= mean_bias;
    }
}

static av_cold int init(AVFilterContext *ctx)
{
    NNEDIContext *s = ctx->priv;
    FILE *weights_file = NULL;
    int64_t weights_size;
    float *bdata;
    size_t bytes_read;
    int ret = 0;

    weights_file = avpriv_fopen_utf8(s->weights_file, "rb");
    if (!weights_file) {
        av_log(ctx, AV_LOG_ERROR, "Could not open the weights file, aborting!\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_END)) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the end of weights file.\n");
        fclose(weights_file);
        return AVERROR(EINVAL);
    }

    weights_size = ftell(weights_file);

    if (weights_size == -1) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't get size of weights file.\n");
        return AVERROR(EINVAL);
    } else if (weights_size != NNEDI_WEIGHTS_SIZE) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Unexpected weights file size.\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_SET)) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the start of weights file.\n");
        return AVERROR(EINVAL);
    }

    bdata = av_malloc(NNEDI_WEIGHTS_SIZE);
    if (!bdata) {
        fclose(weights_file);
        return AVERROR(ENOMEM);
    }

    bytes_read = fread(bdata, 1, NNEDI_WEIGHTS_SIZE, weights_file);
    if (bytes_read != NNEDI_WEIGHTS_SIZE) {
        fclose(weights_file);
        ret = AVERROR_INVALIDDATA;
        av_log(ctx, AV_LOG_ERROR, "Couldn't read weights file.\n");
        goto fail;
    }

    fclose(weights_file);

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = read_weights(ctx, bdata);
    if (ret < 0)
        goto fail;

fail:
    av_free(bdata);
    return ret;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    NNEDIContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    s->depth = desc->comp[0].depth;
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->half = ((1 << 8) - 1) / 2.f;
    s->out_scale = 1 << (s->depth - 8);
    s->in_scale = 1.f / s->out_scale;

    switch (s->depth) {
    case 8:
        s->read  = read_bytes;
        s->write = write_bytes;
        break;
    default:
        s->read  = read_words;
        s->write = write_words;
        break;
    }

    subtract_mean_old(&s->prescreener[0], s->half);
    subtract_mean_new(&s->prescreener[1], s->half);
    subtract_mean_new(&s->prescreener[2], s->half);
    subtract_mean_new(&s->prescreener[3], s->half);

    s->prescreen[0] = process_old;
    s->prescreen[1] = process_new;

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++)
                subtract_mean_predictor(&s->coeffs[i][j][k]);
        }
    }

    s->input_size = (s->planewidth[0] + 64) * (s->planeheight[0] + 6);
    s->input_buf = av_calloc(s->nb_threads, sizeof(*s->input_buf));
    if (!s->input_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->input_buf[i] = av_calloc(s->input_size, sizeof(**s->input_buf));
        if (!s->input_buf[i])
            return AVERROR(ENOMEM);
    }

    s->output_buf = av_calloc(s->nb_threads, sizeof(*s->output_buf));
    if (!s->output_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->output_buf[i] = av_calloc(s->input_size, sizeof(**s->output_buf));
        if (!s->output_buf[i])
            return AVERROR(ENOMEM);
    }

    s->prescreen_buf = av_calloc(s->nb_threads, sizeof(*s->prescreen_buf));
    if (!s->prescreen_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->prescreen_buf[i] = av_calloc(s->planewidth[0], sizeof(**s->prescreen_buf));
        if (!s->prescreen_buf[i])
            return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    NNEDIContext *s = ctx->priv;

    for (int i = 0; i < s->nb_threads && s->prescreen_buf; i++)
        av_freep(&s->prescreen_buf[i]);

    av_freep(&s->prescreen_buf);

    for (int i = 0; i < s->nb_threads && s->input_buf; i++)
        av_freep(&s->input_buf[i]);

    av_freep(&s->input_buf);

    for (int i = 0; i < s->nb_threads && s->output_buf; i++)
        av_freep(&s->output_buf[i]);

    av_freep(&s->output_buf);
    av_freep(&s->fdsp);

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++) {
                av_freep(&s->coeffs[i][j][k].data);
            }
        }
    }

    av_frame_free(&s->prev);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
};

const AVFilter ff_vf_nnedi = {
    .name          = "nnedi",
    .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
    .priv_size     = sizeof(NNEDIContext),
    .priv_class    = &nnedi_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};
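
/*
 * Usage sketch (added; not part of the original source). Assuming a copy of
 * nnedi3_weights.bin in the working directory, a typical invocation that
 * deinterlaces only flagged frames at double rate might look like:
 *
 *   ffmpeg -i interlaced.ts \
 *          -vf "nnedi=weights=nnedi3_weights.bin:field=af:deint=interlaced" \
 *          progressive.mkv
 *
 * The option names map onto nnedi_options[] above; "field=af" outputs both
 * fields and so doubles the frame rate, as set up in config_output().
 */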
Definition: vf_nnedi.c:167