FFmpeg
vf_lut.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Compute a look-up table for binding the input value to the output
24  * value, and apply it to input video.
25  */
26 
27 #include "libavutil/attributes.h"
28 #include "libavutil/bswap.h"
29 #include "libavutil/common.h"
30 #include "libavutil/eval.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "avfilter.h"
34 #include "drawutils.h"
35 #include "formats.h"
36 #include "internal.h"
37 #include "video.h"
38 
/* Names of the variables usable in the per-component expressions.
 * Must stay in sync (same order) with enum var_name below. */
static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "val",      ///< input value for the pixel
    "maxval",   ///< max value for the pixel
    "minval",   ///< min value for the pixel
    "negval",   ///< negated value
    "clipval",  ///< input value clipped to the [minval, maxval] range
    NULL
};
49 
/* Indexes into LutContext.var_values; mirrors var_names[] above. */
enum var_name {
    VAR_W,
    VAR_H,
    VAR_VAL,
    VAR_MAXVAL,
    VAR_MINVAL,
    VAR_NEGVAL,
    VAR_CLIPVAL,
    VAR_VARS_NB   ///< number of variables, sizes LutContext.var_values
};
60 
61 typedef struct LutContext {
62  const AVClass *class;
63  uint16_t lut[4][256 * 256]; ///< lookup table for each component
64  char *comp_expr_str[4];
66  int hsub, vsub;
68  int is_rgb, is_yuv;
69  int is_planar;
70  int is_16bit;
71  int step;
72 } LutContext;
73 
/* Component indexes for the two colorspaces; Y/R, U/G, V/B alias the
 * same slots, A is always the fourth component. */
#define Y 0
#define U 1
#define V 2
#define R 0
#define G 1
#define B 2
#define A 3

#define OFFSET(x) offsetof(LutContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
84 
85 static const AVOption options[] = {
86  { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
87  { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
88  { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
89  { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
90  { "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
91  { "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
92  { "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
93  { "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
94  { "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
95  { "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
96  { "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "clipval" }, .flags = FLAGS },
97  { NULL }
98 };
99 
101 {
102  LutContext *s = ctx->priv;
103  int i;
104 
105  for (i = 0; i < 4; i++) {
106  av_expr_free(s->comp_expr[i]);
107  s->comp_expr[i] = NULL;
108  av_freep(&s->comp_expr_str[i]);
109  }
110 }
111 
/* Little-endian-only lists: the 16-bit code paths byte-swap on
 * big-endian hosts (see HAVE_BIGENDIAN in the slice functions). */
#define YUV_FORMATS                                         \
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,    \
    AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,    \
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,   \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,   \
    AV_PIX_FMT_YUVJ440P,                                             \
    AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUV420P9LE, \
    AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV440P10LE, \
    AV_PIX_FMT_YUV444P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV440P12LE, \
    AV_PIX_FMT_YUV444P14LE, AV_PIX_FMT_YUV422P14LE, AV_PIX_FMT_YUV420P14LE, \
    AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV420P16LE, \
    AV_PIX_FMT_YUVA444P16LE, AV_PIX_FMT_YUVA422P16LE, AV_PIX_FMT_YUVA420P16LE

#define RGB_FORMATS                             \
    AV_PIX_FMT_ARGB,         AV_PIX_FMT_RGBA,         \
    AV_PIX_FMT_ABGR,         AV_PIX_FMT_BGRA,         \
    AV_PIX_FMT_RGB24,        AV_PIX_FMT_BGR24,        \
    AV_PIX_FMT_RGB48LE,      AV_PIX_FMT_RGBA64LE,     \
    AV_PIX_FMT_GBRP,         AV_PIX_FMT_GBRAP,        \
    AV_PIX_FMT_GBRP9LE,      AV_PIX_FMT_GBRP10LE,     \
    AV_PIX_FMT_GBRAP10LE,                             \
    AV_PIX_FMT_GBRP12LE,     AV_PIX_FMT_GBRP14LE,     \
    AV_PIX_FMT_GBRP16LE,     AV_PIX_FMT_GBRAP12LE,    \
    AV_PIX_FMT_GBRAP16LE

#define GRAY_FORMATS                            \
    AV_PIX_FMT_GRAY8,   AV_PIX_FMT_GRAY9LE,  AV_PIX_FMT_GRAY10LE, \
    AV_PIX_FMT_GRAY12LE, AV_PIX_FMT_GRAY14LE, AV_PIX_FMT_GRAY16LE
140 
144 
146 {
147  LutContext *s = ctx->priv;
148 
149  const enum AVPixelFormat *pix_fmts = s->is_rgb ? rgb_pix_fmts :
150  s->is_yuv ? yuv_pix_fmts :
151  all_pix_fmts;
153 }
154 
155 /**
156  * Clip value val in the minval - maxval range.
157  */
158 static double clip(void *opaque, double val)
159 {
160  LutContext *s = opaque;
161  double minval = s->var_values[VAR_MINVAL];
162  double maxval = s->var_values[VAR_MAXVAL];
163 
164  return av_clip(val, minval, maxval);
165 }
166 
167 /**
168  * Compute gamma correction for value val, assuming the minval-maxval
169  * range, val is clipped to a value contained in the same interval.
170  */
171 static double compute_gammaval(void *opaque, double gamma)
172 {
173  LutContext *s = opaque;
174  double val = s->var_values[VAR_CLIPVAL];
175  double minval = s->var_values[VAR_MINVAL];
176  double maxval = s->var_values[VAR_MAXVAL];
177 
178  return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
179 }
180 
181 /**
182  * Compute ITU Rec.709 gamma correction of value val.
183  */
184 static double compute_gammaval709(void *opaque, double gamma)
185 {
186  LutContext *s = opaque;
187  double val = s->var_values[VAR_CLIPVAL];
188  double minval = s->var_values[VAR_MINVAL];
189  double maxval = s->var_values[VAR_MAXVAL];
190  double level = (val - minval) / (maxval - minval);
191  level = level < 0.018 ? 4.5 * level
192  : 1.099 * pow(level, 1.0 / gamma) - 0.099;
193  return level * (maxval - minval) + minval;
194 }
195 
196 static double (* const funcs1[])(void *, double) = {
197  clip,
200  NULL
201 };
202 
203 static const char * const funcs1_names[] = {
204  "clip",
205  "gammaval",
206  "gammaval709",
207  NULL
208 };
209 
211 {
212  AVFilterContext *ctx = inlink->dst;
213  LutContext *s = ctx->priv;
215  uint8_t rgba_map[4]; /* component index -> RGBA color index map */
216  int min[4], max[4];
217  int val, color, ret;
218 
219  s->hsub = desc->log2_chroma_w;
220  s->vsub = desc->log2_chroma_h;
221 
222  s->var_values[VAR_W] = inlink->w;
223  s->var_values[VAR_H] = inlink->h;
224  s->is_16bit = desc->comp[0].depth > 8;
225 
226  switch (inlink->format) {
227  case AV_PIX_FMT_YUV410P:
228  case AV_PIX_FMT_YUV411P:
229  case AV_PIX_FMT_YUV420P:
230  case AV_PIX_FMT_YUV422P:
231  case AV_PIX_FMT_YUV440P:
232  case AV_PIX_FMT_YUV444P:
233  case AV_PIX_FMT_YUVA420P:
234  case AV_PIX_FMT_YUVA422P:
235  case AV_PIX_FMT_YUVA444P:
262  min[Y] = 16 * (1 << (desc->comp[0].depth - 8));
263  min[U] = 16 * (1 << (desc->comp[1].depth - 8));
264  min[V] = 16 * (1 << (desc->comp[2].depth - 8));
265  min[A] = 0;
266  max[Y] = 235 * (1 << (desc->comp[0].depth - 8));
267  max[U] = 240 * (1 << (desc->comp[1].depth - 8));
268  max[V] = 240 * (1 << (desc->comp[2].depth - 8));
269  max[A] = (1 << desc->comp[0].depth) - 1;
270  break;
271  case AV_PIX_FMT_RGB48LE:
272  case AV_PIX_FMT_RGBA64LE:
273  min[0] = min[1] = min[2] = min[3] = 0;
274  max[0] = max[1] = max[2] = max[3] = 65535;
275  break;
276  default:
277  min[0] = min[1] = min[2] = min[3] = 0;
278  max[0] = max[1] = max[2] = max[3] = 255 * (1 << (desc->comp[0].depth - 8));
279  }
280 
281  s->is_yuv = s->is_rgb = 0;
282  s->is_planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;
283  if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1;
284  else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;
285 
286  if (s->is_rgb) {
287  ff_fill_rgba_map(rgba_map, inlink->format);
288  s->step = av_get_bits_per_pixel(desc) >> 3;
289  if (s->is_16bit) {
290  s->step = s->step >> 1;
291  }
292  }
293 
294  for (color = 0; color < desc->nb_components; color++) {
295  double res;
296  int comp = s->is_rgb ? rgba_map[color] : color;
297 
298  /* create the parsed expression */
299  av_expr_free(s->comp_expr[color]);
300  s->comp_expr[color] = NULL;
301  ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color],
303  if (ret < 0) {
305  "Error when parsing the expression '%s' for the component %d and color %d.\n",
306  s->comp_expr_str[comp], comp, color);
307  return AVERROR(EINVAL);
308  }
309 
310  /* compute the lut */
311  s->var_values[VAR_MAXVAL] = max[color];
312  s->var_values[VAR_MINVAL] = min[color];
313 
314  for (val = 0; val < FF_ARRAY_ELEMS(s->lut[comp]); val++) {
315  s->var_values[VAR_VAL] = val;
316  s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
317  s->var_values[VAR_NEGVAL] =
318  av_clip(min[color] + max[color] - s->var_values[VAR_VAL],
319  min[color], max[color]);
320 
321  res = av_expr_eval(s->comp_expr[color], s->var_values, s);
322  if (isnan(res)) {
324  "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
325  s->comp_expr_str[color], val, comp);
326  return AVERROR(EINVAL);
327  }
328  s->lut[comp][val] = av_clip((int)res, 0, max[A]);
329  av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]);
330  }
331  }
332 
333  return 0;
334 }
335 
336 struct thread_data {
339 
340  int w;
341  int h;
342 };
343 
/* Shared prologue of the packed slice jobs: unpack the thread_data
 * argument and compute this job's [slice_start, slice_end) row range. */
#define LOAD_PACKED_COMMON\
    LutContext *s = ctx->priv;\
    const struct thread_data *td = arg;\
\
    int i, j;\
    const int w = td->w;\
    const int h = td->h;\
    AVFrame *in = td->in;\
    AVFrame *out = td->out;\
    const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;\
    const int step = s->step;\
\
    const int slice_start = (h *  jobnr   ) / nb_jobs;\
    const int slice_end   = (h * (jobnr+1)) / nb_jobs;
359 /* packed, 16-bit */
360 static int lut_packed_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
361 {
363 
364  uint16_t *inrow, *outrow, *inrow0, *outrow0;
365  const int in_linesize = in->linesize[0] / 2;
366  const int out_linesize = out->linesize[0] / 2;
367  inrow0 = (uint16_t *)in ->data[0];
368  outrow0 = (uint16_t *)out->data[0];
369 
370  for (i = slice_start; i < slice_end; i++) {
371  inrow = inrow0 + i * in_linesize;
372  outrow = outrow0 + i * out_linesize;
373  for (j = 0; j < w; j++) {
374 
375  switch (step) {
376 #if HAVE_BIGENDIAN
377  case 4: outrow[3] = av_bswap16(tab[3][av_bswap16(inrow[3])]); // Fall-through
378  case 3: outrow[2] = av_bswap16(tab[2][av_bswap16(inrow[2])]); // Fall-through
379  case 2: outrow[1] = av_bswap16(tab[1][av_bswap16(inrow[1])]); // Fall-through
380  default: outrow[0] = av_bswap16(tab[0][av_bswap16(inrow[0])]);
381 #else
382  case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
383  case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
384  case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
385  default: outrow[0] = tab[0][inrow[0]];
386 #endif
387  }
388  outrow += step;
389  inrow += step;
390  }
391  }
392 
393  return 0;
394 }
395 
396 /* packed, 8-bit */
397 static int lut_packed_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
398 {
400 
401  uint8_t *inrow, *outrow, *inrow0, *outrow0;
402  const int in_linesize = in->linesize[0];
403  const int out_linesize = out->linesize[0];
404  inrow0 = in ->data[0];
405  outrow0 = out->data[0];
406 
407  for (i = slice_start; i < slice_end; i++) {
408  inrow = inrow0 + i * in_linesize;
409  outrow = outrow0 + i * out_linesize;
410  for (j = 0; j < w; j++) {
411  switch (step) {
412  case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
413  case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
414  case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
415  default: outrow[0] = tab[0][inrow[0]];
416  }
417  outrow += step;
418  inrow += step;
419  }
420  }
421 
422  return 0;
423 }
424 
/* Shared prologue of the planar slice jobs. */
#define LOAD_PLANAR_COMMON\
    LutContext *s = ctx->priv;\
    const struct thread_data *td = arg;\
    int i, j, plane;\
    AVFrame *in = td->in;\
    AVFrame *out = td->out;

/* Per-plane setup: chroma planes (1 and 2) are subsampled, so their
 * dimensions and this job's row range are derived per plane. */
#define PLANAR_COMMON\
    int vsub = plane == 1 || plane == 2 ? s->vsub : 0;\
    int hsub = plane == 1 || plane == 2 ? s->hsub : 0;\
    int h = AV_CEIL_RSHIFT(td->h, vsub);\
    int w = AV_CEIL_RSHIFT(td->w, hsub);\
    const uint16_t *tab = s->lut[plane];\
\
    const int slice_start = (h *  jobnr   ) / nb_jobs;\
    const int slice_end   = (h * (jobnr+1)) / nb_jobs;
441 
442 /* planar >8 bit depth */
443 static int lut_planar_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
444 {
446 
447  uint16_t *inrow, *outrow;
448 
449  for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
451 
452  const int in_linesize = in->linesize[plane] / 2;
453  const int out_linesize = out->linesize[plane] / 2;
454 
455  inrow = (uint16_t *)in ->data[plane] + slice_start * in_linesize;
456  outrow = (uint16_t *)out->data[plane] + slice_start * out_linesize;
457 
458  for (i = slice_start; i < slice_end; i++) {
459  for (j = 0; j < w; j++) {
460 #if HAVE_BIGENDIAN
461  outrow[j] = av_bswap16(tab[av_bswap16(inrow[j])]);
462 #else
463  outrow[j] = tab[inrow[j]];
464 #endif
465  }
466  inrow += in_linesize;
467  outrow += out_linesize;
468  }
469  }
470 
471  return 0;
472 }
473 
474 /* planar 8bit depth */
475 static int lut_planar_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
476 {
478 
479  uint8_t *inrow, *outrow;
480 
481  for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
483 
484  const int in_linesize = in->linesize[plane];
485  const int out_linesize = out->linesize[plane];
486 
487  inrow = in ->data[plane] + slice_start * in_linesize;
488  outrow = out->data[plane] + slice_start * out_linesize;
489 
490  for (i = slice_start; i < slice_end; i++) {
491  for (j = 0; j < w; j++)
492  outrow[j] = tab[inrow[j]];
493  inrow += in_linesize;
494  outrow += out_linesize;
495  }
496  }
497 
498  return 0;
499 }
500 
/* Build the thread_data on the stack for the slice dispatch below.
 * The packed variant uses the frame's own height. */
#define PACKED_THREAD_DATA\
    struct thread_data td = {\
        .in  = in,\
        .out = out,\
        .w   = inlink->w,\
        .h   = in->height,\
    };

#define PLANAR_THREAD_DATA\
    struct thread_data td = {\
        .in  = in,\
        .out = out,\
        .w   = inlink->w,\
        .h   = inlink->h,\
    };
516 
518 {
519  AVFilterContext *ctx = inlink->dst;
520  LutContext *s = ctx->priv;
521  AVFilterLink *outlink = ctx->outputs[0];
522  AVFrame *out;
523  int direct = 0;
524 
525  if (av_frame_is_writable(in)) {
526  direct = 1;
527  out = in;
528  } else {
529  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
530  if (!out) {
531  av_frame_free(&in);
532  return AVERROR(ENOMEM);
533  }
535  }
536 
537  if (s->is_rgb && s->is_16bit && !s->is_planar) {
538  /* packed, 16-bit */
542  } else if (s->is_rgb && !s->is_planar) {
543  /* packed 8 bits */
547  } else if (s->is_16bit) {
548  /* planar >8 bit depth */
552  } else {
553  /* planar 8bit depth */
557  }
558 
559  if (!direct)
560  av_frame_free(&in);
561 
562  return ff_filter_frame(outlink, out);
563 }
564 
565 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
566  char *res, int res_len, int flags)
567 {
568  int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
569 
570  if (ret < 0)
571  return ret;
572 
573  return config_props(ctx->inputs[0]);
574 }
575 
576 static const AVFilterPad inputs[] = {
577  { .name = "default",
578  .type = AVMEDIA_TYPE_VIDEO,
579  .filter_frame = filter_frame,
580  .config_props = config_props,
581  },
582 };
583 static const AVFilterPad outputs[] = {
584  { .name = "default",
585  .type = AVMEDIA_TYPE_VIDEO,
586  },
587 };
588 
589 #define DEFINE_LUT_FILTER(name_, description_, priv_class_) \
590  const AVFilter ff_vf_##name_ = { \
591  .name = #name_, \
592  .description = NULL_IF_CONFIG_SMALL(description_), \
593  .priv_class = &priv_class_ ## _class, \
594  .priv_size = sizeof(LutContext), \
595  .init = name_##_init, \
596  .uninit = uninit, \
597  FILTER_INPUTS(inputs), \
598  FILTER_OUTPUTS(outputs), \
599  FILTER_QUERY_FUNC(query_formats), \
600  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | \
601  AVFILTER_FLAG_SLICE_THREADS, \
602  .process_command = process_command, \
603  }
604 
605 AVFILTER_DEFINE_CLASS_EXT(lut, "lut/lutyuv/lutrgb", options);
606 
#if CONFIG_LUT_FILTER

/* The generic "lut" filter has no init: the colorspace is decided from
 * the negotiated input format in config_props(). */
#define lut_init NULL
DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.",
                  lut);
#undef lut_init
#endif

#if CONFIG_LUTYUV_FILTER

static av_cold int lutyuv_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;

    s->is_yuv = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.",
                  lut);
#endif

#if CONFIG_LUTRGB_FILTER

static av_cold int lutrgb_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;

    s->is_rgb = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.",
                  lut);
#endif
AVFILTER_DEFINE_CLASS_EXT
AVFILTER_DEFINE_CLASS_EXT(lut, "lut/lutyuv/lutrgb", options)
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:98
AV_PIX_FMT_YUV420P9LE
@ AV_PIX_FMT_YUV420P9LE
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:147
A
#define A
Definition: vf_lut.c:80
direct
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:61
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:96
G
#define G
Definition: vf_lut.c:78
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
LutContext::is_yuv
int is_yuv
Definition: vf_lut.c:68
compute_gammaval709
static double compute_gammaval709(void *opaque, double gamma)
Compute ITU Rec.709 gamma correction of value val.
Definition: vf_lut.c:184
GRAY_FORMATS
#define GRAY_FORMATS
Definition: vf_lut.c:137
PLANAR_COMMON
#define PLANAR_COMMON
Definition: vf_lut.c:432
out
FILE * out
Definition: movenc.c:54
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_lut.c:517
color
Definition: vf_paletteuse.c:599
comp
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:85
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
AV_PIX_FMT_YUV422P14LE
@ AV_PIX_FMT_YUV422P14LE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:239
options
static const AVOption options[]
Definition: vf_lut.c:85
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
VAR_W
@ VAR_W
Definition: vf_lut.c:51
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
pixdesc.h
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
w
uint8_t w
Definition: llviddspenc.c:38
AVOption
AVOption.
Definition: opt.h:247
B
#define B
Definition: vf_lut.c:79
data
const char data[16]
Definition: mxf.c:143
LutContext::comp_expr_str
char * comp_expr_str[4]
Definition: vf_lut.c:64
AV_PIX_FMT_YUV420P16LE
@ AV_PIX_FMT_YUV420P16LE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:121
av_get_bits_per_pixel
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
Definition: pixdesc.c:2612
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
thread_data::w
int w
Definition: vf_lut.c:340
video.h
AV_PIX_FMT_YUV444P16LE
@ AV_PIX_FMT_YUV444P16LE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:125
formats.h
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
yuv_pix_fmts
static enum AVPixelFormat yuv_pix_fmts[]
Definition: vf_lut.c:141
VAR_H
@ VAR_H
Definition: vf_lut.c:52
LutContext::is_16bit
int is_16bit
Definition: vf_lut.c:70
AV_PIX_FMT_YUV420P12LE
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:233
var_names
static const char *const var_names[]
Definition: vf_lut.c:39
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
PACKED_THREAD_DATA
#define PACKED_THREAD_DATA
Definition: vf_lut.c:501
val
static double val(void *priv, double ch)
Definition: aeval.c:76
LOAD_PACKED_COMMON
#define LOAD_PACKED_COMMON
Definition: vf_lut.c:344
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
funcs1_names
static const char *const funcs1_names[]
Definition: vf_lut.c:203
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
AV_PIX_FMT_YUV420P10LE
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:149
LutContext::lut
uint16_t lut[4][256 *256]
lookup table for each component
Definition: vf_lut.c:63
AV_PIX_FMT_YUV444P12LE
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:241
AV_PIX_FMT_YUV444P14LE
@ AV_PIX_FMT_YUV444P14LE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:243
LutContext
Definition: vf_lut.c:61
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
thread_data
Definition: vf_lut.c:336
funcs1
static double(*const funcs1[])(void *, double)
Definition: vf_lut.c:196
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
R
#define R
Definition: vf_lut.c:77
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2042
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:705
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
var_name
var_name
Definition: noise_bsf.c:47
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
all_pix_fmts
static enum AVPixelFormat all_pix_fmts[]
Definition: vf_lut.c:143
AVExpr
Definition: eval.c:157
YUV_FORMATS
#define YUV_FORMATS
Definition: vf_lut.c:112
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PLANAR_THREAD_DATA
#define PLANAR_THREAD_DATA
Definition: vf_lut.c:509
AV_PIX_FMT_YUV444P10LE
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:155
U
#define U
Definition: vf_lut.c:75
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_YUVA422P10LE
@ AV_PIX_FMT_YUVA422P10LE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
Definition: pixfmt.h:177
if
if(ret)
Definition: filter_design.txt:179
AV_PIX_FMT_YUV422P16LE
@ AV_PIX_FMT_YUV422P16LE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:123
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
isnan
#define isnan(x)
Definition: libm.h:340
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:196
AV_PIX_FMT_YUVA444P9LE
@ AV_PIX_FMT_YUVA444P9LE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
Definition: pixfmt.h:173
ff_fmt_is_in
int ff_fmt_is_in(int fmt, const int *fmts)
Tell if an integer is contained in the provided -1-terminated list of integers.
Definition: formats.c:352
AV_PIX_FMT_YUVA420P16LE
@ AV_PIX_FMT_YUVA420P16LE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
Definition: pixfmt.h:181
AV_PIX_FMT_YUV440P10LE
@ AV_PIX_FMT_YUV440P10LE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
Definition: pixfmt.h:265
AV_PIX_FMT_YUVA420P9LE
@ AV_PIX_FMT_YUVA420P9LE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
Definition: pixfmt.h:169
lut_packed_8bits
static int lut_packed_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_lut.c:397
AV_PIX_FMT_YUV420P14LE
@ AV_PIX_FMT_YUV420P14LE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:235
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
thread_data::h
int h
Definition: vf_lut.c:341
eval.h
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_lut.c:565
AV_PIX_FMT_YUV440P12LE
@ AV_PIX_FMT_YUV440P12LE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
Definition: pixfmt.h:267
OFFSET
#define OFFSET(x)
Definition: vf_lut.c:82
lut_planar_8bits
static int lut_planar_8bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_lut.c:475
LutContext::var_values
double var_values[VAR_VARS_NB]
Definition: vf_lut.c:67
AV_PIX_FMT_YUV422P10LE
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:151
LutContext::hsub
int hsub
Definition: vf_lut.c:66
color
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:92
VAR_MAXVAL
@ VAR_MAXVAL
Definition: vf_lut.c:54
VAR_VAL
@ VAR_VAL
Definition: vf_lut.c:53
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:473
LutContext::comp_expr
AVExpr * comp_expr[4]
Definition: vf_lut.c:65
LutContext::step
int step
Definition: vf_lut.c:71
LOAD_PLANAR_COMMON
#define LOAD_PLANAR_COMMON
Definition: vf_lut.c:425
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:882
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
lut_packed_16bits
static int lut_packed_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_lut.c:360
av_bswap16
#define av_bswap16
Definition: bswap.h:31
attributes.h
rgb_pix_fmts
static enum AVPixelFormat rgb_pix_fmts[]
Definition: vf_lut.c:142
AV_PIX_FMT_YUVA420P10LE
@ AV_PIX_FMT_YUVA420P10LE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
Definition: pixfmt.h:175
internal.h
VAR_CLIPVAL
@ VAR_CLIPVAL
Definition: vf_lut.c:57
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
common.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:803
outputs
static const AVFilterPad outputs[]
Definition: vf_lut.c:583
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
inputs
static const AVFilterPad inputs[]
Definition: vf_lut.c:576
ret
ret
Definition: filter_design.txt:187
bswap.h
LutContext::is_rgb
int is_rgb
Definition: vf_lut.c:68
VAR_NEGVAL
@ VAR_NEGVAL
Definition: vf_lut.c:56
AVFrame::height
int height
Definition: frame.h:389
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
LutContext::vsub
int vsub
Definition: vf_lut.c:66
avfilter.h
AV_PIX_FMT_YUV444P9LE
@ AV_PIX_FMT_YUV444P9LE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:153
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_lut.c:145
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
V
#define V
Definition: vf_lut.c:76
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
VAR_MINVAL
@ VAR_MINVAL
Definition: vf_lut.c:55
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AV_PIX_FMT_YUVA444P10LE
@ AV_PIX_FMT_YUVA444P10LE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Definition: pixfmt.h:179
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:34
LutContext::is_planar
int is_planar
Definition: vf_lut.c:69
thread_data::in
AVFrame * in
Definition: vf_lut.c:337
AV_PIX_FMT_YUV422P9LE
@ AV_PIX_FMT_YUV422P9LE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:157
AV_PIX_FMT_YUVA422P16LE
@ AV_PIX_FMT_YUVA422P16LE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
Definition: pixfmt.h:183
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
thread_data::out
AVFrame * out
Definition: vf_lut.c:338
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
config_props
static int config_props(AVFilterLink *inlink)
Definition: vf_lut.c:210
AV_PIX_FMT_YUVA444P16LE
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Definition: pixfmt.h:185
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
drawutils.h
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:143
lut_planar_16bits
static int lut_planar_16bits(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_lut.c:443
RGB_FORMATS
#define RGB_FORMATS
Definition: vf_lut.c:125
AV_PIX_FMT_YUV422P12LE
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:237
FLAGS
#define FLAGS
Definition: vf_lut.c:83
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_lut.c:100
clip
static double clip(void *opaque, double val)
Clip value val in the minval - maxval range.
Definition: vf_lut.c:158
compute_gammaval
static double compute_gammaval(void *opaque, double gamma)
Compute gamma correction for value val, assuming the minval-maxval range, val is clipped to a value c...
Definition: vf_lut.c:171
DEFINE_LUT_FILTER
#define DEFINE_LUT_FILTER(name_, description_, priv_class_)
Definition: vf_lut.c:589
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
VAR_VARS_NB
@ VAR_VARS_NB
Definition: vf_lut.c:58
Y
#define Y
Definition: vf_lut.c:74
min
float min
Definition: vorbis_enc_data.h:429
AV_PIX_FMT_YUVA422P9LE
@ AV_PIX_FMT_YUVA422P9LE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
Definition: pixfmt.h:171