vf_geq.c
/*
 * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (C) 2012 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Generic equation change filter
 * Originally written by Michael Niedermayer for the MPlayer project, and
 * ported by Clément Bœsch for FFmpeg.
 */
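
/*
 * Illustrative usage sketch (not part of the original documentation): the
 * filter evaluates one expression per plane for every output pixel, e.g. a
 * simple 5-point luma average from the ffmpeg command line:
 *
 *     ffmpeg -i in.mp4 -vf "geq=lum_expr='(p(X,Y)+p(X-1,Y)+p(X+1,Y)+p(X,Y-1)+p(X,Y+1))/5'" out.mp4
 *
 * p(X,Y) samples the plane currently being computed (see getpix() below);
 * when no chroma expression is given, the luma expression is reused for the
 * chroma planes.
 */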

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"

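/*
 * Per-pixel evaluation context exposed to the expressions: X/Y are the
 * current sample coordinates, W/H the dimensions of the plane being filled,
 * N the input frame index, SW/SH the plane-to-frame scale factors (e.g. 0.5
 * for 4:2:0 chroma), and T the frame timestamp in seconds.
 */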
static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
enum                                   { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB };

typedef struct GEQContext {
    const AVClass *class;
    AVExpr *e[4];               ///< expressions for each plane
    char *expr_str[4+3];        ///< expression strings for each plane
    AVFrame *picref;            ///< current input buffer
    uint8_t *dst;               ///< reference pointer to the 8bits output
    uint16_t *dst16;            ///< reference pointer to the 16bits output
    double values[VAR_VARS_NB]; ///< expression values
    int hsub, vsub;             ///< chroma subsampling
    int planes;                 ///< number of planes
    int is_rgb;
    int bps;
} GEQContext;

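/* The channel names in the following enum double as indices into expr_str[]:
 * Y..A occupy slots 0..3 and G, B, R slots 4..6, which is why expr_str[]
 * above is sized 4+3. */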
enum { Y = 0, U, V, A, G, B, R };

#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption geq_options[] = {
    { "lum_expr",   "set luminance expression",   OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "lum",        "set luminance expression",   OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb_expr",    "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb",         "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr_expr",    "set chroma red expression",  OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr",         "set chroma red expression",  OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "alpha_expr", "set alpha expression",       OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "a",          "set alpha expression",       OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "red_expr",   "set red expression",         OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "r",          "set red expression",         OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "green_expr", "set green expression",       OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "g",          "set green expression",       OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "blue_expr",  "set blue expression",        OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "b",          "set blue expression",        OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    {NULL},
};

AVFILTER_DEFINE_CLASS(geq);

static inline double getpix(void *priv, double x, double y, int plane)
{
    int xi, yi;
    GEQContext *geq = priv;
    AVFrame *picref = geq->picref;
    const uint8_t *src = picref->data[plane];
    int linesize = picref->linesize[plane];
    const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width,  geq->hsub) : picref->width;
    const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;

    if (!src)
        return 0;

    xi = x = av_clipf(x, 0, w - 2);
    yi = y = av_clipf(y, 0, h - 2);

    x -= xi;
    y -= yi;

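    /* Bilinear interpolation: (xi, yi) is the integer sample position and
     * (x, y) now holds only the fractional offsets, so the four neighbouring
     * samples are blended with weights (1-x)/x horizontally and (1-y)/y
     * vertically. */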
    if (geq->bps > 8) {
        const uint16_t *src16 = (const uint16_t*)src;
        linesize /= 2;

        return (1-y)*((1-x)*src16[xi +  yi    * linesize] + x*src16[xi + 1 +  yi    * linesize])
              + y *((1-x)*src16[xi + (yi+1) * linesize] + x*src16[xi + 1 + (yi+1) * linesize]);
    } else {
        return (1-y)*((1-x)*src[xi +  yi    * linesize] + x*src[xi + 1 +  yi    * linesize])
              + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
    }
}

//TODO: cubic interpolate
//TODO: keep the last few frames
static double lum  (void *priv, double x, double y) { return getpix(priv, x, y, 0); }
static double cb   (void *priv, double x, double y) { return getpix(priv, x, y, 1); }
static double cr   (void *priv, double x, double y) { return getpix(priv, x, y, 2); }
static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }

static av_cold int geq_init(AVFilterContext *ctx)
{
    GEQContext *geq = ctx->priv;
    int plane, ret = 0;

    if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
        av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
        ret = AVERROR(EINVAL);
        goto end;
    }
    geq->is_rgb = !geq->expr_str[Y];

    if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
        av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    if (!geq->expr_str[U] && !geq->expr_str[V]) {
        /* No chroma at all: fallback on luma */
        geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
        geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
    } else {
        /* One chroma unspecified, fallback on the other */
        if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
        if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
    }

    if (!geq->expr_str[A]) {
        char bps_string[8];
        snprintf(bps_string, sizeof(bps_string), "%d", (1<<geq->bps) - 1);
        geq->expr_str[A] = av_strdup(bps_string);
    }
    if (!geq->expr_str[G])
        geq->expr_str[G] = av_strdup("g(X,Y)");
    if (!geq->expr_str[B])
        geq->expr_str[B] = av_strdup("b(X,Y)");
    if (!geq->expr_str[R])
        geq->expr_str[R] = av_strdup("r(X,Y)");

    if (geq->is_rgb ?
            (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
        :
            (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    for (plane = 0; plane < 4; plane++) {
        static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
        static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
        static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
        const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
        double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };

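        /* In RGB mode the expressions for planes 0..2 are stored at
         * expr_str[G], expr_str[B] and expr_str[R] (indices 4..6), hence the
         * plane+4 remapping below; p() always resolves to the plane that is
         * currently being evaluated. */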
        ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            break;
    }

end:
    return ret;
}

static int geq_query_formats(AVFilterContext *ctx)
{
    GEQContext *geq = ctx->priv;
    static const enum AVPixelFormat yuv_pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUV444P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV420P9,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
        AV_PIX_FMT_YUV444P10,  AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
        AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
        AV_PIX_FMT_YUV444P12,  AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_YUV444P14,  AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_GRAY14,
        AV_PIX_FMT_YUV444P16,  AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat rgb_pix_fmts[] = {
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_GBRP9,
        AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list;

    if (geq->is_rgb) {
        fmts_list = ff_make_format_list(rgb_pix_fmts);
    } else
        fmts_list = ff_make_format_list(yuv_pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int geq_config_props(AVFilterLink *inlink)
{
    GEQContext *geq = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    av_assert0(desc);

    geq->hsub = desc->log2_chroma_w;
    geq->vsub = desc->log2_chroma_h;
    geq->bps = desc->comp[0].depth;
    geq->planes = desc->nb_components;
    return 0;
}

typedef struct ThreadData {
    int height;
    int width;
    int plane;
    int linesize;
} ThreadData;

static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GEQContext *geq = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int plane = td->plane;
    const int linesize = td->linesize;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
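    /* Slice threading: job jobnr of nb_jobs evaluates the half-open row
     * range [slice_start, slice_end) of the current plane. */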
    int x, y;
    uint8_t *ptr;
    uint16_t *ptr16;

    double values[VAR_VARS_NB];
    values[VAR_W]  = geq->values[VAR_W];
    values[VAR_H]  = geq->values[VAR_H];
    values[VAR_N]  = geq->values[VAR_N];
    values[VAR_SW] = geq->values[VAR_SW];
    values[VAR_SH] = geq->values[VAR_SH];
    values[VAR_T]  = geq->values[VAR_T];

    if (geq->bps == 8) {
        for (y = slice_start; y < slice_end; y++) {
            ptr = geq->dst + linesize * y;
            values[VAR_Y] = y;

            for (x = 0; x < width; x++) {
                values[VAR_X] = x;
                ptr[x] = av_expr_eval(geq->e[plane], values, geq);
            }
            ptr += linesize;
        }
    }
    else {
        for (y = slice_start; y < slice_end; y++) {
            ptr16 = geq->dst16 + (linesize/2) * y;
            values[VAR_Y] = y;
            for (x = 0; x < width; x++) {
                values[VAR_X] = x;
                ptr16[x] = av_expr_eval(geq->e[plane], values, geq);
            }
        }
    }

    return 0;
}

static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int plane;
    AVFilterContext *ctx = inlink->dst;
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    GEQContext *geq = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;

    geq->values[VAR_N] = inlink->frame_count_out;
    geq->values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base);

    geq->picref = in;
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
        const int width = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
        const int height = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
        const int linesize = out->linesize[plane];
        ThreadData td;

        geq->dst = out->data[plane];
        geq->dst16 = (uint16_t*)out->data[plane];

        geq->values[VAR_W] = width;
        geq->values[VAR_H] = height;
        geq->values[VAR_SW] = width / (double)inlink->w;
        geq->values[VAR_SH] = height / (double)inlink->h;
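        /* SW/SH expose the ratio of this plane's dimensions to the full
         * frame, e.g. 0.5 for the chroma planes of 4:2:0 input. */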

        td.width = width;
        td.height = height;
        td.plane = plane;
        td.linesize = linesize;

        ctx->internal->execute(ctx, slice_geq_filter, &td, NULL, FFMIN(height, nb_threads));
    }

    av_frame_free(&geq->picref);
    return ff_filter_frame(outlink, out);
}

static av_cold void geq_uninit(AVFilterContext *ctx)
{
    int i;
    GEQContext *geq = ctx->priv;

    for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
        av_expr_free(geq->e[i]);
}

static const AVFilterPad geq_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = geq_config_props,
        .filter_frame = geq_filter_frame,
    },
    { NULL }
};

static const AVFilterPad geq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_geq = {
    .name          = "geq",
    .description   = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
    .priv_size     = sizeof(GEQContext),
    .init          = geq_init,
    .uninit        = geq_uninit,
    .query_formats = geq_query_formats,
    .inputs        = geq_inputs,
    .outputs       = geq_outputs,
    .priv_class    = &geq_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};