FFmpeg
vf_eq.c
/*
 * Original MPlayer filters by Richard Felker, Hampa Hug, Daniel Moreno,
 * and Michael Niedermayer.
 *
 * Copyright (c) 2014 James Darnley <james.darnley@gmail.com>
 * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * very simple video equalizer
 */

#include "libavfilter/internal.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "vf_eq.h"

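/*
 * Build the 256-entry lookup table from the current contrast, brightness,
 * gamma and gamma_weight values.  Each input level is normalized to [0,1],
 * contrast and brightness are applied first, then the result is blended
 * between the linear value and its gamma-corrected value:
 *     v' = v * (1 - gamma_weight) + pow(v, 1/gamma) * gamma_weight
 */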
static void create_lut(EQParameters *param)
{
    int i;
    double g = 1.0 / param->gamma;
    double lw = 1.0 - param->gamma_weight;

    for (i = 0; i < 256; i++) {
        double v = i / 255.0;
        v = param->contrast * (v - 0.5) + 0.5 + param->brightness;

        if (v <= 0.0) {
            param->lut[i] = 0;
        } else {
            v = v * lw + pow(v, g) * param->gamma_weight;

            if (v >= 1.0)
                param->lut[i] = 255;
            else
                param->lut[i] = 256.0 * v;
        }
    }

    param->lut_clean = 1;
}

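/*
 * Remap one plane through the lookup table, rebuilding the table first if a
 * parameter change has marked it dirty (lut_clean == 0).
 */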
static void apply_lut(EQParameters *param, uint8_t *dst, int dst_stride,
                      const uint8_t *src, int src_stride, int w, int h)
{
    int x, y;

    if (!param->lut_clean)
        create_lut(param);

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            dst[y * dst_stride + x] = param->lut[src[y * src_stride + x]];
        }
    }
}

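/*
 * Plain C fallback for contrast/brightness only (no gamma).  Contrast is
 * converted to a 4.12 fixed-point multiplier and brightness to an integer
 * offset; out-of-range results are clamped to 0 or 255 by the sign trick
 * "(-pel) >> 31".  For example, contrast = 1.0 becomes 4096, so
 * (pel * 4096) >> 12 leaves the pixel value unchanged.
 */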
static void process_c(EQParameters *param, uint8_t *dst, int dst_stride,
                      const uint8_t *src, int src_stride, int w, int h)
{
    int x, y, pel;

    int contrast = (int) (param->contrast * 256 * 16);
    int brightness = ((int) (100.0 * param->brightness + 100.0) * 511) / 200 - 128 - contrast / 32;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            pel = ((src[y * src_stride + x] * contrast) >> 12) + brightness;

            if (pel & ~255)
                pel = (-pel) >> 31;

            dst[y * dst_stride + x] = pel;
        }
    }
}

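/*
 * Pick the per-plane processing routine: none (the plane is copied) when all
 * parameters are neutral, the fast contrast/brightness path while gamma is 1
 * and the contrast fits the fixed-point range, and the LUT otherwise.
 */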
static void check_values(EQParameters *param, EQContext *eq)
{
    if (param->contrast == 1.0 && param->brightness == 0.0 && param->gamma == 1.0)
        param->adjust = NULL;
    else if (param->gamma == 1.0 && fabs(param->contrast) < 7.9)
        param->adjust = eq->process;
    else
        param->adjust = apply_lut;
}

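/*
 * The set_* helpers below evaluate the option expressions and store the
 * results.  param[0] is the luma plane, param[1] and param[2] the chroma
 * planes; each change marks the affected LUTs dirty and re-runs
 * check_values().
 */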
static void set_contrast(EQContext *eq)
{
    eq->contrast = av_clipf(av_expr_eval(eq->contrast_pexpr, eq->var_values, eq), -1000.0, 1000.0);
    eq->param[0].contrast = eq->contrast;
    eq->param[0].lut_clean = 0;
    check_values(&eq->param[0], eq);
}

static void set_brightness(EQContext *eq)
{
    eq->brightness = av_clipf(av_expr_eval(eq->brightness_pexpr, eq->var_values, eq), -1.0, 1.0);
    eq->param[0].brightness = eq->brightness;
    eq->param[0].lut_clean = 0;
    check_values(&eq->param[0], eq);
}

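/*
 * Gamma is split across the YUV planes: the luma table uses gamma * gamma_g,
 * while the U and V tables use sqrt(gamma_b / gamma_g) and
 * sqrt(gamma_r / gamma_g), which approximates the per-channel RGB gammas in
 * YUV space.
 */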
static void set_gamma(EQContext *eq)
{
    int i;

    eq->gamma        = av_clipf(av_expr_eval(eq->gamma_pexpr,        eq->var_values, eq), 0.1, 10.0);
    eq->gamma_r      = av_clipf(av_expr_eval(eq->gamma_r_pexpr,      eq->var_values, eq), 0.1, 10.0);
    eq->gamma_g      = av_clipf(av_expr_eval(eq->gamma_g_pexpr,      eq->var_values, eq), 0.1, 10.0);
    eq->gamma_b      = av_clipf(av_expr_eval(eq->gamma_b_pexpr,      eq->var_values, eq), 0.1, 10.0);
    eq->gamma_weight = av_clipf(av_expr_eval(eq->gamma_weight_pexpr, eq->var_values, eq), 0.0, 1.0);

    eq->param[0].gamma = eq->gamma * eq->gamma_g;
    eq->param[1].gamma = sqrt(eq->gamma_b / eq->gamma_g);
    eq->param[2].gamma = sqrt(eq->gamma_r / eq->gamma_g);

    for (i = 0; i < 3; i++) {
        eq->param[i].gamma_weight = eq->gamma_weight;
        eq->param[i].lut_clean = 0;
        check_values(&eq->param[i], eq);
    }
}

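/* Saturation is implemented as a contrast adjustment on the two chroma planes. */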
static void set_saturation(EQContext *eq)
{
    int i;

    eq->saturation = av_clipf(av_expr_eval(eq->saturation_pexpr, eq->var_values, eq), 0.0, 3.0);

    for (i = 1; i < 3; i++) {
        eq->param[i].contrast = eq->saturation;
        eq->param[i].lut_clean = 0;
        check_values(&eq->param[i], eq);
    }
}

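/*
 * (Re)parse an option expression.  On failure the previously parsed
 * expression is kept, so a bad runtime command does not clobber a working
 * value.
 */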
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
{
    int ret;
    AVExpr *old = NULL;

    if (*pexpr)
        old = *pexpr;
    ret = av_expr_parse(pexpr, expr, var_names, NULL, NULL, NULL, NULL, 0, log_ctx);
    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error when parsing the expression '%s' for %s\n",
               expr, option);
        *pexpr = old;
        return ret;
    }

    av_expr_free(old);
    return 0;
}

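/*
 * Parse all option expressions once at init time; with eval=init they are
 * also evaluated here, otherwise evaluation is deferred to each frame.
 */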
static int initialize(AVFilterContext *ctx)
{
    EQContext *eq = ctx->priv;
    int ret;

    eq->process = process_c;

    if ((ret = set_expr(&eq->contrast_pexpr,     eq->contrast_expr,     "contrast",     ctx)) < 0 ||
        (ret = set_expr(&eq->brightness_pexpr,   eq->brightness_expr,   "brightness",   ctx)) < 0 ||
        (ret = set_expr(&eq->saturation_pexpr,   eq->saturation_expr,   "saturation",   ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_pexpr,        eq->gamma_expr,        "gamma",        ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_r_pexpr,      eq->gamma_r_expr,      "gamma_r",      ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_g_pexpr,      eq->gamma_g_expr,      "gamma_g",      ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_b_pexpr,      eq->gamma_b_expr,      "gamma_b",      ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_weight_pexpr, eq->gamma_weight_expr, "gamma_weight", ctx)) < 0)
        return ret;

    if (ARCH_X86)
        ff_eq_init_x86(eq);

    if (eq->eval_mode == EVAL_MODE_INIT) {
        set_gamma(eq);
        set_contrast(eq);
        set_brightness(eq);
        set_saturation(eq);
    }

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    EQContext *eq = ctx->priv;

    av_expr_free(eq->contrast_pexpr);     eq->contrast_pexpr     = NULL;
    av_expr_free(eq->brightness_pexpr);   eq->brightness_pexpr   = NULL;
    av_expr_free(eq->saturation_pexpr);   eq->saturation_pexpr   = NULL;
    av_expr_free(eq->gamma_pexpr);        eq->gamma_pexpr        = NULL;
    av_expr_free(eq->gamma_weight_pexpr); eq->gamma_weight_pexpr = NULL;
    av_expr_free(eq->gamma_r_pexpr);      eq->gamma_r_pexpr      = NULL;
    av_expr_free(eq->gamma_g_pexpr);      eq->gamma_g_pexpr      = NULL;
    av_expr_free(eq->gamma_b_pexpr);      eq->gamma_b_pexpr      = NULL;
}

static int config_props(AVFilterLink *inlink)
{
    EQContext *eq = inlink->dst->priv;

    eq->var_values[VAR_N] = 0;
    eq->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
                            NAN : av_q2d(inlink->frame_rate);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts_eq[] = {
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_eq);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))

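/*
 * Process one frame: with eval=frame the expressions are re-evaluated from
 * the current frame variables (n, t, pos), then each plane is either run
 * through its adjust() callback or copied untouched.  Chroma planes use the
 * subsampled width/height, e.g. a 1920x1080 yuv420p frame has 960x540 chroma
 * planes.
 */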
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    EQContext *eq = ctx->priv;
    AVFrame *out;
    int64_t pos = in->pkt_pos;
    const AVPixFmtDescriptor *desc;
    int i;

    out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);
    desc = av_pix_fmt_desc_get(inlink->format);

    eq->var_values[VAR_N]   = inlink->frame_count_out;
    eq->var_values[VAR_POS] = pos == -1 ? NAN : pos;
    eq->var_values[VAR_T]   = TS2T(in->pts, inlink->time_base);

    if (eq->eval_mode == EVAL_MODE_FRAME) {
        set_gamma(eq);
        set_contrast(eq);
        set_brightness(eq);
        set_saturation(eq);
    }

    for (i = 0; i < desc->nb_components; i++) {
        int w = inlink->w;
        int h = inlink->h;

        if (i == 1 || i == 2) {
            w = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
            h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
        }

        if (eq->param[i].adjust)
            eq->param[i].adjust(&eq->param[i], out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i], w, h);
        else
            av_image_copy_plane(out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i], w, h);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static inline int set_param(AVExpr **pexpr, const char *args, const char *cmd,
                            void (*set_fn)(EQContext *eq), AVFilterContext *ctx)
{
    EQContext *eq = ctx->priv;
    int ret;
    if ((ret = set_expr(pexpr, args, cmd, ctx)) < 0)
        return ret;
    if (eq->eval_mode == EVAL_MODE_INIT)
        set_fn(eq);
    return 0;
}

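/*
 * Runtime commands reuse the option names.  As an illustrative example using
 * the sendcmd filter, a filtergraph such as
 *     sendcmd=c='5.0 eq contrast 1.5',eq=contrast=1.0
 * would raise the contrast to 1.5 after five seconds.
 */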
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    EQContext *eq = ctx->priv;

#define SET_PARAM(param_name, set_fn_name) \
    if (!strcmp(cmd, #param_name)) return set_param(&eq->param_name##_pexpr, args, cmd, set_##set_fn_name, ctx);

    SET_PARAM(contrast, contrast)
    else SET_PARAM(brightness, brightness)
    else SET_PARAM(saturation, saturation)
    else SET_PARAM(gamma, gamma)
    else SET_PARAM(gamma_r, gamma)
    else SET_PARAM(gamma_g, gamma)
    else SET_PARAM(gamma_b, gamma)
    else SET_PARAM(gamma_weight, gamma)
    else return AVERROR(ENOSYS);
}

static const AVFilterPad eq_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad eq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

#define OFFSET(x) offsetof(EQContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

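/*
 * All value options are expressions, so they can reference frame variables
 * such as n, t and pos when eval=frame is selected.  An illustrative command
 * line would be:
 *     ffmpeg -i in.mp4 -vf "eq=contrast=1.2:brightness=0.05:gamma=1.1" out.mp4
 */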
static const AVOption eq_options[] = {
    { "contrast",     "set the contrast adjustment, negative values give a negative image",
        OFFSET(contrast_expr),     AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "brightness",   "set the brightness adjustment",
        OFFSET(brightness_expr),   AV_OPT_TYPE_STRING, {.str = "0.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "saturation",   "set the saturation adjustment",
        OFFSET(saturation_expr),   AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "gamma",        "set the initial gamma value",
        OFFSET(gamma_expr),        AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "gamma_r",      "gamma value for red",
        OFFSET(gamma_r_expr),      AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "gamma_g",      "gamma value for green",
        OFFSET(gamma_g_expr),      AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "gamma_b",      "gamma value for blue",
        OFFSET(gamma_b_expr),      AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "gamma_weight", "set the gamma weight which reduces the effect of gamma on bright areas",
        OFFSET(gamma_weight_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "eval",  "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB - 1, FLAGS, "eval" },
    { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
    { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(eq);

AVFilter ff_vf_eq = {
    .name            = "eq",
    .description     = NULL_IF_CONFIG_SMALL("Adjust brightness, contrast, gamma, and saturation."),
    .priv_size       = sizeof(EQContext),
    .priv_class      = &eq_class,
    .inputs          = eq_inputs,
    .outputs         = eq_outputs,
    .process_command = process_command,
    .query_formats   = query_formats,
    .init            = initialize,
    .uninit          = uninit,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};