FFmpeg
vf_colorbalance.c
/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define R 0
#define G 1
#define B 2
#define A 3

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

typedef struct Range {
    float shadows;
    float midtones;
    float highlights;
} Range;

typedef struct ColorBalanceContext {
    const AVClass *class;
    Range cyan_red;
    Range magenta_green;
    Range yellow_blue;
    int preserve_lightness;

    uint8_t rgba_map[4];
    int depth;
    int max;
    int step;

    int (*color_balance)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ColorBalanceContext;

#define OFFSET(x) offsetof(ColorBalanceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption colorbalance_options[] = {
    { "rs", "set red shadows",      OFFSET(cyan_red.shadows),         AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gs", "set green shadows",    OFFSET(magenta_green.shadows),    AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bs", "set blue shadows",     OFFSET(yellow_blue.shadows),      AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "rm", "set red midtones",     OFFSET(cyan_red.midtones),        AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gm", "set green midtones",   OFFSET(magenta_green.midtones),   AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bm", "set blue midtones",    OFFSET(yellow_blue.midtones),     AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "rh", "set red highlights",   OFFSET(cyan_red.highlights),      AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gh", "set green highlights", OFFSET(magenta_green.highlights), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bh", "set blue highlights",  OFFSET(yellow_blue.highlights),   AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "pl", "preserve lightness",   OFFSET(preserve_lightness),       AV_OPT_TYPE_BOOL,  {.i64=0},  0, 1, FLAGS },
    { NULL }
};
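
/*
 * Illustrative usage (the values are arbitrary examples, not from this file):
 *     ffmpeg -i in.mp4 -vf "colorbalance=rs=0.3:bs=-0.1:bh=0.2" out.mp4
 * Positive values push a tonal region toward red/green/blue, negative values
 * toward the complementary cyan/magenta/yellow, as the Range field names
 * (cyan_red, magenta_green, yellow_blue) suggest.
 */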

AVFILTER_DEFINE_CLASS(colorbalance);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
        AV_PIX_FMT_RGBA,    AV_PIX_FMT_BGRA,
        AV_PIX_FMT_ABGR,    AV_PIX_FMT_ARGB,
        AV_PIX_FMT_0BGR,    AV_PIX_FMT_0RGB,
        AV_PIX_FMT_RGB0,    AV_PIX_FMT_BGR0,
        AV_PIX_FMT_RGB48,   AV_PIX_FMT_BGR48,
        AV_PIX_FMT_RGBA64,  AV_PIX_FMT_BGRA64,
        AV_PIX_FMT_GBRP,    AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_GBRP9,   AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRP14,  AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);

    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static float get_component(float v, float l,
                           float s, float m, float h)
{
    const float a = 4.f, b = 0.333f, scale = 0.7f;

    s *= av_clipf((b - l) * a + 0.5f, 0, 1) * scale;
    m *= av_clipf((l - b) * a + 0.5f, 0, 1) * av_clipf((1.0 - l - b) * a + 0.5f, 0, 1) * scale;
    h *= av_clipf((l + b - 1) * a + 0.5f, 0, 1) * scale;

    v += s;
    v += m;
    v += h;

    return av_clipf(v, 0, 1);
}
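
/*
 * With a = 4 and b = 0.333, each adjustment above is scaled by a clipped
 * linear ramp of the lightness term l. For example, the shadow weight
 * av_clipf((b - l) * a + 0.5f, 0, 1) is 1 at l = 0 and reaches 0 near
 * l = 0.458, so shadow corrections fade out on brighter pixels; the midtone
 * and highlight weights use the mirrored ramps. With all nine options at
 * their default of 0, v is returned unchanged and the filter is an identity.
 */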

static float hfun(float n, float h, float s, float l)
{
    float a = s * FFMIN(l, 1. - l);
    float k = fmodf(n + h / 30.f, 12.f);

    return av_clipf(l - a * FFMAX(FFMIN3(k - 3.f, 9.f - k, 1), -1.f), 0, 1);
}
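
/*
 * hfun() is the standard HSL-to-RGB channel helper
 * f(n) = l - a * max(-1, min(k - 3, 9 - k, 1)) with k = (n + h/30) mod 12;
 * preservel() below evaluates it at n = 0, 8 and 4 to reconstruct the red,
 * green and blue channels from hue, saturation and lightness.
 */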

static void preservel(float *r, float *g, float *b, float l)
{
    float max = FFMAX3(*r, *g, *b);
    float min = FFMIN3(*r, *g, *b);
    float h, s;

    l *= 0.5;

    if (*r == *g && *g == *b) {
        h = 0.;
    } else if (max == *r) {
        h = 60. * (0. + (*g - *b) / (max - min));
    } else if (max == *g) {
        h = 60. * (2. + (*b - *r) / (max - min));
    } else if (max == *b) {
        h = 60. * (4. + (*r - *g) / (max - min));
    } else {
        h = 0.;
    }
    if (h < 0.)
        h += 360.;

    if (max == 0. || min == 1.) {
        s = 0.;
    } else {
        s = (max - min) / (1. - FFABS(2. * l - 1));
    }

    *r = hfun(0, h, s, l);
    *g = hfun(8, h, s, l);
    *b = hfun(4, h, s, l);
}
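
/*
 * preservel() receives l as FFMAX3 + FFMIN3 of the original RGB, i.e. twice
 * the HSL lightness (hence the l *= 0.5 above). It recomputes hue and
 * saturation from the already-adjusted RGB, then resynthesizes the pixel at
 * the original lightness, so the balance shift changes chroma but not
 * perceived brightness.
 */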

static int color_balance8_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
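    /* Planar GBR(A) layout: data[0] holds G, data[1] holds B, data[2] holds R
     * and data[3], when present, holds alpha -- hence the g/b/r pointer order. */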
    const uint8_t *srcg = in->data[0] + slice_start * in->linesize[0];
    const uint8_t *srcb = in->data[1] + slice_start * in->linesize[1];
    const uint8_t *srcr = in->data[2] + slice_start * in->linesize[2];
    const uint8_t *srca = in->data[3] + slice_start * in->linesize[3];
    uint8_t *dstg = out->data[0] + slice_start * out->linesize[0];
    uint8_t *dstb = out->data[1] + slice_start * out->linesize[1];
    uint8_t *dstr = out->data[2] + slice_start * out->linesize[2];
    uint8_t *dsta = out->data[3] + slice_start * out->linesize[3];
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uint8(r * max);
            dstg[j] = av_clip_uint8(g * max);
            dstb[j] = av_clip_uint8(b * max);
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0];
        srcb += in->linesize[1];
        srcr += in->linesize[2];
        srca += in->linesize[3];
        dstg += out->linesize[0];
        dstb += out->linesize[1];
        dstr += out->linesize[2];
        dsta += out->linesize[3];
    }

    return 0;
}

static int color_balance16_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
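    /* linesize[] is measured in bytes; dividing it by 2 converts it into a
     * uint16_t element stride for these 9- to 16-bit planes. */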
    const uint16_t *srcg = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint16_t *srcb = (const uint16_t *)in->data[1] + slice_start * in->linesize[1] / 2;
    const uint16_t *srcr = (const uint16_t *)in->data[2] + slice_start * in->linesize[2] / 2;
    const uint16_t *srca = (const uint16_t *)in->data[3] + slice_start * in->linesize[3] / 2;
    uint16_t *dstg = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    uint16_t *dstb = (uint16_t *)out->data[1] + slice_start * out->linesize[1] / 2;
    uint16_t *dstr = (uint16_t *)out->data[2] + slice_start * out->linesize[2] / 2;
    uint16_t *dsta = (uint16_t *)out->data[3] + slice_start * out->linesize[3] / 2;
    const int depth = s->depth;
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uintp2_c(r * max, depth);
            dstg[j] = av_clip_uintp2_c(g * max, depth);
            dstb[j] = av_clip_uintp2_c(b * max, depth);
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0] / 2;
        srcb += in->linesize[1] / 2;
        srcr += in->linesize[2] / 2;
        srca += in->linesize[3] / 2;
        dstg += out->linesize[0] / 2;
        dstb += out->linesize[1] / 2;
        dstr += out->linesize[2] / 2;
        dsta += out->linesize[3] / 2;
    }

    return 0;
}

static int color_balance8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint8_t *srcrow = in->data[0] + slice_start * in->linesize[0];
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const float max = s->max;
    const int step = s->step;
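    /* For packed pixels, step is the stride of one pixel in bytes (3 for
     * RGB24, 4 for RGBA/BGRA/0RGB/...) and rgba_map[] gives each channel's
     * position within a pixel, as filled by ff_fill_rgba_map() in
     * config_output(). */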
    uint8_t *dstrow;
    int i, j;

    dstrow = out->data[0] + slice_start * out->linesize[0];
    for (i = slice_start; i < slice_end; i++) {
        const uint8_t *src = srcrow;
        uint8_t *dst = dstrow;

        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uint8(r * max);
            dst[j + goffset] = av_clip_uint8(g * max);
            dst[j + boffset] = av_clip_uint8(b * max);
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0];
        dstrow += out->linesize[0];
    }

    return 0;
}

static int color_balance16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint16_t *srcrow = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const int step = s->step / 2;
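    /* s->step was computed in bytes; halving it yields the pixel stride in
     * uint16_t units (e.g. 4 for RGBA64), matching the pointers here. */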
    const int depth = s->depth;
    const float max = s->max;
    uint16_t *dstrow;
    int i, j;

    dstrow = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    for (i = slice_start; i < slice_end; i++) {
        const uint16_t *src = srcrow;
        uint16_t *dst = dstrow;

        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uintp2_c(r * max, depth);
            dst[j + goffset] = av_clip_uintp2_c(g * max, depth);
            dst[j + boffset] = av_clip_uintp2_c(b * max, depth);
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0] / 2;
        dstrow += out->linesize[0] / 2;
    }

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ColorBalanceContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    const int depth = desc->comp[0].depth;
    const int max = (1 << depth) - 1;
    const int planar = av_pix_fmt_count_planes(outlink->format) > 1;

    s->depth = depth;
    s->max = max;

    if (max == 255 && planar) {
        s->color_balance = color_balance8_p;
    } else if (planar) {
        s->color_balance = color_balance16_p;
    } else if (max == 255) {
        s->color_balance = color_balance8;
    } else {
        s->color_balance = color_balance16;
    }

    ff_fill_rgba_map(s->rgba_map, outlink->format);
    s->step = av_get_padded_bits_per_pixel(desc) >> 3;

    return 0;
}
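
/*
 * Dispatch summary: 8-bit planar -> color_balance8_p, higher-depth planar ->
 * color_balance16_p, 8-bit packed -> color_balance8, higher-depth packed ->
 * color_balance16. rgba_map and step only matter for the packed paths but
 * are computed unconditionally, which is harmless.
 */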

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ColorBalanceContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, s->color_balance, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
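
/*
 * Writable input frames are processed in place (out == in); otherwise a new
 * buffer is allocated, the frame metadata copied over, and the input freed
 * after the slice-threaded run, which splits the rows across up to
 * FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)) jobs.
 */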

static const AVFilterPad colorbalance_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad colorbalance_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_colorbalance = {
    .name            = "colorbalance",
    .description     = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
    .priv_size       = sizeof(ColorBalanceContext),
    .priv_class      = &colorbalance_class,
    .query_formats   = query_formats,
    .inputs          = colorbalance_inputs,
    .outputs         = colorbalance_outputs,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};
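
/*
 * Editor's note (behavior inferred from the flags and option flags above):
 * AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC provides the generic "enable"
 * expression option, and AV_OPT_FLAG_RUNTIME_PARAM together with
 * ff_filter_process_command() lets all options be re-set mid-stream, e.g.
 * through the sendcmd filter.
 */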