FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vf_colorspace.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
/**
 * @file
 * Convert between colorspaces.
 */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/pixfmt.h"
30 
31 #include "avfilter.h"
32 #include "colorspacedsp.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 
37 enum DitherMode {
41 };
42 
43 enum Colorspace {
54 };
55 
56 enum Whitepoint {
60 };
61 
68 };
69 
81 };
82 
83 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
94 };
95 
96 static const enum AVColorSpace default_csp[CS_NB + 1] = {
107 };
108 
111  double xr, yr, xg, yg, xb, yb;
112 };
113 
115  double alpha, beta, gamma, delta;
116 };
117 
119  double cr, cg, cb;
120 };
121 
123  double xw, yw;
124 };
125 
126 typedef struct ColorSpaceContext {
127  const AVClass *class;
128 
130 
132  enum AVColorSpace in_csp, out_csp, user_csp;
133  enum AVColorRange in_rng, out_rng, user_rng;
135  enum AVColorPrimaries in_prm, out_prm, user_prm;
136  enum AVPixelFormat in_format, user_format;
140 
141  int16_t *rgb[3];
142  ptrdiff_t rgb_stride;
143  unsigned rgb_sz;
145 
148  DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
149 
152  int16_t *lin_lut, *delin_lut;
153 
156  DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
157  DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
158  DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
159  DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
167 
168 // FIXME deal with odd width/heights (or just forbid it)
169 // FIXME faster linearize/delinearize implementation (integer pow)
170 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
171 // FIXME test that the values in (de)lin_lut don't exceed their container storage
172 // type size (only useful if we keep the LUT and don't move to fast integer pow)
173 // FIXME dithering if bitdepth goes down?
174 // FIXME bitexact for fate integration?
175 
176 /*
177  * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
178  * The older ones (bt470bg/m) are also explained in their respective ITU docs
179  * (e.g. https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.470-5-199802-S!!PDF-E.pdf)
180  * whereas the newer ones can typically be copied directly from wikipedia :)
181  */
183  [AVCOL_SPC_FCC] = { 0.30, 0.59, 0.11 },
184  [AVCOL_SPC_BT470BG] = { 0.299, 0.587, 0.114 },
185  [AVCOL_SPC_SMPTE170M] = { 0.299, 0.587, 0.114 },
186  [AVCOL_SPC_BT709] = { 0.2126, 0.7152, 0.0722 },
187  [AVCOL_SPC_SMPTE240M] = { 0.212, 0.701, 0.087 },
188  [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
189  [AVCOL_SPC_BT2020_CL] = { 0.2627, 0.6780, 0.0593 },
190 };
191 
193 {
194  const struct LumaCoefficients *coeffs;
195 
196  if (csp >= AVCOL_SPC_NB)
197  return NULL;
198  coeffs = &luma_coefficients[csp];
199  if (!coeffs->cr)
200  return NULL;
201 
202  return coeffs;
203 }
204 
205 static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
206  double rgb2yuv[3][3])
207 {
208  double bscale, rscale;
209 
210  rgb2yuv[0][0] = coeffs->cr;
211  rgb2yuv[0][1] = coeffs->cg;
212  rgb2yuv[0][2] = coeffs->cb;
213  bscale = 0.5 / (coeffs->cb - 1.0);
214  rscale = 0.5 / (coeffs->cr - 1.0);
215  rgb2yuv[1][0] = bscale * coeffs->cr;
216  rgb2yuv[1][1] = bscale * coeffs->cg;
217  rgb2yuv[1][2] = 0.5;
218  rgb2yuv[2][0] = 0.5;
219  rgb2yuv[2][1] = rscale * coeffs->cg;
220  rgb2yuv[2][2] = rscale * coeffs->cb;
221 }
222 
223 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
224 // find any actual tables that document their real values...
225 // See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
227  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
228  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
229  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
230  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
231  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
232  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
233  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
234 };
235 
236 static const struct TransferCharacteristics *
238 {
239  const struct TransferCharacteristics *coeffs;
240 
241  if (trc >= AVCOL_TRC_NB)
242  return NULL;
243  coeffs = &transfer_characteristics[trc];
244  if (!coeffs->alpha)
245  return NULL;
246 
247  return coeffs;
248 }
249 
251  [WP_D65] = { 0.3127, 0.3290 },
252  [WP_C] = { 0.3100, 0.3160 },
253 };
254 
256  [AVCOL_PRI_BT709] = { WP_D65, 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 },
257  [AVCOL_PRI_BT470M] = { WP_C, 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 },
258  [AVCOL_PRI_BT470BG] = { WP_D65, 0.640, 0.330, 0.290, 0.600, 0.150, 0.060,},
259  [AVCOL_PRI_SMPTE170M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
260  [AVCOL_PRI_SMPTE240M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
261  [AVCOL_PRI_BT2020] = { WP_D65, 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
262 };
263 
265 {
266  const struct ColorPrimaries *coeffs;
267 
268  if (prm >= AVCOL_PRI_NB)
269  return NULL;
270  coeffs = &color_primaries[prm];
271  if (!coeffs->xr)
272  return NULL;
273 
274  return coeffs;
275 }
276 
/*
 * Invert a 3x3 matrix via the adjugate/determinant method. The input is
 * copied to a local first, so in and out may alias. No singularity check
 * is done: callers only pass invertible colorimetric matrices.
 */
static void invert_matrix3x3(const double in[3][3], double out[3][3])
{
    double m[3][3];
    double det;
    int i, j;

    for (i = 0; i < 3; i++)
        for (j = 0; j < 3; j++)
            m[i][j] = in[i][j];

    /* out = adjugate(m): out[j][i] is the signed cofactor of m[i][j];
     * the cyclic index trick folds the (-1)^(i+j) sign into the minor */
    for (i = 0; i < 3; i++) {
        int i1 = (i + 1) % 3, i2 = (i + 2) % 3;

        for (j = 0; j < 3; j++) {
            int j1 = (j + 1) % 3, j2 = (j + 2) % 3;

            out[j][i] = m[i1][j1] * m[i2][j2] - m[i1][j2] * m[i2][j1];
        }
    }

    /* determinant by expansion along the first column, then scale */
    det = m[0][0] * out[0][0] + m[1][0] * out[0][1] + m[2][0] * out[0][2];
    det = 1.0 / det;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 3; j++)
            out[i][j] *= det;
    }
}
303 
305 {
306  int n;
307  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
308  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
309  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
310  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
311  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
312 
313  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
314  if (!s->lin_lut)
315  return AVERROR(ENOMEM);
316  s->delin_lut = &s->lin_lut[32768];
317  for (n = 0; n < 32768; n++) {
318  double v = (n - 2048.0) / 28672.0, d, l;
319 
320  // delinearize
321  if (v <= -out_beta) {
322  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
323  } else if (v < out_beta) {
324  d = out_delta * v;
325  } else {
326  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
327  }
328  s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
329 
330  // linearize
331  if (v <= -in_beta) {
332  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
333  } else if (v < in_beta) {
334  l = v * in_idelta;
335  } else {
336  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
337  }
338  s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
339  }
340 
341  return 0;
342 }
343 
344 /*
345  * see e.g. http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
346  */
347 static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs,
348  double rgb2xyz[3][3])
349 {
350  const struct WhitepointCoefficients *wp = &whitepoint_coefficients[coeffs->wp];
351  double i[3][3], sr, sg, sb, zw;
352 
353  rgb2xyz[0][0] = coeffs->xr / coeffs->yr;
354  rgb2xyz[0][1] = coeffs->xg / coeffs->yg;
355  rgb2xyz[0][2] = coeffs->xb / coeffs->yb;
356  rgb2xyz[1][0] = rgb2xyz[1][1] = rgb2xyz[1][2] = 1.0;
357  rgb2xyz[2][0] = (1.0 - coeffs->xr - coeffs->yr) / coeffs->yr;
358  rgb2xyz[2][1] = (1.0 - coeffs->xg - coeffs->yg) / coeffs->yg;
359  rgb2xyz[2][2] = (1.0 - coeffs->xb - coeffs->yb) / coeffs->yb;
360  invert_matrix3x3(rgb2xyz, i);
361  zw = 1.0 - wp->xw - wp->yw;
362  sr = i[0][0] * wp->xw + i[0][1] * wp->yw + i[0][2] * zw;
363  sg = i[1][0] * wp->xw + i[1][1] * wp->yw + i[1][2] * zw;
364  sb = i[2][0] * wp->xw + i[2][1] * wp->yw + i[2][2] * zw;
365  rgb2xyz[0][0] *= sr;
366  rgb2xyz[0][1] *= sg;
367  rgb2xyz[0][2] *= sb;
368  rgb2xyz[1][0] *= sr;
369  rgb2xyz[1][1] *= sg;
370  rgb2xyz[1][2] *= sb;
371  rgb2xyz[2][0] *= sr;
372  rgb2xyz[2][1] *= sg;
373  rgb2xyz[2][2] *= sb;
374 }
375 
/*
 * dst = src2 . src1 — note the operand order: when the matrices act on
 * column vectors, src2 is applied after src1. dst must not alias either
 * source operand.
 */
static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
{
    int row, col, k;

    for (row = 0; row < 3; row++) {
        for (col = 0; col < 3; col++) {
            double sum = 0.0;

            for (k = 0; k < 3; k++)
                sum += src2[row][k] * src1[k][col];
            dst[row][col] = sum;
        }
    }
}
386 
387 /*
388  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
389  * This function uses the Bradford mechanism.
390  */
/*
 * Build a 3x3 XYZ->XYZ chromatic-adaptation matrix that converts colors
 * balanced for whitepoint src into colors balanced for whitepoint dst,
 * using the cone-response model selected by wp_adapt.
 */
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
                                       enum Whitepoint src, enum Whitepoint dst)
{
    /* cone-response ("MA") matrices for the supported adaptation methods */
    static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
        [WP_ADAPT_BRADFORD] = {
            { 0.8951, 0.2664, -0.1614 },
            { -0.7502, 1.7135, 0.0367 },
            { 0.0389, -0.0685, 1.0296 },
        }, [WP_ADAPT_VON_KRIES] = {
            { 0.40024, 0.70760, -0.08081 },
            { -0.22630, 1.16532, 0.04570 },
            { 0.00000, 0.00000, 0.91822 },
        },
    };
    const double (*ma)[3] = ma_tbl[wp_adapt];
    const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
    double zw_src = 1.0 - wp_src->xw - wp_src->yw;   /* z = 1 - x - y */
    const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
    double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
    double mai[3][3], fac[3][3], tmp[3][3];
    double rs, gs, bs, rd, gd, bd;

    invert_matrix3x3(ma, mai);
    /* cone responses of the source and destination whitepoints */
    rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
    gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
    bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
    rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
    gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
    bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
    /* diagonal gain in cone space: destination response / source response */
    fac[0][0] = rd / rs;
    fac[1][1] = gd / gs;
    fac[2][2] = bd / bs;
    fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
    /* out = MA^-1 . fac . MA (right-to-left when applied to an XYZ vector) */
    mul3x3(tmp, ma, fac);
    mul3x3(out, tmp, mai);
}
427 
/*
 * In-place remap of three int16_t planes through a 32768-entry LUT.
 * Each sample is biased by 2048 and clipped to 15 bits to form the LUT
 * index, matching the pseudo-signed intermediate RGB representation.
 */
static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    int plane;

    for (plane = 0; plane < 3; plane++) {
        int16_t *row = buf[plane];
        int y;

        for (y = 0; y < h; y++, row += stride) {
            int x;

            for (x = 0; x < w; x++)
                row[x] = lut[av_clip_uintp2(2048 + row[x], 15)];
        }
    }
}
444 
445 struct ThreadData {
446  AVFrame *in, *out;
447  ptrdiff_t in_linesize[3], out_linesize[3];
449 };
450 
451 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
452 {
453  struct ThreadData *td = data;
454  ColorSpaceContext *s = ctx->priv;
455  uint8_t *in_data[3], *out_data[3];
456  int16_t *rgb[3];
457  int h_in = (td->in->height + 1) >> 1;
458  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
459  int w = td->in->width, h = h2 - h1;
460 
461  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
462  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
463  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
464  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
465  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
466  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
467  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
468  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
469  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
470 
471  // FIXME for simd, also make sure we do pictures with negative stride
472  // top-down so we don't overwrite lines with padding of data before it
473  // in the same buffer (same as swscale)
474 
475  if (s->yuv2yuv_fastmode) {
476  // FIXME possibly use a fast mode in case only the y range changes?
477  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
478  // are non-zero
479  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
480  s->yuv2yuv_coeffs, s->yuv_offset);
481  } else {
482  // FIXME maybe (for caching effciency) do pipeline per-line instead of
483  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
484  // 2 lines, for yuv420.)
485  /*
486  * General design:
487  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
488  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
489  * of RGB in psuedo-restricted 15+sign bits. That means that the float
490  * range [0.0,1.0] is in [0,28762], and the remainder of the int16_t
491  * range is used for overflow/underflow outside the representable
492  * range of this RGB type. rgb2yuv is the exact opposite.
493  * - gamma correction is done using a LUT since that appears to work
494  * fairly fast.
495  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
496  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
497  * read chroma pixels at luma resolution. If you want some more fancy
498  * filter, you can use swscale to convert to yuv444p.
499  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
500  */
501  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
502  s->yuv2rgb_coeffs, s->yuv_offset[0]);
503  if (!s->rgb2rgb_passthrough) {
504  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
505  if (!s->lrgb2lrgb_passthrough)
506  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
507  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
508  }
509  if (s->dither == DITHER_FSB) {
510  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
512  } else {
513  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
514  s->rgb2yuv_coeffs, s->yuv_offset[1]);
515  }
516  }
517 
518  return 0;
519 }
520 
521 static int get_range_off(int *off, int *y_rng, int *uv_rng,
522  enum AVColorRange rng, int depth)
523 {
524  switch (rng) {
525  case AVCOL_RANGE_MPEG:
526  *off = 16 << (depth - 8);
527  *y_rng = 219 << (depth - 8);
528  *uv_rng = 224 << (depth - 8);
529  break;
530  case AVCOL_RANGE_JPEG:
531  *off = 0;
532  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
533  break;
534  default:
535  return AVERROR(EINVAL);
536  }
537 
538  return 0;
539 }
540 
542  const AVFrame *in, const AVFrame *out)
543 {
544  ColorSpaceContext *s = ctx->priv;
545  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
546  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
547  int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
548 
549 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
550 #define supported_subsampling(lcw, lch) \
551  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
552 #define supported_format(d) \
553  ((d) != NULL && (d)->nb_components == 3 && \
554  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
555  supported_depth((d)->comp[0].depth) && \
556  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
557 
558  if (!supported_format(in_desc)) {
559  av_log(ctx, AV_LOG_ERROR,
560  "Unsupported input format %d (%s) or bitdepth (%d)\n",
562  in_desc ? in_desc->comp[0].depth : -1);
563  return AVERROR(EINVAL);
564  }
565  if (!supported_format(out_desc)) {
566  av_log(ctx, AV_LOG_ERROR,
567  "Unsupported output format %d (%s) or bitdepth (%d)\n",
568  out->format, av_get_pix_fmt_name(out->format),
569  out_desc ? out_desc->comp[0].depth : -1);
570  return AVERROR(EINVAL);
571  }
572 
573  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
574  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
575  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
576  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
577  if (in->colorspace != s->in_csp ||
578  in->color_range != s->in_rng) s->in_lumacoef = NULL;
579  if (out->colorspace != s->out_csp ||
580  out->color_range != s->out_rng) s->out_lumacoef = NULL;
581 
582  if (!s->out_primaries || !s->in_primaries) {
583  s->in_prm = in->color_primaries;
584  s->in_primaries = get_color_primaries(s->in_prm);
585  if (!s->in_primaries) {
586  av_log(ctx, AV_LOG_ERROR,
587  "Unsupported input primaries %d (%s)\n",
588  s->in_prm, av_color_primaries_name(s->in_prm));
589  return AVERROR(EINVAL);
590  }
591  s->out_prm = out->color_primaries;
592  s->out_primaries = get_color_primaries(s->out_prm);
593  if (!s->out_primaries) {
594  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
595  if (s->user_all == CS_UNSPECIFIED) {
596  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
597  } else {
598  av_log(ctx, AV_LOG_ERROR,
599  "Unsupported output color property %d\n", s->user_all);
600  }
601  } else {
602  av_log(ctx, AV_LOG_ERROR,
603  "Unsupported output primaries %d (%s)\n",
604  s->out_prm, av_color_primaries_name(s->out_prm));
605  }
606  return AVERROR(EINVAL);
607  }
609  sizeof(*s->in_primaries));
610  if (!s->lrgb2lrgb_passthrough) {
611  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
612 
613  fill_rgb2xyz_table(s->out_primaries, rgb2xyz);
614  invert_matrix3x3(rgb2xyz, xyz2rgb);
615  fill_rgb2xyz_table(s->in_primaries, rgb2xyz);
616  if (s->out_primaries->wp != s->in_primaries->wp &&
617  s->wp_adapt != WP_ADAPT_IDENTITY) {
618  double wpconv[3][3], tmp[3][3];
619 
621  s->out_primaries->wp);
622  mul3x3(tmp, rgb2xyz, wpconv);
623  mul3x3(rgb2rgb, tmp, xyz2rgb);
624  } else {
625  mul3x3(rgb2rgb, rgb2xyz, xyz2rgb);
626  }
627  for (m = 0; m < 3; m++)
628  for (n = 0; n < 3; n++) {
629  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
630  for (o = 1; o < 8; o++)
631  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
632  }
633 
634  emms = 1;
635  }
636  }
637 
638  if (!s->in_txchr) {
639  av_freep(&s->lin_lut);
640  s->in_trc = in->color_trc;
641  s->in_txchr = get_transfer_characteristics(s->in_trc);
642  if (!s->in_txchr) {
643  av_log(ctx, AV_LOG_ERROR,
644  "Unsupported input transfer characteristics %d (%s)\n",
645  s->in_trc, av_color_transfer_name(s->in_trc));
646  return AVERROR(EINVAL);
647  }
648  }
649 
650  if (!s->out_txchr) {
651  av_freep(&s->lin_lut);
652  s->out_trc = out->color_trc;
653  s->out_txchr = get_transfer_characteristics(s->out_trc);
654  if (!s->out_txchr) {
655  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
656  if (s->user_all == CS_UNSPECIFIED) {
657  av_log(ctx, AV_LOG_ERROR,
658  "Please specify output transfer characteristics\n");
659  } else {
660  av_log(ctx, AV_LOG_ERROR,
661  "Unsupported output color property %d\n", s->user_all);
662  }
663  } else {
664  av_log(ctx, AV_LOG_ERROR,
665  "Unsupported output transfer characteristics %d (%s)\n",
666  s->out_trc, av_color_transfer_name(s->out_trc));
667  }
668  return AVERROR(EINVAL);
669  }
670  }
671 
673  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
674  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
675  res = fill_gamma_table(s);
676  if (res < 0)
677  return res;
678  emms = 1;
679  }
680 
681  if (!s->in_lumacoef) {
682  s->in_csp = in->colorspace;
683  s->in_rng = in->color_range;
684  s->in_lumacoef = get_luma_coefficients(s->in_csp);
685  if (!s->in_lumacoef) {
686  av_log(ctx, AV_LOG_ERROR,
687  "Unsupported input colorspace %d (%s)\n",
688  s->in_csp, av_color_space_name(s->in_csp));
689  return AVERROR(EINVAL);
690  }
691  redo_yuv2rgb = 1;
692  }
693 
694  if (!s->out_lumacoef) {
695  s->out_csp = out->colorspace;
696  s->out_rng = out->color_range;
697  s->out_lumacoef = get_luma_coefficients(s->out_csp);
698  if (!s->out_lumacoef) {
699  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
700  if (s->user_all == CS_UNSPECIFIED) {
701  av_log(ctx, AV_LOG_ERROR,
702  "Please specify output transfer characteristics\n");
703  } else {
704  av_log(ctx, AV_LOG_ERROR,
705  "Unsupported output color property %d\n", s->user_all);
706  }
707  } else {
708  av_log(ctx, AV_LOG_ERROR,
709  "Unsupported output transfer characteristics %d (%s)\n",
710  s->out_csp, av_color_space_name(s->out_csp));
711  }
712  return AVERROR(EINVAL);
713  }
714  redo_rgb2yuv = 1;
715  }
716 
717  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
718  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
719  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
720  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
721  !memcmp(s->in_lumacoef, s->out_lumacoef,
722  sizeof(*s->in_lumacoef)) &&
723  in_desc->comp[0].depth == out_desc->comp[0].depth;
724  if (!s->yuv2yuv_passthrough) {
725  if (redo_yuv2rgb) {
726  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
727  int off, bits, in_rng;
728 
729  res = get_range_off(&off, &s->in_y_rng, &s->in_uv_rng,
730  s->in_rng, in_desc->comp[0].depth);
731  if (res < 0) {
732  av_log(ctx, AV_LOG_ERROR,
733  "Unsupported input color range %d (%s)\n",
734  s->in_rng, av_color_range_name(s->in_rng));
735  return res;
736  }
737  for (n = 0; n < 8; n++)
738  s->yuv_offset[0][n] = off;
739  fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
740  invert_matrix3x3(rgb2yuv, yuv2rgb);
741  bits = 1 << (in_desc->comp[0].depth - 1);
742  for (n = 0; n < 3; n++) {
743  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
744  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
745  for (o = 1; o < 8; o++)
746  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
747  }
748  }
749  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
750  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
751  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
752  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
753  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
754  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
755  emms = 1;
756  }
757 
758  if (redo_rgb2yuv) {
759  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
760  int off, out_rng, bits;
761 
762  res = get_range_off(&off, &s->out_y_rng, &s->out_uv_rng,
763  s->out_rng, out_desc->comp[0].depth);
764  if (res < 0) {
765  av_log(ctx, AV_LOG_ERROR,
766  "Unsupported output color range %d (%s)\n",
767  s->out_rng, av_color_range_name(s->out_rng));
768  return res;
769  }
770  for (n = 0; n < 8; n++)
771  s->yuv_offset[1][n] = off;
773  bits = 1 << (29 - out_desc->comp[0].depth);
774  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
775  for (m = 0; m < 3; m++) {
776  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
777  for (o = 1; o < 8; o++)
778  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
779  }
780  }
781  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
782  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
783  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
784  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
785  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
786  emms = 1;
787  }
788 
789  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
790  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
791  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
792  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
793  double yuv2yuv[3][3];
794  int in_rng, out_rng;
795 
796  mul3x3(yuv2yuv, yuv2rgb, rgb2yuv);
797  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
798  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
799  s->yuv2yuv_coeffs[m][n][0] =
800  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
801  (in_rng * (1 << odepth)));
802  for (o = 1; o < 8; o++)
803  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
804  }
805  }
806  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
807  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
808  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
809  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
810  }
811  }
812 
813  if (emms)
814  emms_c();
815 
816  return 0;
817 }
818 
820 {
821  ColorSpaceContext *s = ctx->priv;
822 
824 
825  return 0;
826 }
827 
829 {
830  ColorSpaceContext *s = ctx->priv;
831 
832  av_freep(&s->rgb[0]);
833  av_freep(&s->rgb[1]);
834  av_freep(&s->rgb[2]);
835  s->rgb_sz = 0;
836  av_freep(&s->dither_scratch_base[0][0]);
837  av_freep(&s->dither_scratch_base[0][1]);
838  av_freep(&s->dither_scratch_base[1][0]);
839  av_freep(&s->dither_scratch_base[1][1]);
840  av_freep(&s->dither_scratch_base[2][0]);
841  av_freep(&s->dither_scratch_base[2][1]);
842 
843  av_freep(&s->lin_lut);
844 }
845 
846 static int filter_frame(AVFilterLink *link, AVFrame *in)
847 {
848  AVFilterContext *ctx = link->dst;
849  AVFilterLink *outlink = ctx->outputs[0];
850  ColorSpaceContext *s = ctx->priv;
851  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
852  // input one if it is writable *OR* the actual literal values of in_*
853  // and out_* are identical (not just their respective properties)
854  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
855  int res;
856  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
857  unsigned rgb_sz = rgb_stride * in->height;
858  struct ThreadData td;
859 
860  if (!out) {
861  av_frame_free(&in);
862  return AVERROR(ENOMEM);
863  }
864  av_frame_copy_props(out, in);
865 
868  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
870 
872  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
874  } else {
875  out->color_trc = s->user_trc;
876  }
880  in->color_range : s->user_rng;
881  if (rgb_sz != s->rgb_sz) {
883  int uvw = in->width >> desc->log2_chroma_w;
884 
885  av_freep(&s->rgb[0]);
886  av_freep(&s->rgb[1]);
887  av_freep(&s->rgb[2]);
888  s->rgb_sz = 0;
889  av_freep(&s->dither_scratch_base[0][0]);
890  av_freep(&s->dither_scratch_base[0][1]);
891  av_freep(&s->dither_scratch_base[1][0]);
892  av_freep(&s->dither_scratch_base[1][1]);
893  av_freep(&s->dither_scratch_base[2][0]);
894  av_freep(&s->dither_scratch_base[2][1]);
895 
896  s->rgb[0] = av_malloc(rgb_sz);
897  s->rgb[1] = av_malloc(rgb_sz);
898  s->rgb[2] = av_malloc(rgb_sz);
899  s->dither_scratch_base[0][0] =
900  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
901  s->dither_scratch_base[0][1] =
902  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
903  s->dither_scratch_base[1][0] =
904  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
905  s->dither_scratch_base[1][1] =
906  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
907  s->dither_scratch_base[2][0] =
908  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
909  s->dither_scratch_base[2][1] =
910  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
911  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
912  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
913  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
914  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
915  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
916  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
917  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
918  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
919  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
920  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
921  uninit(ctx);
922  return AVERROR(ENOMEM);
923  }
924  s->rgb_sz = rgb_sz;
925  }
926  res = create_filtergraph(ctx, in, out);
927  if (res < 0)
928  return res;
929  s->rgb_stride = rgb_stride / sizeof(int16_t);
930  td.in = in;
931  td.out = out;
932  td.in_linesize[0] = in->linesize[0];
933  td.in_linesize[1] = in->linesize[1];
934  td.in_linesize[2] = in->linesize[2];
935  td.out_linesize[0] = out->linesize[0];
936  td.out_linesize[1] = out->linesize[1];
937  td.out_linesize[2] = out->linesize[2];
940  if (s->yuv2yuv_passthrough) {
941  res = av_frame_copy(out, in);
942  if (res < 0)
943  return res;
944  } else {
945  ctx->internal->execute(ctx, convert, &td, NULL,
946  FFMIN((in->height + 1) >> 1, ctx->graph->nb_threads));
947  }
948  av_frame_free(&in);
949 
950  return ff_filter_frame(outlink, out);
951 }
952 
954 {
955  static const enum AVPixelFormat pix_fmts[] = {
960  };
961  int res;
962  ColorSpaceContext *s = ctx->priv;
964 
965  if (!formats)
966  return AVERROR(ENOMEM);
967  if (s->user_format == AV_PIX_FMT_NONE)
968  return ff_set_common_formats(ctx, formats);
969  res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
970  if (res < 0)
971  return res;
972  formats = NULL;
973  res = ff_add_format(&formats, s->user_format);
974  if (res < 0)
975  return res;
976 
977  return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
978 }
979 
980 static int config_props(AVFilterLink *outlink)
981 {
982  AVFilterLink *inlink = outlink->src->inputs[0];
983 
984  outlink->w = inlink->w;
985  outlink->h = inlink->h;
986  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
987  outlink->time_base = inlink->time_base;
988 
989  return 0;
990 }
991 
992 #define OFFSET(x) offsetof(ColorSpaceContext, x)
993 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
994 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
995 
996 static const AVOption colorspace_options[] = {
997  { "all", "Set all color properties together",
998  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
999  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1000  ENUM("bt470m", CS_BT470M, "all"),
1001  ENUM("bt470bg", CS_BT470BG, "all"),
1002  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
1003  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
1004  ENUM("bt709", CS_BT709, "all"),
1005  ENUM("smpte170m", CS_SMPTE170M, "all"),
1006  ENUM("smpte240m", CS_SMPTE240M, "all"),
1007  ENUM("bt2020", CS_BT2020, "all"),
1008 
1009  { "space", "Output colorspace",
1010  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1011  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
1012  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
1013  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
1014  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
1015  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
1016  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
1017  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
1018 
1019  { "range", "Output color range",
1020  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1022  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
1023  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
1024 
1025  { "primaries", "Output color primaries",
1026  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1027  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1028  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
1029  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
1030  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
1031  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
1032  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
1033  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
1034 
1035  { "trc", "Output transfer characteristics",
1036  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1037  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
1038  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
1039  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
1040  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
1041  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
1042  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
1043  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
1044  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
1045 
1046  { "format", "Output pixel format",
1047  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
1049  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
1050  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
1051  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
1052  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
1053  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
1054  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
1055  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1056  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1057  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1058 
1059  { "fast", "Ignore primary chromaticity and gamma correction",
1060  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1061  0, 1, FLAGS },
1062 
1063  { "dither", "Dithering mode",
1064  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1065  DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
1066  ENUM("none", DITHER_NONE, "dither"),
1067  ENUM("fsb", DITHER_FSB, "dither"),
1068 
1069  { "wpadapt", "Whitepoint adaptation method",
1070  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1071  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
1072  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1073  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1074  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1075 
1076  { NULL }
1077 };
1078 
1079 AVFILTER_DEFINE_CLASS(colorspace);
1080 
/* Single video input pad; all conversion work happens in filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
1089 
/* Single video output pad; config_props() copies geometry from the input. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};
1098 
1100  .name = "colorspace",
1101  .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1102  .init = init,
1103  .uninit = uninit,
1104  .query_formats = query_formats,
1105  .priv_size = sizeof(ColorSpaceContext),
1106  .priv_class = &colorspace_class,
1107  .inputs = inputs,
1108  .outputs = outputs,
1110 };
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:424
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:436
#define NULL
Definition: coverity.c:32
const char * s
Definition: avisynth_c.h:631
AVFrame * out
Definition: af_sofalizer.c:585
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2222
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
rgb2yuv_fn rgb2yuv
int16_t yuv_offset[2][8]
static enum AVColorPrimaries default_prm[CS_NB+1]
Definition: vf_colorspace.c:83
AVOption.
Definition: opt.h:245
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
enum Colorspace user_all
double yuv2rgb_dbl_coeffs[3][3]
#define ma
static void fn() rgb2yuv(uint8_t *_yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t s, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
static void invert_matrix3x3(const double in[3][3], double out[3][3])
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
int * dither_scratch[3][2]
Main libavfilter public API header.
static int init(AVFilterContext *ctx)
enum AVColorTransferCharacteristic in_trc out_trc user_trc
const char * desc
Definition: nvenc.c:89
static const AVOption colorspace_options[]
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:440
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:276
int16_t yuv2rgb_coeffs[3][3][8]
ptrdiff_t in_linesize[3]
static enum AVSampleFormat formats[]
Definition: avresample.c:163
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:53
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:441
enum AVColorSpace in_csp out_csp user_csp
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:345
static void fn() yuv2yuv(uint8_t *_dst[3], const ptrdiff_t dst_stride[3], uint8_t *_src[3], const ptrdiff_t src_stride[3], int w, int h, const int16_t c[3][3][8], const int16_t yuv_offset[2][8])
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:76
enum DitherMode dither
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:408
functionally identical to above
Definition: pixfmt.h:442
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:2553
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
static const struct ColorPrimaries * get_color_primaries(enum AVColorPrimaries prm)
struct AVFilterGraph * graph
filtergraph this filter belongs to
Definition: avfilter.h:322
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:123
const char * name
Pad name.
Definition: internal.h:59
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:313
void(* yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:27
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1180
AVFrame * in
Definition: af_sofalizer.c:585
uint8_t bits
Definition: crc.c:296
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_malloc(s)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
AVOptions.
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:434
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:2535
AVFilter ff_vf_colorspace
enum Whitepoint wp
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:413
static void uninit(AVFilterContext *ctx)
yuv2rgb_fn yuv2rgb
const struct ColorPrimaries * out_primaries
Colorspace
Definition: vf_colorspace.c:43
ptrdiff_t out_linesize[3]
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:804
Not part of ABI.
Definition: pixfmt.h:458
AVColorRange
MPEG vs JPEG YUV range.
Definition: pixfmt.h:454
ColorSpaceDSPContext dsp
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:346
const struct ColorPrimaries * in_primaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:389
#define FFALIGN(x, a)
Definition: macros.h:48
static const struct LumaCoefficients * get_luma_coefficients(enum AVColorSpace csp)
#define av_log(a,...)
unsigned m
Definition: audioconvert.c:187
A filter pad used for either input or output.
Definition: internal.h:53
ptrdiff_t rgb_stride
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:394
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
#define td
Definition: regdef.h:70
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
double rgb2yuv_dbl_coeffs[3][3]
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:153
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
const struct LumaCoefficients * out_lumacoef
static const uint8_t dither[8][8]
Definition: vf_fspp.c:57
void * priv
private data for use by the filter
Definition: avfilter.h:320
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:413
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:114
Not part of ABI.
Definition: pixfmt.h:402
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:426
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:391
static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs, double rgb2yuv[3][3])
simple assert() macros that are a bit more flexible than ISO C assert().
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:337
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:344
static int create_filtergraph(AVFilterContext *ctx, const AVFrame *in, const AVFrame *out)
int depth
Definition: v4l.c:62
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:710
static const AVFilterPad inputs[]
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:2541
#define supported_format(d)
static void apply_lut(int16_t *buf[3], ptrdiff_t stride, int w, int h, const int16_t *lut)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
#define ENUM(x, y, z)
void(* yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp.h:40
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB]
#define FFMIN(a, b)
Definition: common.h:96
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
void(* rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8], int *rnd[3][2])
Definition: colorspacedsp.h:35
static enum AVColorSpace default_csp[CS_NB+1]
Definition: vf_colorspace.c:96
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:444
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:396
static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB]
AVFormatContext * ctx
Definition: movenc.c:48
static const AVFilterPad outputs[]
int16_t * rgb[3]
int16_t lrgb2lrgb_coeffs[3][3][8]
int n
Definition: avisynth_c.h:547
#define FLAGS
static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs, double rgb2xyz[3][3])
#define src
Definition: vp9dsp.c:530
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:439
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:457
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:301
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
also ITU-R BT1361
Definition: pixfmt.h:410
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt, enum Whitepoint src, enum Whitepoint dst)
#define src1
Definition: h264pred.c:139
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
Definition: pixfmt.h:415
enum AVColorPrimaries in_prm out_prm user_prm
static int query_formats(AVFilterContext *ctx)
int16_t yuv2yuv_coeffs[3][3][8]
functionally identical to above
Definition: pixfmt.h:398
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
void(* multiply3x3)(int16_t *data[3], ptrdiff_t stride, int w, int h, const int16_t m[3][3][8])
Definition: colorspacedsp.h:74
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
void(* rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:31
rgb2yuv_fsb_fn rgb2yuv_fsb
enum AVColorRange in_rng out_rng user_rng
WhitepointAdaptation
Definition: vf_colorspace.c:62
yuv2yuv_fn yuv2yuv[NB_BPP][NB_BPP][NB_SS]
Definition: colorspacedsp.h:70
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
static int fill_gamma_table(ColorSpaceContext *s)
void * buf
Definition: avisynth_c.h:553
Whitepoint
Definition: vf_colorspace.c:56
rgb2yuv_fn rgb2yuv[NB_BPP][NB_SS]
Definition: colorspacedsp.h:65
int * dither_scratch_base[3][2]
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:341
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:142
yuv2rgb_fn yuv2rgb[NB_BPP][NB_SS]
Definition: colorspacedsp.h:62
Not part of ABI.
Definition: pixfmt.h:428
const struct LumaCoefficients * in_lumacoef
const char * name
Filter name.
Definition: avfilter.h:146
static enum AVColorTransferCharacteristic default_trc[CS_NB+1]
Definition: vf_colorspace.c:70
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:317
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB]
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:345
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:342
static int flags
Definition: cpu.c:47
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:348
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:456
ITU-R BT2020 constant luminance system.
Definition: pixfmt.h:445
static int get_range_off(int *off, int *y_rng, int *uv_rng, enum AVColorRange rng, int depth)
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
enum WhitepointAdaptation wp_adapt
void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2547
also ITU-R BT470BG
Definition: pixfmt.h:414
enum AVPixelFormat in_format user_format
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
avfilter_execute_func * execute
Definition: internal.h:153
static const struct TransferCharacteristics * get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
int16_t rgb2yuv_coeffs[3][3][8]
static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
pixel format definitions
static const int16_t coeffs[]
static uint8_t tmp[8]
Definition: des.c:38
const struct TransferCharacteristics * in_txchr
const struct TransferCharacteristics * out_txchr
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define lrint
Definition: tablegen.h:53
enum AVColorPrimaries color_primaries
Definition: frame.h:415
An instance of a filter.
Definition: avfilter.h:305
AVFILTER_DEFINE_CLASS(colorspace)
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:423
static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB]
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:397
ITU-R BT2020.
Definition: pixfmt.h:400
int height
Definition: frame.h:236
FILE * out
Definition: movenc.c:54
#define av_freep(p)
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:417
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2138
#define stride
AVFilterLink * inlink
Definition: vf_blend.c:56
internal API functions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
static int filter_frame(AVFilterLink *link, AVFrame *in)
static int config_props(AVFilterLink *outlink)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
rgb2yuv_fsb_fn rgb2yuv_fsb[NB_BPP][NB_SS]
Definition: colorspacedsp.h:67
Not part of ABI.
Definition: pixfmt.h:446
DitherMode
Definition: vf_colorspace.c:37
yuv2yuv_fn yuv2yuv
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:580
#define OFFSET(x)