FFmpeg
vf_colormatrix.c
Go to the documentation of this file.
1 /*
2  * ColorMatrix v2.2 for Avisynth 2.5.x
3  *
4  * Copyright (C) 2006-2007 Kevin Stone
5  *
6  * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License as published by the
10  * Free Software Foundation; either version 2 of the License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16  * License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
26  * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
27  * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
28  * adds an option to use scaled or non-scaled coefficients, and more...
29  */
30 
31 #include <float.h>
32 #include "avfilter.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/avstring.h"
39 
/* Round a floating-point coefficient to 16.16 fixed point, rounding away
 * from zero; DBL_EPSILON compensates for FP error on negative inputs. */
#define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) \
                       : (int)((n)*65536.0+0.5))
/* Clip a computed sample back into the 8-bit 0..255 range. */
#define CB(n) av_clip_uint8(n)
42 
/*
 * Luma coefficients of each supported matrix in (green, blue, red) order;
 * the row index matches the ColorMode enum values.
 */
static const double yuv_coeff_luma[5][3] = {
    { +0.7152, +0.0722, +0.2126 },  /* 0: Rec.709                              */
    { +0.5900, +0.1100, +0.3000 },  /* 1: FCC                                  */
    { +0.5870, +0.1140, +0.2990 },  /* 2: Rec.601 (ITU-R BT.470-2/SMPTE 170M)  */
    { +0.7010, +0.0870, +0.2120 },  /* 3: SMPTE 240M                           */
    { +0.6780, +0.0593, +0.2627 },  /* 4: Rec.2020                             */
};
50 
/* Supported color matrices; COLOR_MODE_NONE means "autodetect from the
 * input frame" when used as the source. Values restored from the
 * cross-reference index (extraction dropped the enumerator lines) —
 * verify against upstream vf_colormatrix.c. */
enum ColorMode {
    COLOR_MODE_NONE = -1,
    COLOR_MODE_BT709,
    COLOR_MODE_FCC,
    COLOR_MODE_BT601,
    COLOR_MODE_SMPTE240M,
    COLOR_MODE_BT2020,
    COLOR_MODE_COUNT
};
61 typedef struct ColorMatrixContext {
62  const AVClass *class;
63  int yuv_convert[25][3][3];
65  int source, dest; ///< ColorMode
66  int mode;
67  int hsub, vsub;
69 
70 typedef struct ThreadData {
71  AVFrame *dst;
72  const AVFrame *src;
73  int c2;
74  int c3;
75  int c4;
76  int c5;
77  int c6;
78  int c7;
79 } ThreadData;
80 
#define OFFSET(x) offsetof(ColorMatrixContext, x)
/* Parenthesized so the OR expression cannot regroup at expansion sites. */
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
83 
84 static const AVOption colormatrix_options[] = {
85  { "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
86  { "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
87  { "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
88  { "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
89  { "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
90  { "bt470", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
91  { "bt470bg", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
92  { "smpte170m", "set SMTPE-170M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
93  { "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
94  { "bt2020", "set BT.2020 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT2020}, .flags=FLAGS, .unit="color_mode" },
95  { NULL }
96 };
97 
98 AVFILTER_DEFINE_CLASS(colormatrix);
99 
/* Element shorthands for the matrix parameters `m` (input) and `im`
 * (inverse) of inverse3x3() below. */
#define ma m[0][0]
#define mb m[0][1]
#define mc m[0][2]
#define md m[1][0]
#define me m[1][1]
#define mf m[1][2]
#define mg m[2][0]
#define mh m[2][1]
#define mi m[2][2]

#define ima im[0][0]
#define imb im[0][1]
#define imc im[0][2]
#define imd im[1][0]
#define ime im[1][1]
#define imf im[1][2]
#define img im[2][0]
#define imh im[2][1]
#define imi im[2][2]
119 
/**
 * Invert a 3x3 matrix using the adjugate-over-determinant formula.
 * @param im receives the inverse of m
 * @param m  matrix to invert; must be non-singular (no check is made)
 */
static void inverse3x3(double im[3][3], double m[3][3])
{
    double idet = 1.0 / (m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1]) -
                         m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0]) +
                         m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]));

    im[0][0] = idet * (m[1][1] * m[2][2] - m[1][2] * m[2][1]);
    im[0][1] = idet * (m[0][2] * m[2][1] - m[0][1] * m[2][2]);
    im[0][2] = idet * (m[0][1] * m[1][2] - m[0][2] * m[1][1]);
    im[1][0] = idet * (m[1][2] * m[2][0] - m[1][0] * m[2][2]);
    im[1][1] = idet * (m[0][0] * m[2][2] - m[0][2] * m[2][0]);
    im[1][2] = idet * (m[0][2] * m[1][0] - m[0][0] * m[1][2]);
    im[2][0] = idet * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);
    im[2][1] = idet * (m[0][1] * m[2][0] - m[0][0] * m[2][1]);
    im[2][2] = idet * (m[0][0] * m[1][1] - m[0][1] * m[1][0]);
}
134 
/**
 * 3x3 matrix product cm = yuv * rgb, yielding the matrix that maps one
 * YUV space into another by passing through RGB.
 */
static void solve_coefficients(double cm[3][3], double rgb[3][3], double yuv[3][3])
{
    int i, j, k;
    for (i = 0; i < 3; i++) {
        for (j = 0; j < 3; j++) {
            double sum = 0.0;
            for (k = 0; k < 3; k++)
                sum += yuv[i][k] * rgb[k][j];
            cm[i][j] = sum;
        }
    }
}
142 
144 {
145  ColorMatrixContext *color = ctx->priv;
146  double yuv_coeff[5][3][3];
147  double rgb_coeffd[5][3][3];
148  double yuv_convertd[25][3][3];
149  double bscale, rscale;
150  int v = 0;
151  int i, j, k;
152  for (i = 0; i < 5; i++) {
153  yuv_coeff[i][0][0] = yuv_coeff_luma[i][0];
154  yuv_coeff[i][0][1] = yuv_coeff_luma[i][1];
155  yuv_coeff[i][0][2] = yuv_coeff_luma[i][2];
156  bscale = 0.5 / (yuv_coeff[i][0][1] - 1.0);
157  rscale = 0.5 / (yuv_coeff[i][0][2] - 1.0);
158  yuv_coeff[i][1][0] = bscale * yuv_coeff[i][0][0];
159  yuv_coeff[i][1][1] = 0.5;
160  yuv_coeff[i][1][2] = bscale * yuv_coeff[i][0][2];
161  yuv_coeff[i][2][0] = rscale * yuv_coeff[i][0][0];
162  yuv_coeff[i][2][1] = rscale * yuv_coeff[i][0][1];
163  yuv_coeff[i][2][2] = 0.5;
164  }
165  for (i = 0; i < 5; i++)
166  inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
167  for (i = 0; i < 5; i++) {
168  for (j = 0; j < 5; j++) {
169  solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
170  for (k = 0; k < 3; k++) {
171  color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
172  color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
173  color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
174  }
175  if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
176  color->yuv_convert[v][2][0] != 0) {
177  av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
178  }
179  v++;
180  }
181  }
182 }
183 
/* Printable names, indexed by ColorMode (COLOR_MODE_NONE excluded). */
static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m", "bt2020"};
185 
187 {
188  ColorMatrixContext *color = ctx->priv;
189 
190  if (color->dest == COLOR_MODE_NONE) {
191  av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
192  return AVERROR(EINVAL);
193  }
194 
195  if (color->source == color->dest) {
196  av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
197  return AVERROR(EINVAL);
198  }
199 
201 
202  return 0;
203 }
204 
205 static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
206 {
207  const ThreadData *td = arg;
208  const AVFrame *src = td->src;
209  AVFrame *dst = td->dst;
210  const int height = src->height;
211  const int width = src->width*2;
212  const int src_pitch = src->linesize[0];
213  const int dst_pitch = dst->linesize[0];
214  const int slice_start = (height * jobnr ) / nb_jobs;
215  const int slice_end = (height * (jobnr+1)) / nb_jobs;
216  const unsigned char *srcp = src->data[0] + slice_start * src_pitch;
217  unsigned char *dstp = dst->data[0] + slice_start * dst_pitch;
218  const int c2 = td->c2;
219  const int c3 = td->c3;
220  const int c4 = td->c4;
221  const int c5 = td->c5;
222  const int c6 = td->c6;
223  const int c7 = td->c7;
224  int x, y;
225 
226  for (y = slice_start; y < slice_end; y++) {
227  for (x = 0; x < width; x += 4) {
228  const int u = srcp[x + 0] - 128;
229  const int v = srcp[x + 2] - 128;
230  const int uvval = c2 * u + c3 * v + 1081344;
231  dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
232  dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
233  dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
234  dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
235  }
236  srcp += src_pitch;
237  dstp += dst_pitch;
238  }
239 
240  return 0;
241 }
242 
243 static int process_slice_yuv444p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
244 {
245  const ThreadData *td = arg;
246  const AVFrame *src = td->src;
247  AVFrame *dst = td->dst;
248  const int height = src->height;
249  const int width = src->width;
250  const int slice_start = (height * jobnr ) / nb_jobs;
251  const int slice_end = (height * (jobnr+1)) / nb_jobs;
252  const int src_pitchY = src->linesize[0];
253  const int src_pitchUV = src->linesize[1];
254  const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
255  const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
256  const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
257  const int dst_pitchY = dst->linesize[0];
258  const int dst_pitchUV = dst->linesize[1];
259  unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
260  unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
261  unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
262  const int c2 = td->c2;
263  const int c3 = td->c3;
264  const int c4 = td->c4;
265  const int c5 = td->c5;
266  const int c6 = td->c6;
267  const int c7 = td->c7;
268  int x, y;
269 
270  for (y = slice_start; y < slice_end; y++) {
271  for (x = 0; x < width; x++) {
272  const int u = srcpU[x] - 128;
273  const int v = srcpV[x] - 128;
274  const int uvval = c2 * u + c3 * v + 1081344;
275  dstpY[x] = CB((65536 * (srcpY[x] - 16) + uvval) >> 16);
276  dstpU[x] = CB((c4 * u + c5 * v + 8421376) >> 16);
277  dstpV[x] = CB((c6 * u + c7 * v + 8421376) >> 16);
278  }
279  srcpY += src_pitchY;
280  dstpY += dst_pitchY;
281  srcpU += src_pitchUV;
282  srcpV += src_pitchUV;
283  dstpU += dst_pitchUV;
284  dstpV += dst_pitchUV;
285  }
286 
287  return 0;
288 }
289 
290 static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
291 {
292  const ThreadData *td = arg;
293  const AVFrame *src = td->src;
294  AVFrame *dst = td->dst;
295  const int height = src->height;
296  const int width = src->width;
297  const int slice_start = (height * jobnr ) / nb_jobs;
298  const int slice_end = (height * (jobnr+1)) / nb_jobs;
299  const int src_pitchY = src->linesize[0];
300  const int src_pitchUV = src->linesize[1];
301  const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
302  const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
303  const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
304  const int dst_pitchY = dst->linesize[0];
305  const int dst_pitchUV = dst->linesize[1];
306  unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
307  unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
308  unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
309  const int c2 = td->c2;
310  const int c3 = td->c3;
311  const int c4 = td->c4;
312  const int c5 = td->c5;
313  const int c6 = td->c6;
314  const int c7 = td->c7;
315  int x, y;
316 
317  for (y = slice_start; y < slice_end; y++) {
318  for (x = 0; x < width; x += 2) {
319  const int u = srcpU[x >> 1] - 128;
320  const int v = srcpV[x >> 1] - 128;
321  const int uvval = c2 * u + c3 * v + 1081344;
322  dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
323  dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
324  dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
325  dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
326  }
327  srcpY += src_pitchY;
328  dstpY += dst_pitchY;
329  srcpU += src_pitchUV;
330  srcpV += src_pitchUV;
331  dstpU += dst_pitchUV;
332  dstpV += dst_pitchUV;
333  }
334 
335  return 0;
336 }
337 
338 static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
339 {
340  const ThreadData *td = arg;
341  const AVFrame *src = td->src;
342  AVFrame *dst = td->dst;
343  const int height = FFALIGN(src->height, 2) >> 1;
344  const int width = src->width;
345  const int slice_start = ((height * jobnr ) / nb_jobs) << 1;
346  const int slice_end = ((height * (jobnr+1)) / nb_jobs) << 1;
347  const int src_pitchY = src->linesize[0];
348  const int src_pitchUV = src->linesize[1];
349  const int dst_pitchY = dst->linesize[0];
350  const int dst_pitchUV = dst->linesize[1];
351  const unsigned char *srcpY = src->data[0] + src_pitchY * slice_start;
352  const unsigned char *srcpU = src->data[1] + src_pitchUV * (slice_start >> 1);
353  const unsigned char *srcpV = src->data[2] + src_pitchUV * (slice_start >> 1);
354  const unsigned char *srcpN = src->data[0] + src_pitchY * (slice_start + 1);
355  unsigned char *dstpU = dst->data[1] + dst_pitchUV * (slice_start >> 1);
356  unsigned char *dstpV = dst->data[2] + dst_pitchUV * (slice_start >> 1);
357  unsigned char *dstpY = dst->data[0] + dst_pitchY * slice_start;
358  unsigned char *dstpN = dst->data[0] + dst_pitchY * (slice_start + 1);
359  const int c2 = td->c2;
360  const int c3 = td->c3;
361  const int c4 = td->c4;
362  const int c5 = td->c5;
363  const int c6 = td->c6;
364  const int c7 = td->c7;
365  int x, y;
366 
367  for (y = slice_start; y < slice_end; y += 2) {
368  for (x = 0; x < width; x += 2) {
369  const int u = srcpU[x >> 1] - 128;
370  const int v = srcpV[x >> 1] - 128;
371  const int uvval = c2 * u + c3 * v + 1081344;
372  dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
373  dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
374  dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
375  dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
376  dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
377  dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
378  }
379  srcpY += src_pitchY << 1;
380  dstpY += dst_pitchY << 1;
381  srcpN += src_pitchY << 1;
382  dstpN += dst_pitchY << 1;
383  srcpU += src_pitchUV;
384  srcpV += src_pitchUV;
385  dstpU += dst_pitchUV;
386  dstpV += dst_pitchUV;
387  }
388 
389  return 0;
390 }
391 
393 {
394  AVFilterContext *ctx = inlink->dst;
395  ColorMatrixContext *color = ctx->priv;
396  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
397 
398  color->hsub = pix_desc->log2_chroma_w;
399  color->vsub = pix_desc->log2_chroma_h;
400 
401  av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
402  color_modes[color->source], color_modes[color->dest]);
403 
404  return 0;
405 }
406 
408 {
409  static const enum AVPixelFormat pix_fmts[] = {
415  };
417  if (!fmts_list)
418  return AVERROR(ENOMEM);
419  return ff_set_common_formats(ctx, fmts_list);
420 }
421 
423 {
424  AVFilterContext *ctx = link->dst;
425  ColorMatrixContext *color = ctx->priv;
426  AVFilterLink *outlink = ctx->outputs[0];
427  AVFrame *out;
428  ThreadData td = {0};
429 
430  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
431  if (!out) {
432  av_frame_free(&in);
433  return AVERROR(ENOMEM);
434  }
436 
437  if (color->source == COLOR_MODE_NONE) {
438  enum AVColorSpace cs = in->colorspace;
439  enum ColorMode source;
440 
441  switch(cs) {
442  case AVCOL_SPC_BT709 : source = COLOR_MODE_BT709 ; break;
443  case AVCOL_SPC_FCC : source = COLOR_MODE_FCC ; break;
445  case AVCOL_SPC_BT470BG : source = COLOR_MODE_BT601 ; break;
449  default :
450  av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
451  av_frame_free(&out);
452  return AVERROR(EINVAL);
453  }
454  color->mode = source * 5 + color->dest;
455  } else
456  color->mode = color->source * 5 + color->dest;
457 
458  switch(color->dest) {
459  case COLOR_MODE_BT709 : out->colorspace = AVCOL_SPC_BT709 ; break;
460  case COLOR_MODE_FCC : out->colorspace = AVCOL_SPC_FCC ; break;
461  case COLOR_MODE_SMPTE240M: out->colorspace = AVCOL_SPC_SMPTE240M ; break;
462  case COLOR_MODE_BT601 : out->colorspace = AVCOL_SPC_BT470BG ; break;
463  case COLOR_MODE_BT2020 : out->colorspace = AVCOL_SPC_BT2020_NCL; break;
464  }
465 
466  td.src = in;
467  td.dst = out;
468  td.c2 = color->yuv_convert[color->mode][0][1];
469  td.c3 = color->yuv_convert[color->mode][0][2];
470  td.c4 = color->yuv_convert[color->mode][1][1];
471  td.c5 = color->yuv_convert[color->mode][1][2];
472  td.c6 = color->yuv_convert[color->mode][2][1];
473  td.c7 = color->yuv_convert[color->mode][2][2];
474 
475  if (in->format == AV_PIX_FMT_YUV444P)
477  FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
478  else if (in->format == AV_PIX_FMT_YUV422P)
480  FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
481  else if (in->format == AV_PIX_FMT_YUV420P)
483  FFMIN(in->height / 2, ff_filter_get_nb_threads(ctx)));
484  else
486  FFMIN(in->height, ff_filter_get_nb_threads(ctx)));
487 
488  av_frame_free(&in);
489  return ff_filter_frame(outlink, out);
490 }
491 
492 static const AVFilterPad colormatrix_inputs[] = {
493  {
494  .name = "default",
495  .type = AVMEDIA_TYPE_VIDEO,
496  .config_props = config_input,
497  .filter_frame = filter_frame,
498  },
499  { NULL }
500 };
501 
503  {
504  .name = "default",
505  .type = AVMEDIA_TYPE_VIDEO,
506  },
507  { NULL }
508 };
509 
511  .name = "colormatrix",
512  .description = NULL_IF_CONFIG_SMALL("Convert color matrix."),
513  .priv_size = sizeof(ColorMatrixContext),
514  .init = init,
518  .priv_class = &colormatrix_class,
520 };
ff_vf_colormatrix
AVFilter ff_vf_colormatrix
Definition: vf_colormatrix.c:510
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ColorMatrixContext::vsub
int vsub
Definition: vf_colormatrix.c:67
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:583
imh
#define imh
Definition: vf_colormatrix.c:117
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1094
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
ColorMatrixContext::yuv_convert
int yuv_convert[25][3][3]
Definition: vf_colormatrix.c:63
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
md
#define md
Definition: vf_colormatrix.c:103
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:204
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:324
ThreadData::c3
int c3
Definition: vf_colormatrix.c:74
pixdesc.h
AVOption
AVOption.
Definition: opt.h:248
mh
#define mh
Definition: vf_colormatrix.c:107
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
solve_coefficients
static void solve_coefficients(double cm[3][3], double rgb[3][3], double yuv[3][3])
Definition: vf_colormatrix.c:135
float.h
COLOR_MODE_COUNT
@ COLOR_MODE_COUNT
Definition: vf_colormatrix.c:58
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
video.h
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1698
AVCOL_SPC_BT2020_CL
@ AVCOL_SPC_BT2020_CL
ITU-R BT2020 constant luminance system.
Definition: pixfmt.h:524
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:65
formats.h
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:518
rgb
Definition: rpzaenc.c:58
process_slice_uyvy422
static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colormatrix.c:205
ColorMode
ColorMode
Definition: avf_showspectrum.c:50
ThreadData::c7
int c7
Definition: vf_colormatrix.c:78
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
ColorMatrixContext::dest
int dest
ColorMode.
Definition: vf_colormatrix.c:65
colormatrix_inputs
static const AVFilterPad colormatrix_inputs[]
Definition: vf_colormatrix.c:492
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
av_cold
#define av_cold
Definition: attributes.h:90
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:587
imd
#define imd
Definition: vf_colormatrix.c:113
width
#define width
ColorMatrixContext::mode
int mode
Definition: vf_colormatrix.c:66
process_slice_yuv420p
static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colormatrix.c:338
mi
#define mi
Definition: vf_colormatrix.c:108
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
calc_coefficients
static void calc_coefficients(AVFilterContext *ctx)
Definition: vf_colormatrix.c:143
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2032
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
ctx
AVFormatContext * ctx
Definition: movenc.c:48
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_colormatrix.c:422
COLOR_MODE_FCC
@ COLOR_MODE_FCC
Definition: vf_colormatrix.c:54
mg
#define mg
Definition: vf_colormatrix.c:106
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
CB
#define CB(n)
Definition: vf_colormatrix.c:41
yuv_coeff_luma
static const double yuv_coeff_luma[5][3]
Definition: vf_colormatrix.c:43
inverse3x3
static void inverse3x3(double im[3][3], double m[3][3])
Definition: vf_colormatrix.c:120
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
arg
const char * arg
Definition: jacosubdec.c:66
ColorMatrixContext::interlaced
int interlaced
Definition: vf_colormatrix.c:64
ThreadData::dst
AVFrame * dst
Definition: vf_blend.c:56
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
me
#define me
Definition: vf_colormatrix.c:104
src
#define src
Definition: vp8dsp.c:255
process_slice_yuv422p
static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colormatrix.c:290
ColorMatrixContext::hsub
int hsub
Definition: vf_colormatrix.c:67
COLOR_MODE_SMPTE240M
@ COLOR_MODE_SMPTE240M
Definition: vf_colormatrix.c:56
process_slice_yuv444p
static int process_slice_yuv444p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colormatrix.c:243
ThreadData::c6
int c6
Definition: vf_colormatrix.c:77
source
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a source
Definition: filter_design.txt:255
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
ThreadData::c2
int c2
Definition: vf_colormatrix.c:73
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
img
#define img
Definition: vf_colormatrix.c:116
config_input
static int config_input(AVFilterLink *inlink)
Definition: vf_colormatrix.c:392
mb
#define mb
Definition: vf_colormatrix.c:101
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(colormatrix)
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:126
COLOR_MODE_BT709
@ COLOR_MODE_BT709
Definition: vf_colormatrix.c:53
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:520
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_colormatrix.c:186
i
int i
Definition: input.c:407
imc
#define imc
Definition: vf_colormatrix.c:112
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:523
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:512
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
mf
#define mf
Definition: vf_colormatrix.c:105
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AVFilter
Filter definition.
Definition: avfilter.h:145
color_modes
static const char *const color_modes[]
Definition: vf_colormatrix.c:184
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_colormatrix.c:407
COLOR_MODE_BT601
@ COLOR_MODE_BT601
Definition: vf_colormatrix.c:55
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
colormatrix_outputs
static const AVFilterPad colormatrix_outputs[]
Definition: vf_colormatrix.c:502
c2
static const uint64_t c2
Definition: murmur3.c:52
ima
#define ima
Definition: vf_colormatrix.c:110
colormatrix_options
static const AVOption colormatrix_options[]
Definition: vf_colormatrix.c:84
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:517
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
ThreadData::c4
int c4
Definition: vf_colormatrix.c:75
cm
#define cm
Definition: dvbsubdec.c:37
imf
#define imf
Definition: vf_colormatrix.c:115
COLOR_MODE_NONE
@ COLOR_MODE_NONE
Definition: vf_colormatrix.c:52
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ColorMatrixContext::source
int source
Definition: vf_colormatrix.c:65
AVFilterContext
An instance of a filter.
Definition: avfilter.h:341
ColorMatrixContext
Definition: vf_colormatrix.c:61
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
ime
#define ime
Definition: vf_colormatrix.c:114
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ThreadData::c5
int c5
Definition: vf_colormatrix.c:76
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
COLOR_MODE_BT2020
@ COLOR_MODE_BT2020
Definition: vf_colormatrix.c:57
imb
#define imb
Definition: vf_colormatrix.c:111
ThreadData::src
const AVFrame * src
Definition: vf_colormatrix.c:72
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:355
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ma
#define ma
Definition: vf_colormatrix.c:100
FLAGS
#define FLAGS
Definition: vf_colormatrix.c:82
NS
#define NS(n)
Definition: vf_colormatrix.c:40
avstring.h
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
mc
#define mc
Definition: vf_colormatrix.c:102
OFFSET
#define OFFSET(x)
Definition: vf_colormatrix.c:81
imi
#define imi
Definition: vf_colormatrix.c:118