vf_colorconstancy.c
1 /*
2  * Copyright (c) 2018 Mina Sami
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Color Constancy filter
24  *
25  * @see http://colorconstancy.com/
26  *
27  * @cite
28  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29  */
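/*
 * Estimation model (see the cited paper): the grey-edge framework assumes that
 * the Minkowski p-norm of the Gaussian-smoothed image derivatives is achromatic,
 * i.e. for each color channel c
 *
 *     ( integral |d^n f_{c,sigma}(x) / dx^n|^p dx )^(1/p)  =  k * e_c
 *
 * where the derivative order n corresponds to the "difford" option, the norm p
 * to "minknorm" (0 selects the maximum instead of a p-norm) and the smoothing
 * scale to "sigma". An illustrative invocation (example values, not taken from
 * this file) could be:
 *
 *     ffmpeg -i in.mkv -vf greyedge=difford=1:minknorm=5:sigma=2 out.mkv
 */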
30 
31 #include "libavutil/imgutils.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 
35 #include "avfilter.h"
36 #include "formats.h"
37 #include "internal.h"
38 #include "video.h"
39 
40 #include <math.h>
41 
42 #define GREY_EDGE "greyedge"
43 
44 #define SQRT3 1.73205080757
45 
46 #define NUM_PLANES 3
47 #define MAX_DIFF_ORD 2
48 #define MAX_META_DATA 4
49 #define MAX_DATA 4
50 
51 #define INDEX_TEMP 0
52 #define INDEX_DX 1
53 #define INDEX_DY 2
54 #define INDEX_DXY 3
55 #define INDEX_NORM INDEX_DX
56 #define INDEX_SRC 0
57 #define INDEX_DST 1
58 #define INDEX_ORD 2
59 #define INDEX_DIR 3
60 #define DIR_X 0
61 #define DIR_Y 1
62 
63 /**
64  * Used for passing data between threads.
65  */
66 typedef struct ThreadData {
67  AVFrame *in, *out;
68  int meta_data[MAX_META_DATA];
69  double *data[MAX_DATA][NUM_PLANES];
70 } ThreadData;
71 
72 /**
73  * Common struct for all algorithms contexts.
74  */
75 typedef struct ColorConstancyContext {
76  const AVClass *class;
77 
78  int difford;
79  int minknorm; /**< @minknorm = 0 : getMax instead */
80  double sigma;
81 
82  int nb_threads;
83  int planeheight[4];
84  int planewidth[4];
85 
86  int filtersize;
87  double *gauss[MAX_DIFF_ORD+1];
88 
89  double white[NUM_PLANES];
90 } ColorConstancyContext;
91 
92 #define OFFSET(x) offsetof(ColorConstancyContext, x)
93 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
94 
95 #define GINDX(s, i) ( (i) - ((s) >> 2) )
96 
97 /**
98  * Sets up the Gaussian filters used for calculating Gaussian derivatives. The
99  * filter size depends on sigma, which is a user option, hence the filters are
100  * recalculated on each configuration. Each higher-order filter also depends on
101  * the lower-order ones. Sigma can be zero only when difford = 0; in that case
102  * the data is merely converted to double instead of being convolved.
103  *
104  * @param ctx the filter context.
105  *
106  * @return 0 in case of success, a negative value corresponding to an
107  * AVERROR code in case of failure.
108  */
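/*
 * The loops below sample, up to normalization, the kernels
 *
 *     g0(x) = exp(-x^2 / (2*sigma^2)) / (sqrt(2*PI) * sigma)
 *     g1(x) = -(x / sigma^2) * g0(x)                  (first derivative)
 *     g2(x) = (x^2 / sigma^4 - 1 / sigma^2) * g0(x)   (second derivative)
 *
 * with x = GINDX(filtersize, i). Each kernel is then rescaled: order 0 so it
 * sums to 1, order 1 so that sum(x * g1(x)) = 1, and order 2 is made zero-mean
 * and scaled so that sum(x^2/2 * g2(x)) = 1.
 */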
109 static int set_gauss(AVFilterContext *ctx)
110 {
111  ColorConstancyContext *s = ctx->priv;
112  int filtersize = s->filtersize;
113  int difford = s->difford;
114  double sigma = s->sigma;
115  double sum1, sum2;
116  int i;
117 
118  for (i = 0; i <= difford; ++i) {
119  s->gauss[i] = av_mallocz_array(filtersize, sizeof(*s->gauss[i]));
120  if (!s->gauss[i]) {
121  for (; i >= 0; --i) {
122  av_freep(&s->gauss[i]);
123  }
124  return AVERROR(ENOMEM);
125  }
126  }
127 
128  // Order 0
129  av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
130  sum1 = 0.0;
131  if (!sigma) {
132  s->gauss[0][0] = 1; // Copying data to double instead of convolution
133  } else {
134  for (i = 0; i < filtersize; ++i) {
135  s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
136  sum1 += s->gauss[0][i];
137  }
138  for (i = 0; i < filtersize; ++i) {
139  s->gauss[0][i] /= sum1;
140  }
141  }
142  // Order 1
143  if (difford > 0) {
144  av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
145  sum1 = 0.0;
146  for (i = 0; i < filtersize; ++i) {
147  s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
148  sum1 += s->gauss[1][i] * GINDX(filtersize, i);
149  }
150 
151  for (i = 0; i < filtersize; ++i) {
152  s->gauss[1][i] /= sum1;
153  }
154 
155  // Order 2
156  if (difford > 1) {
157  av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
158  sum1 = 0.0;
159  for (i = 0; i < filtersize; ++i) {
160  s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
161  * s->gauss[0][i];
162  sum1 += s->gauss[2][i];
163  }
164 
165  sum2 = 0.0;
166  for (i = 0; i < filtersize; ++i) {
167  s->gauss[2][i] -= sum1 / (filtersize);
168  sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
169  }
170  for (i = 0; i < filtersize ; ++i) {
171  s->gauss[2][i] /= sum2;
172  }
173  }
174  }
175  return 0;
176 }
177 
178 /**
179  * Frees up buffers used by grey edge for storing final and
180  * intermediate derivative results. Number of buffers and number of planes
181  * for last buffer are given so it can be safely called at allocation
182  * failure instances.
183  *
184  * @param td holds the buffers.
185  * @param nb_buff number of buffers to be freed.
186  * @param nb_planes number of planes for last buffer to be freed.
187  */
188 static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
189 {
190  int b, p;
191 
192  for (b = 0; b < nb_buff; ++b) {
193  for (p = 0; p < NUM_PLANES; ++p) {
194  av_freep(&td->data[b][p]);
195  }
196  }
197  // Final buffer may not be fully allocated at fail cases
198  for (p = 0; p < nb_planes; ++p) {
199  av_freep(&td->data[b][p]);
200  }
201 }
202 
203 /**
204  * Allocates buffers used by grey edge for storing final and
205  * intermediate derivative results.
206  *
207  * @param ctx the filter context.
208  * @param td holds the buffers.
209  *
210  * @return 0 in case of success, a negative value corresponding to an
211  * AVERROR code in case of failure.
212  */
213 static int setup_derivative_buffers(AVFilterContext *ctx, ThreadData *td)
214 {
215  ColorConstancyContext *s = ctx->priv;
216  int nb_buff = s->difford + 1;
217  int b, p;
218 
219  av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
220  for (b = 0; b <= nb_buff; ++b) { // difford + 1 derivative buffers plus the temp buffer
221  for (p = 0; p < NUM_PLANES; ++p) {
222  td->data[b][p] = av_mallocz_array(s->planeheight[p] * s->planewidth[p], sizeof(*td->data[b][p]));
223  if (!td->data[b][p]) {
225  return AVERROR(ENOMEM);
226  }
227  }
228  }
229  return 0;
230 }
231 
232 #define CLAMP(x, mx) av_clip((x), 0, (mx-1))
233 #define INDX2D(r, c, w) ( (r) * (w) + (c) )
234 #define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
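/*
 * CLAMP implements replicate ("clamp to edge") padding at the frame borders,
 * INDX2D maps (row, column) to a row-major 1-D index, and GAUSS fetches one
 * clamped sample from source s (with line size sls) and weights it by the
 * filter tap g.
 */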
235 
236 /**
237  * Slice calculation of Gaussian derivatives. Applies a 1-D Gaussian derivative filter
238  * either horizontally or vertically according to the metadata given in the thread data.
239  * When convolving horizontally the source is always the input frame within the thread
240  * data, while when convolving vertically the source is an intermediate buffer.
241  *
242  * @param ctx the filter context.
243  * @param arg data to be passed between threads.
244  * @param jobnr current job number.
245  * @param nb_jobs total number of jobs.
246  *
247  * @return 0.
248  */
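/*
 * Jobs are split along rows for the horizontal pass and along columns for the
 * vertical pass, so each job writes a disjoint set of destination cells and no
 * synchronization between slices is required.
 */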
249 static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
250 {
251  ColorConstancyContext *s = ctx->priv;
252  ThreadData *td = arg;
253  AVFrame *in = td->in;
254  const int ord = td->meta_data[INDEX_ORD];
255  const int dir = td->meta_data[INDEX_DIR];
256  const int src_index = td->meta_data[INDEX_SRC];
257  const int dst_index = td->meta_data[INDEX_DST];
258  const int filtersize = s->filtersize;
259  const double *gauss = s->gauss[ord];
260  int plane;
261 
262  for (plane = 0; plane < NUM_PLANES; ++plane) {
263  const int height = s->planeheight[plane];
264  const int width = s->planewidth[plane];
265  const int in_linesize = in->linesize[plane];
266  double *dst = td->data[dst_index][plane];
267  int slice_start, slice_end;
268  int r, c, g;
269 
270  if (dir == DIR_X) {
271  /** Applying gauss horizontally along each row */
272  const uint8_t *src = in->data[plane];
273  slice_start = (height * jobnr ) / nb_jobs;
274  slice_end = (height * (jobnr + 1)) / nb_jobs;
275 
276  for (r = slice_start; r < slice_end; ++r) {
277  for (c = 0; c < width; ++c) {
278  dst[INDX2D(r, c, width)] = 0;
279  for (g = 0; g < filtersize; ++g) {
280  dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
281  in_linesize, height, width, gauss[g]);
282  }
283  }
284  }
285  } else {
286  /** Applying gauss vertically along each column */
287  const double *src = td->data[src_index][plane];
288  slice_start = (width * jobnr ) / nb_jobs;
289  slice_end = (width * (jobnr + 1)) / nb_jobs;
290 
291  for (c = slice_start; c < slice_end; ++c) {
292  for (r = 0; r < height; ++r) {
293  dst[INDX2D(r, c, width)] = 0;
294  for (g = 0; g < filtersize; ++g) {
295  dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
296  width, height, width, gauss[g]);
297  }
298  }
299  }
300  }
301 
302  }
303  return 0;
304 }
305 
306 /**
307  * Slice Frobenius normalization of Gaussian derivatives. Only called for difford values of
308  * 1 or 2.
309  *
310  * @param ctx the filter context.
311  * @param arg data to be passed between threads.
312  * @param jobnr current job number.
313  * @param nb_jobs total number of jobs.
314  *
315  * @return 0.
316  */
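/*
 * For difford == 1 the per-pixel magnitude is sqrt(dx^2 + dy^2); for
 * difford == 2 the buffers hold second derivatives and the combined value is
 * sqrt(dxx^2 + 4*dxy^2 + dyy^2). The result is written to INDEX_NORM, which
 * aliases INDEX_DX, so the dx buffer is reused as output.
 */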
317 static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
318 {
319  ColorConstancyContext *s = ctx->priv;
320  ThreadData *td = arg;
321  const int difford = s->difford;
322  int plane;
323 
324  for (plane = 0; plane < NUM_PLANES; ++plane) {
325  const int height = s->planeheight[plane];
326  const int width = s->planewidth[plane];
327  const int64_t numpixels = width * (int64_t)height;
328  const int slice_start = (numpixels * jobnr ) / nb_jobs;
329  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
330  const double *dx = td->data[INDEX_DX][plane];
331  const double *dy = td->data[INDEX_DY][plane];
332  double *norm = td->data[INDEX_NORM][plane];
333  int i;
334 
335  if (difford == 1) {
336  for (i = slice_start; i < slice_end; ++i) {
337  norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
338  }
339  } else {
340  const double *dxy = td->data[INDEX_DXY][plane];
341  for (i = slice_start; i < slice_end; ++i) {
342  norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
343  }
344  }
345  }
346 
347  return 0;
348 }
349 
350 /**
351  * Utility function for setting up differentiation data/metadata.
352  *
353  * @param ctx the filter context.
354  * @param td to be used for passing data between threads.
355  * @param ord order of differentiation.
356  * @param dir direction of differentiation.
357  * @param src index of source used for differentiation.
358  * @param dst index of the destination used for saving the differentiation result.
359  * @param dim maximum dimension in current direction.
360  * @param nb_threads number of threads to use.
361  */
362 static void av_always_inline
363 get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
364  int src, int dst, int dim, int nb_threads) {
365  td->meta_data[INDEX_ORD] = ord;
366  td->meta_data[INDEX_DIR] = dir;
367  td->meta_data[INDEX_SRC] = src;
368  td->meta_data[INDEX_DST] = dst;
369  ctx->internal->execute(ctx, slice_get_derivative, td, NULL, FFMIN(dim, nb_threads));
370 }
371 
372 /**
373  * Main control function for calculating gaussian derivatives.
374  *
375  * @param ctx the filter context.
376  * @param td holds the buffers used for storing results.
377  *
378  * @return 0 in case of success, a negative value corresponding to an
379  * AVERROR code in case of failure.
380  */
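/*
 * Derivatives are computed separably: a 1-D kernel of the requested order is
 * applied along x (reading from the input frame) into INDEX_TEMP, then another
 * 1-D kernel is applied along y (reading from INDEX_TEMP) into the destination.
 * For difford == 1, for example, dx uses order 1 along x and order 0 (smoothing)
 * along y, while dy uses order 0 along x and order 1 along y.
 */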
381 static int get_derivative(AVFilterContext *ctx, ThreadData *td)
382 {
383  ColorConstancyContext *s = ctx->priv;
384  int nb_threads = s->nb_threads;
385  int height = s->planeheight[1];
386  int width = s->planewidth[1];
387 
388  switch(s->difford) {
389  case 0:
390  if (!s->sigma) { // Only copy once
391  get_deriv(ctx, td, 0, DIR_X, 0 , INDEX_NORM, height, nb_threads);
392  } else {
393  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
394  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
395  // Save to INDEX_NORM because this will not be normalized and
396  // the grey edge filter expects the result to be found in INDEX_NORM
397  }
398  return 0;
399 
400  case 1:
401  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
402  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
403 
404  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
405  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
406  return 0;
407 
408  case 2:
409  get_deriv(ctx, td, 2, DIR_X, 0, INDEX_TEMP, height, nb_threads);
410  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
411 
412  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
413  get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
414 
415  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
416  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY, width , nb_threads);
417  return 0;
418 
419  default:
420  av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
421  return AVERROR(EINVAL);
422  }
423 
424 }
425 
426 /**
427  * Slice function for grey edge algorithm that does partial summing/maximizing
428  * of gaussian derivatives.
429  *
430  * @param ctx the filter context.
431  * @param arg data to be passed between threads.
432  * @param jobnr current job number.
433  * @param nb_jobs total number of jobs.
434  *
435  * @return 0.
436  */
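/*
 * Each job accumulates its partial result in td->data[INDEX_DST][plane][jobnr]:
 * the maximum absolute derivative when minknorm == 0, otherwise the partial sum
 * of |value / 255|^minknorm. Pixels whose intensity reaches the threshold (255)
 * are treated as saturated and excluded from the estimate.
 */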
437 static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
438 {
439  ColorConstancyContext *s = ctx->priv;
440  ThreadData *td = arg;
441  AVFrame *in = td->in;
442  int minknorm = s->minknorm;
443  const uint8_t thresh = 255;
444  int plane;
445 
446  for (plane = 0; plane < NUM_PLANES; ++plane) {
447  const int height = s->planeheight[plane];
448  const int width = s->planewidth[plane];
449  const int in_linesize = in->linesize[plane];
450  const int slice_start = (height * jobnr) / nb_jobs;
451  const int slice_end = (height * (jobnr+1)) / nb_jobs;
452  const uint8_t *img_data = in->data[plane];
453  const double *src = td->data[INDEX_NORM][plane];
454  double *dst = td->data[INDEX_DST][plane];
455  int r, c;
456 
457  dst[jobnr] = 0;
458  if (!minknorm) {
459  for (r = slice_start; r < slice_end; ++r) {
460  for (c = 0; c < width; ++c) {
461  dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
462  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
463  }
464  }
465  } else {
466  for (r = slice_start; r < slice_end; ++r) {
467  for (c = 0; c < width; ++c) {
468  dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
469  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
470  }
471  }
472  }
473  }
474  return 0;
475 }
476 
477 /**
478  * Main control function for grey edge algorithm.
479  *
480  * @param ctx the filter context.
481  * @param in frame to perform grey edge on.
482  *
483  * @return 0 in case of success, a negative value corresponding to an
484  * AVERROR code in case of failure.
485  */
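/*
 * The per-job partial results are reduced per plane below: a plain maximum when
 * minknorm == 0, otherwise a sum followed by raising to the power 1/minknorm,
 * which yields the Minkowski norm of the derivative magnitudes.
 */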
486 static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
487 {
488  ColorConstancyContext *s = ctx->priv;
489  ThreadData td;
490  int minknorm = s->minknorm;
491  int difford = s->difford;
492  double *white = s->white;
493  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
494  int plane, job, ret;
495 
496  td.in = in;
497  ret = setup_derivative_buffers(ctx, &td);
498  if (ret) {
499  return ret;
500  }
501  get_derivative(ctx, &td);
502  if (difford > 0) {
503  ctx->internal->execute(ctx, slice_normalize, &td, NULL, nb_jobs);
504  }
505 
506  ctx->internal->execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
507  if (!minknorm) {
508  for (plane = 0; plane < NUM_PLANES; ++plane) {
509  white[plane] = 0; // All values are absolute
510  for (job = 0; job < nb_jobs; ++job) {
511  white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
512  }
513  }
514  } else {
515  for (plane = 0; plane < NUM_PLANES; ++plane) {
516  white[plane] = 0;
517  for (job = 0; job < nb_jobs; ++job) {
518  white[plane] += td.data[INDEX_DST][plane][job];
519  }
520  white[plane] = pow(white[plane], 1./minknorm);
521  }
522  }
523 
524  cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
525  return 0;
526 }
527 
528 /**
529  * Normalizes estimated illumination since only illumination vector
530  * direction is required for color constancy.
531  *
532  * @param light the estimated illumination to be normalized in place
533  */
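/*
 * The estimate is divided by its Euclidean (L2) length so that only the
 * direction of the illuminant vector is kept. Components that end up exactly
 * zero are forced to 1.0 so the later per-plane division cannot divide by zero.
 */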
534 static void normalize_light(double *light)
535 {
536  double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
537  int plane;
538 
539  // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
540 
541  if (!abs_val) {
542  for (plane = 0; plane < NUM_PLANES; ++plane) {
543  light[plane] = 1.0;
544  }
545  } else {
546  for (plane = 0; plane < NUM_PLANES; ++plane) {
547  light[plane] = (light[plane] / abs_val);
548  if (!light[plane]) { // to avoid division by zero when correcting
549  light[plane] = 1.0;
550  }
551  }
552  }
553 }
554 
555 /**
556  * Redirects to corresponding algorithm estimation function and performs normalization
557  * after estimation.
558  *
559  * @param ctx the filter context.
560  * @param in frame to perform estimation on.
561  *
562  * @return 0 in case of success, a negative value corresponding to an
563  * AVERROR code in case of failure.
564  */
565 static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
566 {
567  ColorConstancyContext *s = ctx->priv;
568  int ret;
569 
570  ret = filter_grey_edge(ctx, in);
571 
572  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
573  s->white[0], s->white[1], s->white[2]);
574  normalize_light(s->white);
575  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
576  s->white[0], s->white[1], s->white[2]);
577 
578  return ret;
579 }
580 
581 /**
582  * Performs simple correction via diagonal transformation model.
583  *
584  * @param ctx the filter context.
585  * @param arg data to be passed between threads.
586  * @param jobnr current job number.
587  * @param nb_jobs total number of jobs.
588  *
589  * @return 0.
590  */
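/*
 * Diagonal (von Kries-style) correction: every sample is divided by the
 * normalized illuminant component of its plane. The extra SQRT3 factor rescales
 * the unit-length illuminant so that a perfectly neutral estimate
 * (1/sqrt(3), 1/sqrt(3), 1/sqrt(3)) leaves the image unchanged.
 */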
591 static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
592 {
593  ColorConstancyContext *s = ctx->priv;
594  ThreadData *td = arg;
595  AVFrame *in = td->in;
596  AVFrame *out = td->out;
597  int plane;
598 
599  for (plane = 0; plane < NUM_PLANES; ++plane) {
600  const int height = s->planeheight[plane];
601  const int width = s->planewidth[plane];
602  const int64_t numpixels = width * (int64_t)height;
603  const int slice_start = (numpixels * jobnr) / nb_jobs;
604  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
605  const uint8_t *src = in->data[plane];
606  uint8_t *dst = out->data[plane];
607  double temp;
608  unsigned i;
609 
610  for (i = slice_start; i < slice_end; ++i) {
611  temp = src[i] / (s->white[plane] * SQRT3);
612  dst[i] = av_clip_uint8((int)(temp + 0.5));
613  }
614  }
615  return 0;
616 }
617 
618 /**
619  * Main control function for correcting scene illumination based on
620  * estimated illumination.
621  *
622  * @param ctx the filter context.
623  * @param in holds frame to correct
624  * @param out holds corrected frame
625  */
626 static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
627 {
628  ColorConstancyContext *s = ctx->priv;
629  ThreadData td;
630  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
631 
632  td.in = in;
633  td.out = out;
634  ctx->internal->execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
635 }
636 
637 static int query_formats(AVFilterContext *ctx)
638 {
639  static const enum AVPixelFormat pix_fmts[] = {
640  // TODO: support more formats
641  // FIXME: error when saving to .jpg
642  AV_PIX_FMT_GBRP,
643  AV_PIX_FMT_NONE
644  };
645 
646  return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
647 }
648 
649 static int config_props(AVFilterLink *inlink)
650 {
651  AVFilterContext *ctx = inlink->dst;
652  ColorConstancyContext *s = ctx->priv;
653  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
654  const double break_off_sigma = 3.0;
655  double sigma = s->sigma;
656  int ret;
657 
658  if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
659  av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
660  return AVERROR(EINVAL);
661  }
662 
663  s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
664  if (ret=set_gauss(ctx)) {
665  return ret;
666  }
667 
668  s->nb_threads = ff_filter_get_nb_threads(ctx);
669  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
670  s->planewidth[0] = s->planewidth[3] = inlink->w;
671  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
672  s->planeheight[0] = s->planeheight[3] = inlink->h;
673 
674  return 0;
675 }
676 
677 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
678 {
679  AVFilterContext *ctx = inlink->dst;
680  AVFilterLink *outlink = ctx->outputs[0];
681  AVFrame *out;
682  int ret;
683  int direct = 0;
684 
685  ret = illumination_estimation(ctx, in);
686  if (ret) {
687  av_frame_free(&in);
688  return ret;
689  }
690 
691  if (av_frame_is_writable(in)) {
692  direct = 1;
693  out = in;
694  } else {
695  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
696  if (!out) {
697  av_frame_free(&in);
698  return AVERROR(ENOMEM);
699  }
700  av_frame_copy_props(out, in);
701  }
702  chromatic_adaptation(ctx, in, out);
703 
704  if (!direct)
705  av_frame_free(&in);
706 
707  return ff_filter_frame(outlink, out);
708 }
709 
710 static av_cold void uninit(AVFilterContext *ctx)
711 {
712  ColorConstancyContext *s = ctx->priv;
713  int difford = s->difford;
714  int i;
715 
716  for (i = 0; i <= difford; ++i) {
717  av_freep(&s->gauss[i]);
718  }
719 }
720 
721 static const AVFilterPad colorconstancy_inputs[] = {
722  {
723  .name = "default",
724  .type = AVMEDIA_TYPE_VIDEO,
725  .config_props = config_props,
726  .filter_frame = filter_frame,
727  },
728  { NULL }
729 };
730 
731 static const AVFilterPad colorconstancy_outputs[] = {
732  {
733  .name = "default",
734  .type = AVMEDIA_TYPE_VIDEO,
735  },
736  { NULL }
737 };
738 
739 #if CONFIG_GREYEDGE_FILTER
740 
741 static const AVOption greyedge_options[] = {
742  { "difford", "set differentiation order", OFFSET(difford), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS },
743  { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 0, 20, FLAGS },
744  { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
745  { NULL }
746 };
747 
748 AVFILTER_DEFINE_CLASS(greyedge);
749 
750 AVFilter ff_vf_greyedge = {
751  .name = GREY_EDGE,
752  .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
753  .priv_size = sizeof(ColorConstancyContext),
754  .priv_class = &greyedge_class,
755  .query_formats = query_formats,
756  .uninit = uninit,
757  .inputs = colorconstancy_inputs,
758  .outputs = colorconstancy_outputs,
759  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC|AVFILTER_FLAG_SLICE_THREADS,
760 };
761 
762 #endif /* CONFIG_GREYEDGE_FILTER */