FFmpeg
vf_blend.c
1 /*
2  * Copyright (c) 2013 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/imgutils.h"
22 #include "libavutil/intfloat.h"
23 #include "libavutil/eval.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/pixfmt.h"
26 #include "avfilter.h"
27 #include "formats.h"
28 #include "framesync.h"
29 #include "internal.h"
30 #include "video.h"
31 #include "blend.h"
32 
33 #define TOP 0
34 #define BOTTOM 1
35 
36 typedef struct BlendContext {
37  const AVClass *class;
38  FFFrameSync fs;
39  int hsub, vsub; ///< chroma subsampling values
40  int nb_planes;
41  char *all_expr;
42  enum BlendMode all_mode;
43  double all_opacity;
44 
45  int depth;
46  FilterParams params[4];
47  int tblend;
48  AVFrame *prev_frame; /* only used with tblend */
49 } BlendContext;
50 
51 static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
52 enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
53 
54 typedef struct ThreadData {
55  const AVFrame *top, *bottom;
56  AVFrame *dst;
57  AVFilterLink *inlink;
58  int plane;
59  int w, h;
60  FilterParams *param;
61 } ThreadData;
62 
63 #define COMMON_OPTIONS \
64  { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
65  { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
66  { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
67  { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
68  { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
69  { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },\
70  { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, "mode" },\
71  { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, "mode" },\
72  { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },\
73  { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },\
74  { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },\
75  { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },\
76  { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },\
77  { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, "mode" },\
78  { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, "mode" },\
79  { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },\
80  { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },\
81  { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },\
82  { "extremity", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY}, 0, 0, FLAGS, "mode" },\
83  { "freeze", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE}, 0, 0, FLAGS, "mode" },\
84  { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, "mode" },\
85  { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },\
86  { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, "mode" },\
87  { "heat", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT}, 0, 0, FLAGS, "mode" },\
88  { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },\
89  { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, "mode" },\
90  { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },\
91  { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, "mode" },\
92  { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },\
93  { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },\
94  { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },\
95  { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },\
96  { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },\
97  { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },\
98  { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },\
99  { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },\
100  { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },\
101  { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },\
102  { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },\
103  { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },\
104  { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },\
105  { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },\
106  { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },\
107  { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },\
108  { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },\
109  { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
110  { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
111  { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
112  { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
113  { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}
114 
115 #define OFFSET(x) offsetof(BlendContext, x)
116 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
117 
118 static const AVOption blend_options[] = {
119  COMMON_OPTIONS,
120  { NULL }
121 };
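/* Example use of the options above (illustrative, not part of the original
 * source): both filters defined in this file take COMMON_OPTIONS, so e.g.
 *
 *     ffmpeg -i top.mp4 -i bottom.mp4 \
 *            -filter_complex "blend=all_mode=multiply:all_opacity=0.7" out.mp4
 *
 * picks blend_multiply_*() for every plane and moves each output pixel 70%
 * of the way from the top input toward the multiply result. In
 * config_output() below, all_mode and all_opacity overwrite the per-component
 * settings when given, while all_expr is only used for components that have
 * no cN_expr of their own. */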
122 
123 FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs);
124 
125 #define COPY(src, depth) \
126 static void blend_copy ## src##_##depth(const uint8_t *top, ptrdiff_t top_linesize, \
127  const uint8_t *bottom, ptrdiff_t bottom_linesize,\
128  uint8_t *dst, ptrdiff_t dst_linesize, \
129  ptrdiff_t width, ptrdiff_t height, \
130  FilterParams *param, double *values, int starty) \
131 { \
132  av_image_copy_plane(dst, dst_linesize, src, src ## _linesize, \
133  width * depth / 8, height); \
134 }
135 
136 COPY(top, 8)
137 COPY(bottom, 8)
138 
139 COPY(top, 16)
140 COPY(bottom, 16)
141 
142 COPY(top, 32)
143 COPY(bottom, 32)
144 
145 #undef COPY
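/* The COPY() instantiations above generate blend_copytop_{8,16,32} and
 * blend_copybottom_{8,16,32}. ff_blend_init() further down installs them as
 * fast paths when blending degenerates into a plain plane copy: opacity 0
 * with a non-normal mode (keep top), normal mode with opacity 1 (keep top),
 * or normal mode with opacity 0 (keep bottom). */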
146 
147 static void blend_normal_8bit(const uint8_t *top, ptrdiff_t top_linesize,
148  const uint8_t *bottom, ptrdiff_t bottom_linesize,
149  uint8_t *dst, ptrdiff_t dst_linesize,
150  ptrdiff_t width, ptrdiff_t height,
151  FilterParams *param, double *values, int starty)
152 {
153  const double opacity = param->opacity;
154  int i, j;
155 
156  for (i = 0; i < height; i++) {
157  for (j = 0; j < width; j++) {
158  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
159  }
160  dst += dst_linesize;
161  top += top_linesize;
162  bottom += bottom_linesize;
163  }
164 }
165 
166 static void blend_normal_16bit(const uint8_t *_top, ptrdiff_t top_linesize,
167  const uint8_t *_bottom, ptrdiff_t bottom_linesize,
168  uint8_t *_dst, ptrdiff_t dst_linesize,
169  ptrdiff_t width, ptrdiff_t height,
170  FilterParams *param, double *values, int starty)
171 {
172  const uint16_t *top = (uint16_t*)_top;
173  const uint16_t *bottom = (uint16_t*)_bottom;
174  uint16_t *dst = (uint16_t*)_dst;
175  const double opacity = param->opacity;
176  int i, j;
177  dst_linesize /= 2;
178  top_linesize /= 2;
179  bottom_linesize /= 2;
180 
181  for (i = 0; i < height; i++) {
182  for (j = 0; j < width; j++) {
183  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
184  }
185  dst += dst_linesize;
186  top += top_linesize;
187  bottom += bottom_linesize;
188  }
189 }
190 
191 static void blend_normal_32bit(const uint8_t *_top, ptrdiff_t top_linesize,
192  const uint8_t *_bottom, ptrdiff_t bottom_linesize,
193  uint8_t *_dst, ptrdiff_t dst_linesize,
194  ptrdiff_t width, ptrdiff_t height,
195  FilterParams *param, double *values, int starty)
196 {
197  const float *top = (float*)_top;
198  const float *bottom = (float*)_bottom;
199  float *dst = (float*)_dst;
200  const double opacity = param->opacity;
201  int i, j;
202  dst_linesize /= 4;
203  top_linesize /= 4;
204  bottom_linesize /= 4;
205 
206  for (i = 0; i < height; i++) {
207  for (j = 0; j < width; j++) {
208  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
209  }
210  dst += dst_linesize;
211  top += top_linesize;
212  bottom += bottom_linesize;
213  }
214 }
215 
216 #define DEFINE_BLEND8(name, expr) \
217 static void blend_## name##_8bit(const uint8_t *top, ptrdiff_t top_linesize, \
218  const uint8_t *bottom, ptrdiff_t bottom_linesize, \
219  uint8_t *dst, ptrdiff_t dst_linesize, \
220  ptrdiff_t width, ptrdiff_t height, \
221  FilterParams *param, double *values, int starty) \
222 { \
223  double opacity = param->opacity; \
224  int i, j; \
225  \
226  for (i = 0; i < height; i++) { \
227  for (j = 0; j < width; j++) { \
228  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
229  } \
230  dst += dst_linesize; \
231  top += top_linesize; \
232  bottom += bottom_linesize; \
233  } \
234 }
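/* Rough sketch of what one instantiation expands to, with A and B defined
 * below as top[j] and bottom[j]: DEFINE_BLEND8(addition, FFMIN(255, A + B))
 * yields blend_addition_8bit(), whose inner loop is effectively
 *
 *     dst[j] = top[j] + (FFMIN(255, top[j] + bottom[j]) - top[j]) * opacity;
 *
 * i.e. each mode is computed at full strength and then faded against the top
 * input by the per-component opacity. The 16/10/12/9-bit and float variants
 * below follow the same pattern at their respective ranges. */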
235 
236 #define DEFINE_BLEND16(name, expr, depth) \
237 static void blend_## name##_##depth##bit(const uint8_t *_top, ptrdiff_t top_linesize,\
238  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
239  uint8_t *_dst, ptrdiff_t dst_linesize, \
240  ptrdiff_t width, ptrdiff_t height, \
241  FilterParams *param, double *values, int starty) \
242 { \
243  const uint16_t *top = (const uint16_t*)_top; \
244  const uint16_t *bottom = (const uint16_t*)_bottom; \
245  uint16_t *dst = (uint16_t*)_dst; \
246  double opacity = param->opacity; \
247  int i, j; \
248  dst_linesize /= 2; \
249  top_linesize /= 2; \
250  bottom_linesize /= 2; \
251  \
252  for (i = 0; i < height; i++) { \
253  for (j = 0; j < width; j++) { \
254  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
255  } \
256  dst += dst_linesize; \
257  top += top_linesize; \
258  bottom += bottom_linesize; \
259  } \
260 }
261 
262 #define DEFINE_BLEND32(name, expr, depth) \
263 static void blend_## name##_##depth##bit(const uint8_t *_top, ptrdiff_t top_linesize,\
264  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
265  uint8_t *_dst, ptrdiff_t dst_linesize, \
266  ptrdiff_t width, ptrdiff_t height, \
267  FilterParams *param, double *values, int starty) \
268 { \
269  const float *top = (const float*)_top; \
270  const float *bottom = (const float*)_bottom; \
271  float *dst = (float*)_dst; \
272  double opacity = param->opacity; \
273  int i, j; \
274  dst_linesize /= 4; \
275  top_linesize /= 4; \
276  bottom_linesize /= 4; \
277  \
278  for (i = 0; i < height; i++) { \
279  for (j = 0; j < width; j++) { \
280  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
281  } \
282  dst += dst_linesize; \
283  top += top_linesize; \
284  bottom += bottom_linesize; \
285  } \
286 }
287 
288 #define A top[j]
289 #define B bottom[j]
290 
291 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
292 #define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
293 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
294 #define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
295 
296 DEFINE_BLEND8(addition, FFMIN(255, A + B))
297 DEFINE_BLEND8(grainmerge, av_clip_uint8(A + B - 128))
298 DEFINE_BLEND8(average, (A + B) / 2)
299 DEFINE_BLEND8(subtract, FFMAX(0, A - B))
300 DEFINE_BLEND8(multiply, MULTIPLY(1, A, B))
301 DEFINE_BLEND8(multiply128,av_clip_uint8((A - 128) * B / 32. + 128))
302 DEFINE_BLEND8(negation, 255 - FFABS(255 - A - B))
303 DEFINE_BLEND8(extremity, FFABS(255 - A - B))
304 DEFINE_BLEND8(difference, FFABS(A - B))
305 DEFINE_BLEND8(grainextract, av_clip_uint8(128 + A - B))
306 DEFINE_BLEND8(screen, SCREEN(1, A, B))
307 DEFINE_BLEND8(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
308 DEFINE_BLEND8(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
309 DEFINE_BLEND8(hardmix, (A < (255 - B)) ? 0: 255)
310 DEFINE_BLEND8(heat, (A == 0) ? 0 : 255 - FFMIN(((255 - B) * (255 - B)) / A, 255))
311 DEFINE_BLEND8(freeze, (B == 0) ? 0 : 255 - FFMIN(((255 - A) * (255 - A)) / B, 255))
312 DEFINE_BLEND8(darken, FFMIN(A, B))
313 DEFINE_BLEND8(lighten, FFMAX(A, B))
314 DEFINE_BLEND8(divide, av_clip_uint8(B == 0 ? 255 : 255 * A / B))
315 DEFINE_BLEND8(dodge, DODGE(A, B))
316 DEFINE_BLEND8(burn, BURN(A, B))
317 DEFINE_BLEND8(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - fabs(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - fabs(B - 127.5)/255))
318 DEFINE_BLEND8(exclusion, A + B - 2 * A * B / 255)
319 DEFINE_BLEND8(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
320 DEFINE_BLEND8(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
321 DEFINE_BLEND8(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
322 DEFINE_BLEND8(glow, (A == 255) ? A : FFMIN(255, (B * B / (255 - A))))
323 DEFINE_BLEND8(and, A & B)
324 DEFINE_BLEND8(or, A | B)
325 DEFINE_BLEND8(xor, A ^ B)
326 DEFINE_BLEND8(vividlight, (A < 128) ? BURN(2 * A, B) : DODGE(2 * (A - 128), B))
327 DEFINE_BLEND8(linearlight,av_clip_uint8((B < 128) ? B + 2 * A - 255 : B + 2 * (A - 128)))
328 
329 #undef MULTIPLY
330 #undef SCREEN
331 #undef BURN
332 #undef DODGE
333 
334 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 65535))
335 #define SCREEN(x, a, b) (65535 - (x) * ((65535 - (a)) * (65535 - (b)) / 65535))
336 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 65535 - ((65535 - (b)) << 16) / (a)))
337 #define DODGE(a, b) (((a) == 65535) ? (a) : FFMIN(65535, (((b) << 16) / (65535 - (a)))))
338 
339 DEFINE_BLEND16(addition, FFMIN(65535, A + B), 16)
340 DEFINE_BLEND16(grainmerge, av_clip_uint16(A + B - 32768), 16)
341 DEFINE_BLEND16(average, (A + B) / 2, 16)
342 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 16)
343 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 16)
344 DEFINE_BLEND16(multiply128, av_clip_uint16((A - 32768) * B / 8192. + 32768), 16)
345 DEFINE_BLEND16(negation, 65535 - FFABS(65535 - A - B), 16)
346 DEFINE_BLEND16(extremity, FFABS(65535 - A - B), 16)
347 DEFINE_BLEND16(difference, FFABS(A - B), 16)
348 DEFINE_BLEND16(grainextract, av_clip_uint16(32768 + A - B), 16)
349 DEFINE_BLEND16(screen, SCREEN(1, A, B), 16)
350 DEFINE_BLEND16(overlay, (A < 32768) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 16)
351 DEFINE_BLEND16(hardlight, (B < 32768) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 16)
352 DEFINE_BLEND16(hardmix, (A < (65535 - B)) ? 0: 65535, 16)
353 DEFINE_BLEND16(heat, (A == 0) ? 0 : 65535 - FFMIN(((65535 - B) * (65535 - B)) / A, 65535), 16)
354 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 65535 - FFMIN(((65535 - A) * (65535 - A)) / B, 65535), 16)
355 DEFINE_BLEND16(darken, FFMIN(A, B), 16)
356 DEFINE_BLEND16(lighten, FFMAX(A, B), 16)
357 DEFINE_BLEND16(divide, av_clip_uint16(B == 0 ? 65535 : 65535 * A / B), 16)
358 DEFINE_BLEND16(dodge, DODGE(A, B), 16)
359 DEFINE_BLEND16(burn, BURN(A, B), 16)
360 DEFINE_BLEND16(softlight, (A > 32767) ? B + (65535 - B) * (A - 32767.5) / 32767.5 * (0.5 - fabs(B - 32767.5) / 65535): B - B * ((32767.5 - A) / 32767.5) * (0.5 - fabs(B - 32767.5)/65535), 16)
361 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 65535, 16)
362 DEFINE_BLEND16(pinlight, (B < 32768) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 32768)), 16)
363 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 65535, 16)
364 DEFINE_BLEND16(reflect, (B == 65535) ? B : FFMIN(65535, (A * A / (65535 - B))), 16)
365 DEFINE_BLEND16(glow, (A == 65535) ? A : FFMIN(65535, (B * B / (65535 - A))), 16)
366 DEFINE_BLEND16(and, A & B, 16)
367 DEFINE_BLEND16(or, A | B, 16)
368 DEFINE_BLEND16(xor, A ^ B, 16)
369 DEFINE_BLEND16(vividlight, (A < 32768) ? BURN(2 * A, B) : DODGE(2 * (A - 32768), B), 16)
370 DEFINE_BLEND16(linearlight,av_clip_uint16((B < 32768) ? B + 2 * A - 65535 : B + 2 * (A - 32768)), 16)
371 
372 #undef MULTIPLY
373 #undef SCREEN
374 #undef BURN
375 #undef DODGE
376 
377 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 1023))
378 #define SCREEN(x, a, b) (1023 - (x) * ((1023 - (a)) * (1023 - (b)) / 1023))
379 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 1023 - ((1023 - (b)) << 10) / (a)))
380 #define DODGE(a, b) (((a) == 1023) ? (a) : FFMIN(1023, (((b) << 10) / (1023 - (a)))))
381 
382 DEFINE_BLEND16(addition, FFMIN(1023, A + B), 10)
383 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 512, 10), 10)
384 DEFINE_BLEND16(average, (A + B) / 2, 10)
385 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 10)
386 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 10)
387 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 512) * B / 128. + 512, 10), 10)
388 DEFINE_BLEND16(negation, 1023 - FFABS(1023 - A - B), 10)
389 DEFINE_BLEND16(extremity, FFABS(1023 - A - B), 10)
390 DEFINE_BLEND16(difference, FFABS(A - B), 10)
391 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(512 + A - B, 10), 10)
392 DEFINE_BLEND16(screen, SCREEN(1, A, B), 10)
393 DEFINE_BLEND16(overlay, (A < 512) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 10)
394 DEFINE_BLEND16(hardlight, (B < 512) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 10)
395 DEFINE_BLEND16(hardmix, (A < (1023 - B)) ? 0: 1023, 10)
396 DEFINE_BLEND16(heat, (A == 0) ? 0 : 1023 - FFMIN(((1023 - B) * (1023 - B)) / A, 1023), 10)
397 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 1023 - FFMIN(((1023 - A) * (1023 - A)) / B, 1023), 10)
398 DEFINE_BLEND16(darken, FFMIN(A, B), 10)
399 DEFINE_BLEND16(lighten, FFMAX(A, B), 10)
400 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 1023 : 1023 * A / B, 10), 10)
401 DEFINE_BLEND16(dodge, DODGE(A, B), 10)
402 DEFINE_BLEND16(burn, BURN(A, B), 10)
403 DEFINE_BLEND16(softlight, (A > 511) ? B + (1023 - B) * (A - 511.5) / 511.5 * (0.5 - fabs(B - 511.5) / 1023): B - B * ((511.5 - A) / 511.5) * (0.5 - fabs(B - 511.5)/1023), 10)
404 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 1023, 10)
405 DEFINE_BLEND16(pinlight, (B < 512) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 512)), 10)
406 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 1023, 10)
407 DEFINE_BLEND16(reflect, (B == 1023) ? B : FFMIN(1023, (A * A / (1023 - B))), 10)
408 DEFINE_BLEND16(glow, (A == 1023) ? A : FFMIN(1023, (B * B / (1023 - A))), 10)
409 DEFINE_BLEND16(and, A & B, 10)
410 DEFINE_BLEND16(or, A | B, 10)
411 DEFINE_BLEND16(xor, A ^ B, 10)
412 DEFINE_BLEND16(vividlight, (A < 512) ? BURN(2 * A, B) : DODGE(2 * (A - 512), B), 10)
413 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 512) ? B + 2 * A - 1023 : B + 2 * (A - 512), 10), 10)
414 
415 #undef MULTIPLY
416 #undef SCREEN
417 #undef BURN
418 #undef DODGE
419 
420 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 4095))
421 #define SCREEN(x, a, b) (4095 - (x) * ((4095 - (a)) * (4095 - (b)) / 4095))
422 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 4095 - ((4095 - (b)) << 12) / (a)))
423 #define DODGE(a, b) (((a) == 4095) ? (a) : FFMIN(4095, (((b) << 12) / (4095 - (a)))))
424 
425 DEFINE_BLEND16(addition, FFMIN(4095, A + B), 12)
426 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 2048, 12), 12)
427 DEFINE_BLEND16(average, (A + B) / 2, 12)
428 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 12)
429 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 12)
430 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 2048) * B / 512. + 2048, 12), 12)
431 DEFINE_BLEND16(negation, 4095 - FFABS(4095 - A - B), 12)
432 DEFINE_BLEND16(extremity, FFABS(4095 - A - B), 12)
433 DEFINE_BLEND16(difference, FFABS(A - B), 12)
434 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(2048 + A - B, 12), 12)
435 DEFINE_BLEND16(screen, SCREEN(1, A, B), 12)
436 DEFINE_BLEND16(overlay, (A < 2048) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 12)
437 DEFINE_BLEND16(hardlight, (B < 2048) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 12)
438 DEFINE_BLEND16(hardmix, (A < (4095 - B)) ? 0: 4095, 12)
439 DEFINE_BLEND16(heat, (A == 0) ? 0 : 4095 - FFMIN(((4095 - B) * (4095 - B)) / A, 4095), 12)
440 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 4095 - FFMIN(((4095 - A) * (4095 - A)) / B, 4095), 12)
441 DEFINE_BLEND16(darken, FFMIN(A, B), 12)
442 DEFINE_BLEND16(lighten, FFMAX(A, B), 12)
443 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 4095 : 4095 * A / B, 12), 12)
444 DEFINE_BLEND16(dodge, DODGE(A, B), 12)
445 DEFINE_BLEND16(burn, BURN(A, B), 12)
446 DEFINE_BLEND16(softlight, (A > 2047) ? B + (4095 - B) * (A - 2047.5) / 2047.5 * (0.5 - fabs(B - 2047.5) / 4095): B - B * ((2047.5 - A) / 2047.5) * (0.5 - fabs(B - 2047.5)/4095), 12)
447 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 4095, 12)
448 DEFINE_BLEND16(pinlight, (B < 2048) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 2048)), 12)
449 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 4095, 12)
450 DEFINE_BLEND16(reflect, (B == 4095) ? B : FFMIN(4095, (A * A / (4095 - B))), 12)
451 DEFINE_BLEND16(glow, (A == 4095) ? A : FFMIN(4095, (B * B / (4095 - A))), 12)
452 DEFINE_BLEND16(and, A & B, 12)
453 DEFINE_BLEND16(or, A | B, 12)
454 DEFINE_BLEND16(xor, A ^ B, 12)
455 DEFINE_BLEND16(vividlight, (A < 2048) ? BURN(2 * A, B) : DODGE(2 * (A - 2048), B), 12)
456 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 2048) ? B + 2 * A - 4095 : B + 2 * (A - 2048), 12), 12)
457 
458 #undef MULTIPLY
459 #undef SCREEN
460 #undef BURN
461 #undef DODGE
462 
463 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 511))
464 #define SCREEN(x, a, b) (511 - (x) * ((511 - (a)) * (511 - (b)) / 511))
465 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 511 - ((511 - (b)) << 9) / (a)))
466 #define DODGE(a, b) (((a) == 511) ? (a) : FFMIN(511, (((b) << 9) / (511 - (a)))))
467 
468 DEFINE_BLEND16(addition, FFMIN(511, A + B), 9)
469 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 256, 9), 9)
470 DEFINE_BLEND16(average, (A + B) / 2, 9)
471 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 9)
472 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 9)
473 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 256) * B / 64. + 256, 9), 9)
474 DEFINE_BLEND16(negation, 511 - FFABS(511 - A - B), 9)
475 DEFINE_BLEND16(extremity, FFABS(511 - A - B), 9)
476 DEFINE_BLEND16(difference, FFABS(A - B), 9)
477 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(256 + A - B, 9), 9)
478 DEFINE_BLEND16(screen, SCREEN(1, A, B), 9)
479 DEFINE_BLEND16(overlay, (A < 256) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 9)
480 DEFINE_BLEND16(hardlight, (B < 256) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 9)
481 DEFINE_BLEND16(hardmix, (A < (511 - B)) ? 0: 511, 9)
482 DEFINE_BLEND16(heat, (A == 0) ? 0 : 511 - FFMIN(((511 - B) * (511 - B)) / A, 511), 9)
483 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 511 - FFMIN(((511 - A) * (511 - A)) / B, 511), 9)
484 DEFINE_BLEND16(darken, FFMIN(A, B), 9)
485 DEFINE_BLEND16(lighten, FFMAX(A, B), 9)
486 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 511 : 511 * A / B, 9), 9)
487 DEFINE_BLEND16(dodge, DODGE(A, B), 9)
488 DEFINE_BLEND16(burn, BURN(A, B), 9)
489 DEFINE_BLEND16(softlight, (A > 511) ? B + (511 - B) * (A - 511.5) / 511.5 * (0.5 - fabs(B - 511.5) / 511): B - B * ((511.5 - A) / 511.5) * (0.5 - fabs(B - 511.5)/511), 9)
490 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 511, 9)
491 DEFINE_BLEND16(pinlight, (B < 256) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 256)), 9)
492 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 511, 9)
493 DEFINE_BLEND16(reflect, (B == 511) ? B : FFMIN(511, (A * A / (511 - B))), 9)
494 DEFINE_BLEND16(glow, (A == 511) ? A : FFMIN(511, (B * B / (511 - A))), 9)
495 DEFINE_BLEND16(and, A & B, 9)
496 DEFINE_BLEND16(or, A | B, 9)
497 DEFINE_BLEND16(xor, A ^ B, 9)
498 DEFINE_BLEND16(vividlight, (A < 256) ? BURN(2 * A, B) : DODGE(2 * (A - 256), B), 9)
499 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 256) ? B + 2 * A - 511 : B + 2 * (A - 256), 9), 9)
500 
501 #undef MULTIPLY
502 #undef SCREEN
503 #undef BURN
504 #undef DODGE
505 
506 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 1.0))
507 #define SCREEN(x, a, b) (1.0 - (x) * ((1.0 - (a)) * (1.0 - (b)) / 1.0))
508 #define BURN(a, b) (((a) <= 0.0) ? (a) : FFMAX(0.0, 1.0 - (1.0 - (b)) / (a)))
509 #define DODGE(a, b) (((a) >= 1.0) ? (a) : FFMIN(1.0, ((b) / (1.0 - (a)))))
510 
511 DEFINE_BLEND32(addition, A + B, 32)
512 DEFINE_BLEND32(grainmerge, A + B - 0.5, 32)
513 DEFINE_BLEND32(average, (A + B) / 2, 32)
514 DEFINE_BLEND32(subtract, A - B, 32)
515 DEFINE_BLEND32(multiply, A * B, 32)
516 DEFINE_BLEND32(multiply128, (A - 0.5) * B / 0.125 + 0.5, 32)
517 DEFINE_BLEND32(negation, 1.0 - FFABS(1.0 - A - B), 32)
518 DEFINE_BLEND32(extremity, FFABS(1.0 - A - B), 32)
519 DEFINE_BLEND32(difference, FFABS(A - B), 32)
520 DEFINE_BLEND32(grainextract, 0.5 + A - B, 32)
521 DEFINE_BLEND32(screen, SCREEN(1, A, B), 32)
522 DEFINE_BLEND32(overlay, (A < 0.5) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 32)
523 DEFINE_BLEND32(hardlight, (B < 0.5) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 32)
524 DEFINE_BLEND32(hardmix, (A < (1.0 - B)) ? 0: 1.0, 32)
525 DEFINE_BLEND32(heat, (A == 0) ? 0 : 1.0 - FFMIN(((1.0 - B) * (1.0 - B)) / A, 1.0), 32)
526 DEFINE_BLEND32(freeze, (B == 0) ? 0 : 1.0 - FFMIN(((1.0 - A) * (1.0 - A)) / B, 1.0), 32)
527 DEFINE_BLEND32(darken, FFMIN(A, B), 32)
528 DEFINE_BLEND32(lighten, FFMAX(A, B), 32)
529 DEFINE_BLEND32(divide, B == 0 ? 1.0 : 1.0 * A / B, 32)
530 DEFINE_BLEND32(dodge, DODGE(A, B), 32)
531 DEFINE_BLEND32(burn, BURN(A, B), 32)
532 DEFINE_BLEND32(softlight, (A > 0.5) ? B + (1.0 - B) * (A - 0.5) / 0.5 * (0.5 - fabs(B - 0.5) / 1.0): B - B * ((0.5 - A) / 0.5) * (0.5 - fabs(B - 0.5)/1.0), 32)
533 DEFINE_BLEND32(exclusion, A + B - 2 * A * B / 1.0, 32)
534 DEFINE_BLEND32(pinlight, (B < 0.5) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 0.5)), 32)
535 DEFINE_BLEND32(phoenix, FFMIN(A, B) - FFMAX(A, B) + 1.0, 32)
536 DEFINE_BLEND32(reflect, (B == 1.0) ? B : FFMIN(1.0, (A * A / (1.0 - B))), 32)
537 DEFINE_BLEND32(glow, (A == 1.0) ? A : FFMIN(1.0, (B * B / (1.0 - A))), 32)
538 DEFINE_BLEND32(and, av_int2float(av_float2int(A) & av_float2int(B)), 32)
539 DEFINE_BLEND32(or, av_int2float(av_float2int(A) | av_float2int(B)), 32)
540 DEFINE_BLEND32(xor, av_int2float(av_float2int(A) ^ av_float2int(B)), 32)
541 DEFINE_BLEND32(vividlight, (A < 0.5) ? BURN(2 * A, B) : DODGE(2 * (A - 0.5), B), 32)
542 DEFINE_BLEND32(linearlight,(B < 0.5) ? B + 2 * A - 1.0 : B + 2 * (A - 0.5), 32)
543 
544 #define DEFINE_BLEND_EXPR(type, name, div) \
545 static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize, \
546  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
547  uint8_t *_dst, ptrdiff_t dst_linesize, \
548  ptrdiff_t width, ptrdiff_t height, \
549  FilterParams *param, double *values, int starty) \
550 { \
551  const type *top = (type*)_top; \
552  const type *bottom = (type*)_bottom; \
553  type *dst = (type*)_dst; \
554  AVExpr *e = param->e; \
555  int y, x; \
556  dst_linesize /= div; \
557  top_linesize /= div; \
558  bottom_linesize /= div; \
559  \
560  for (y = 0; y < height; y++) { \
561  values[VAR_Y] = y + starty; \
562  for (x = 0; x < width; x++) { \
563  values[VAR_X] = x; \
564  values[VAR_TOP] = values[VAR_A] = top[x]; \
565  values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; \
566  dst[x] = av_expr_eval(e, values, NULL); \
567  } \
568  dst += dst_linesize; \
569  top += top_linesize; \
570  bottom += bottom_linesize; \
571  } \
572 }
573 
574 DEFINE_BLEND_EXPR(uint8_t, 8bit, 1)
575 DEFINE_BLEND_EXPR(uint16_t, 16bit, 2)
576 DEFINE_BLEND_EXPR(float, 32bit, 4)
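/* The expression path is an interpreted fallback for the table-driven modes
 * above: av_expr_eval() runs once per pixel with the variables listed in
 * var_names[]. As an illustrative example (not from the original source),
 *
 *     blend=all_expr='if(lt(X,W/2),A,B)'
 *
 * takes the left half of every plane from the top input and the right half
 * from the bottom input, since A/TOP and B/BOTTOM carry the two source
 * pixels and X/W the current column and plane width. */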
577 
578 static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
579 {
580  ThreadData *td = arg;
581  int slice_start = (td->h * jobnr ) / nb_jobs;
582  int slice_end = (td->h * (jobnr+1)) / nb_jobs;
583  int height = slice_end - slice_start;
584  const uint8_t *top = td->top->data[td->plane];
585  const uint8_t *bottom = td->bottom->data[td->plane];
586  uint8_t *dst = td->dst->data[td->plane];
587  double values[VAR_VARS_NB];
588 
589  values[VAR_N] = td->inlink->frame_count_out;
590  values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
591  values[VAR_W] = td->w;
592  values[VAR_H] = td->h;
593  values[VAR_SW] = td->w / (double)td->dst->width;
594  values[VAR_SH] = td->h / (double)td->dst->height;
595 
596  td->param->blend(top + slice_start * td->top->linesize[td->plane],
597  td->top->linesize[td->plane],
598  bottom + slice_start * td->bottom->linesize[td->plane],
599  td->bottom->linesize[td->plane],
600  dst + slice_start * td->dst->linesize[td->plane],
601  td->dst->linesize[td->plane],
602  td->w, height, td->param, &values[0], slice_start);
603  return 0;
604 }
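/* The slice split above is the usual integer partition: with td->h = 1080
 * and nb_jobs = 4, jobnr 1 covers rows 270..539, so each job blends a
 * disjoint horizontal band, offsetting all three plane pointers by
 * slice_start lines. */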
605 
606 static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
607  const AVFrame *bottom_buf)
608 {
609  BlendContext *s = ctx->priv;
610  AVFilterLink *inlink = ctx->inputs[0];
611  AVFilterLink *outlink = ctx->outputs[0];
612  AVFrame *dst_buf;
613  int plane;
614 
615  dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
616  if (!dst_buf)
617  return top_buf;
618  av_frame_copy_props(dst_buf, top_buf);
619 
620  for (plane = 0; plane < s->nb_planes; plane++) {
621  int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
622  int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
623  int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub);
624  int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub);
625  FilterParams *param = &s->params[plane];
626  ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
627  .w = outw, .h = outh, .param = param, .plane = plane,
628  .inlink = inlink };
629  .inlink = inlink };
630  ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ff_filter_get_nb_threads(ctx)));
631  }
632 
633  if (!s->tblend)
634  av_frame_free(&top_buf);
635 
636  return dst_buf;
637 }
638 
639 static int blend_frame_for_dualinput(FFFrameSync *fs)
640 {
641  AVFilterContext *ctx = fs->parent;
642  AVFrame *top_buf, *bottom_buf, *dst_buf;
643  int ret;
644 
645  ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf);
646  if (ret < 0)
647  return ret;
648  if (!bottom_buf)
649  return ff_filter_frame(ctx->outputs[0], top_buf);
650  dst_buf = blend_frame(ctx, top_buf, bottom_buf);
651  return ff_filter_frame(ctx->outputs[0], dst_buf);
652 }
653 
654 static av_cold int init(AVFilterContext *ctx)
655 {
656  BlendContext *s = ctx->priv;
657 
658  s->tblend = !strcmp(ctx->filter->name, "tblend");
659 
660  s->fs.on_event = blend_frame_for_dualinput;
661  return 0;
662 }
663 
664 static int query_formats(AVFilterContext *ctx)
665 {
666  static const enum AVPixelFormat pix_fmts[] = {
667  AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
668  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
669  AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
670  AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
671  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
672  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
673  AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
674  AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
675  AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
676  AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
677  AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
678  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
679  AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP16,
680  AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
681  AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
682  AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32,
683  AV_PIX_FMT_NONE
684  };
685 
686  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
687  if (!fmts_list)
688  return AVERROR(ENOMEM);
689  return ff_set_common_formats(ctx, fmts_list);
690 }
691 
692 static av_cold void uninit(AVFilterContext *ctx)
693 {
694  BlendContext *s = ctx->priv;
695  int i;
696 
697  ff_framesync_uninit(&s->fs);
698  av_frame_free(&s->prev_frame);
699 
700  for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++)
701  av_expr_free(s->params[i].e);
702 }
703 
704 #define DEFINE_INIT_BLEND_FUNC(depth, nbits) \
705 static av_cold void init_blend_func_##depth##_##nbits##bit(FilterParams *param) \
706 { \
707  switch (param->mode) { \
708  case BLEND_ADDITION: param->blend = blend_addition_##depth##bit; break; \
709  case BLEND_GRAINMERGE: param->blend = blend_grainmerge_##depth##bit; break; \
710  case BLEND_AND: param->blend = blend_and_##depth##bit; break; \
711  case BLEND_AVERAGE: param->blend = blend_average_##depth##bit; break; \
712  case BLEND_BURN: param->blend = blend_burn_##depth##bit; break; \
713  case BLEND_DARKEN: param->blend = blend_darken_##depth##bit; break; \
714  case BLEND_DIFFERENCE: param->blend = blend_difference_##depth##bit; break; \
715  case BLEND_GRAINEXTRACT: param->blend = blend_grainextract_##depth##bit; break; \
716  case BLEND_DIVIDE: param->blend = blend_divide_##depth##bit; break; \
717  case BLEND_DODGE: param->blend = blend_dodge_##depth##bit; break; \
718  case BLEND_EXCLUSION: param->blend = blend_exclusion_##depth##bit; break; \
719  case BLEND_EXTREMITY: param->blend = blend_extremity_##depth##bit; break; \
720  case BLEND_FREEZE: param->blend = blend_freeze_##depth##bit; break; \
721  case BLEND_GLOW: param->blend = blend_glow_##depth##bit; break; \
722  case BLEND_HARDLIGHT: param->blend = blend_hardlight_##depth##bit; break; \
723  case BLEND_HARDMIX: param->blend = blend_hardmix_##depth##bit; break; \
724  case BLEND_HEAT: param->blend = blend_heat_##depth##bit; break; \
725  case BLEND_LIGHTEN: param->blend = blend_lighten_##depth##bit; break; \
726  case BLEND_LINEARLIGHT: param->blend = blend_linearlight_##depth##bit; break; \
727  case BLEND_MULTIPLY: param->blend = blend_multiply_##depth##bit; break; \
728  case BLEND_MULTIPLY128: param->blend = blend_multiply128_##depth##bit; break; \
729  case BLEND_NEGATION: param->blend = blend_negation_##depth##bit; break; \
730  case BLEND_NORMAL: param->blend = blend_normal_##nbits##bit; break; \
731  case BLEND_OR: param->blend = blend_or_##depth##bit; break; \
732  case BLEND_OVERLAY: param->blend = blend_overlay_##depth##bit; break; \
733  case BLEND_PHOENIX: param->blend = blend_phoenix_##depth##bit; break; \
734  case BLEND_PINLIGHT: param->blend = blend_pinlight_##depth##bit; break; \
735  case BLEND_REFLECT: param->blend = blend_reflect_##depth##bit; break; \
736  case BLEND_SCREEN: param->blend = blend_screen_##depth##bit; break; \
737  case BLEND_SOFTLIGHT: param->blend = blend_softlight_##depth##bit; break; \
738  case BLEND_SUBTRACT: param->blend = blend_subtract_##depth##bit; break; \
739  case BLEND_VIVIDLIGHT: param->blend = blend_vividlight_##depth##bit; break; \
740  case BLEND_XOR: param->blend = blend_xor_##depth##bit; break; \
741  } \
742 }
743 DEFINE_INIT_BLEND_FUNC(8, 8)
744 DEFINE_INIT_BLEND_FUNC(9, 16)
745 DEFINE_INIT_BLEND_FUNC(10, 16)
746 DEFINE_INIT_BLEND_FUNC(12, 16)
747 DEFINE_INIT_BLEND_FUNC(16, 16)
748 DEFINE_INIT_BLEND_FUNC(32, 32)
749 
750 void ff_blend_init(FilterParams *param, int depth)
751 {
752  switch (depth) {
753  case 8:
754  init_blend_func_8_8bit(param);
755  break;
756  case 9:
757  init_blend_func_9_16bit(param);
758  break;
759  case 10:
760  init_blend_func_10_16bit(param);
761  break;
762  case 12:
763  init_blend_func_12_16bit(param);
764  break;
765  case 16:
766  init_blend_func_16_16bit(param);
767  break;
768  case 32:
769  init_blend_func_32_32bit(param);
770  break;
771  }
772 
773  if (param->opacity == 0 && param->mode != BLEND_NORMAL) {
774  param->blend = depth > 8 ? depth > 16 ? blend_copytop_32 : blend_copytop_16 : blend_copytop_8;
775  } else if (param->mode == BLEND_NORMAL) {
776  if (param->opacity == 1)
777  param->blend = depth > 8 ? depth > 16 ? blend_copytop_32 : blend_copytop_16 : blend_copytop_8;
778  else if (param->opacity == 0)
779  param->blend = depth > 8 ? depth > 16 ? blend_copybottom_32 : blend_copybottom_16 : blend_copybottom_8;
780  }
781 
782  if (ARCH_X86)
783  ff_blend_init_x86(param, depth);
784 }
785 
786 static int config_output(AVFilterLink *outlink)
787 {
788  AVFilterContext *ctx = outlink->src;
789  AVFilterLink *toplink = ctx->inputs[TOP];
790  BlendContext *s = ctx->priv;
791  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
792  int ret, plane;
793 
794  if (!s->tblend) {
795  AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
796 
797  if (toplink->format != bottomlink->format) {
798  av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
799  return AVERROR(EINVAL);
800  }
801  if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) {
802  av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
803  "(size %dx%d) do not match the corresponding "
804  "second input link %s parameters (size %dx%d)\n",
805  ctx->input_pads[TOP].name, toplink->w, toplink->h,
806  ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h);
807  return AVERROR(EINVAL);
808  }
809  }
810 
811  outlink->w = toplink->w;
812  outlink->h = toplink->h;
813  outlink->time_base = toplink->time_base;
814  outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
815  outlink->frame_rate = toplink->frame_rate;
816 
817  s->hsub = pix_desc->log2_chroma_w;
818  s->vsub = pix_desc->log2_chroma_h;
819 
820  s->depth = pix_desc->comp[0].depth;
821  s->nb_planes = av_pix_fmt_count_planes(toplink->format);
822 
823  if (!s->tblend)
824  if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
825  return ret;
826 
827  for (plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
828  FilterParams *param = &s->params[plane];
829 
830  if (s->all_mode >= 0)
831  param->mode = s->all_mode;
832  if (s->all_opacity < 1)
833  param->opacity = s->all_opacity;
834 
835  ff_blend_init(param, s->depth);
836 
837  if (s->all_expr && !param->expr_str) {
838  param->expr_str = av_strdup(s->all_expr);
839  if (!param->expr_str)
840  return AVERROR(ENOMEM);
841  }
842  if (param->expr_str) {
843  ret = av_expr_parse(&param->e, param->expr_str, var_names,
844  NULL, NULL, NULL, NULL, 0, ctx);
845  if (ret < 0)
846  return ret;
847  param->blend = s->depth > 8 ? s->depth > 16 ? blend_expr_32bit : blend_expr_16bit : blend_expr_8bit;
848  }
849  }
850 
851  if (s->tblend)
852  return 0;
853 
854  ret = ff_framesync_configure(&s->fs);
855  outlink->time_base = s->fs.time_base;
856 
857  return ret;
858 }
859 
860 #if CONFIG_BLEND_FILTER
861 
862 static int activate(AVFilterContext *ctx)
863 {
864  BlendContext *s = ctx->priv;
865  return ff_framesync_activate(&s->fs);
866 }
867 
868 static const AVFilterPad blend_inputs[] = {
869  {
870  .name = "top",
871  .type = AVMEDIA_TYPE_VIDEO,
872  },{
873  .name = "bottom",
874  .type = AVMEDIA_TYPE_VIDEO,
875  },
876  { NULL }
877 };
878 
879 static const AVFilterPad blend_outputs[] = {
880  {
881  .name = "default",
882  .type = AVMEDIA_TYPE_VIDEO,
883  .config_props = config_output,
884  },
885  { NULL }
886 };
887 
888 AVFilter ff_vf_blend = {
889  .name = "blend",
890  .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
891  .preinit = blend_framesync_preinit,
892  .init = init,
893  .uninit = uninit,
894  .priv_size = sizeof(BlendContext),
895  .query_formats = query_formats,
896  .activate = activate,
897  .inputs = blend_inputs,
898  .outputs = blend_outputs,
899  .priv_class = &blend_class,
900  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
901 };
902 
903 #endif
904 
905 #if CONFIG_TBLEND_FILTER
906 
907 static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
908 {
909  AVFilterContext *ctx = inlink->dst;
910  BlendContext *s = ctx->priv;
911  AVFilterLink *outlink = ctx->outputs[0];
912 
913  if (s->prev_frame) {
914  AVFrame *out;
915 
916  if (ctx->is_disabled)
917  out = av_frame_clone(frame);
918  else
919  out = blend_frame(ctx, frame, s->prev_frame);
920  av_frame_free(&s->prev_frame);
921  s->prev_frame = frame;
922  return ff_filter_frame(outlink, out);
923  }
924  s->prev_frame = frame;
925  return 0;
926 }
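/* tblend keeps the previous input frame in s->prev_frame and blends it as
 * the "bottom" input under the current frame. An illustrative command (not
 * from the original source):
 *
 *     ffmpeg -i input.mp4 -vf "tblend=all_mode=grainextract" diff.mp4
 *
 * visualizes frame-to-frame differences around mid-grey; note that the very
 * first frame produces no output, since there is nothing to blend it with
 * yet. */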
927 
928 static const AVOption tblend_options[] = {
929  COMMON_OPTIONS,
930  { NULL }
931 };
932 
933 AVFILTER_DEFINE_CLASS(tblend);
934 
935 static const AVFilterPad tblend_inputs[] = {
936  {
937  .name = "default",
938  .type = AVMEDIA_TYPE_VIDEO,
939  .filter_frame = tblend_filter_frame,
940  },
941  { NULL }
942 };
943 
944 static const AVFilterPad tblend_outputs[] = {
945  {
946  .name = "default",
947  .type = AVMEDIA_TYPE_VIDEO,
948  .config_props = config_output,
949  },
950  { NULL }
951 };
952 
953 AVFilter ff_vf_tblend = {
954  .name = "tblend",
955  .description = NULL_IF_CONFIG_SMALL("Blend successive frames."),
956  .priv_size = sizeof(BlendContext),
957  .priv_class = &tblend_class,
958  .query_formats = query_formats,
959  .init = init,
960  .uninit = uninit,
961  .inputs = tblend_inputs,
962  .outputs = tblend_outputs,
963  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
964 };
965 
966 #endif