vf_bwdif.c
/*
 * BobWeaver Deinterlacing Filter
 * Copyright (C) 2016 Thomas Mundt <loudmax@yahoo.de>
 *
 * Based on YADIF (Yet Another Deinterlacing Filter)
 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * With use of Weston 3 Field Deinterlacing Filter algorithm
 * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
 * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
 * Based on the process described by Martin Weston for BBC R&D
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "bwdif.h"

/*
 * Filter coefficients coef_lf and coef_hf taken from BBC PH-2071 (Weston 3 Field Deinterlacer).
 * Used when there is spatial and temporal interpolation.
 * Filter coefficients coef_sp are used when there is spatial interpolation only.
 * Adjusted for matching visual sharpness impression of spatial and temporal interpolation.
 */
static const uint16_t coef_lf[2] = { 4309, 213 };
static const uint16_t coef_hf[3] = { 5570, 3801, 1016 };
static const uint16_t coef_sp[2] = { 5077, 981 };
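
/*
 * Scaling note: all three kernels are normalised so that a flat field passes
 * through unchanged after the final >> 13 in the filter macros below, i.e.
 * the DC gain is 2^13 = 8192:
 *   coef_lf: 2*4309 - 2*213           = 8192
 *   coef_sp: 2*5077 - 2*981           = 8192
 *   coef_hf: 2*5570 - 4*3801 + 4*1016 = 0  (the high-frequency term is DC-free
 *            and only adds vertical detail on top of the coef_lf term)
 */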

typedef struct ThreadData {
    AVFrame *frame;
    int plane;
    int w, h;
    int parity;
    int tff;
} ThreadData;

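/*
 * Interpolate one missing line from the current frame only: a 4-tap vertical
 * filter (coef_sp) on the existing field lines at offsets +-1 and +-3.
 * Selected for the first and last field of a sequence (YADIF_FIELD_END),
 * where no temporal neighbours are available.
 */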
#define FILTER_INTRA() \
    for (x = 0; x < w; x++) { \
        interpol = (coef_sp[0] * (cur[mrefs] + cur[prefs]) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
        dst[0] = av_clip(interpol, 0, clip_max); \
 \
        dst++; \
        cur++; \
    }

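/*
 * First half of the full filter loop: gather the vertical neighbours c and e,
 * the temporal average d and the three temporal difference measures, then
 * write d directly wherever the surrounding fields already agree (diff == 0);
 * otherwise fall through to the interpolation code placed between FILTER1
 * and FILTER2.
 */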
#define FILTER1() \
    for (x = 0; x < w; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0]) >> 1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 = (FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e)) >> 1; \
        int temporal_diff2 = (FFABS(next[mrefs] - c) + FFABS(next[prefs] - e)) >> 1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
 \
        if (!diff) { \
            dst[0] = d; \
        } else {

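/*
 * Spatial consistency check shared by FILTER_LINE and FILTER_EDGE: derive
 * bounds from the neighbouring lines two above/below (b, f) and from d - c
 * and d - e, then widen 'diff' to FFMAX3(diff, min, -max) so the clamp in
 * FILTER2 respects local vertical structure.
 */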
#define SPAT_CHECK() \
    int b = ((prev2[mrefs2] + next2[mrefs2]) >> 1) - c; \
    int f = ((prev2[prefs2] + next2[prefs2]) >> 1) - e; \
    int dc = d - c; \
    int de = d - e; \
    int max = FFMAX3(de, dc, FFMIN(b, f)); \
    int min = FFMIN3(de, dc, FFMAX(b, f)); \
    diff = FFMAX3(diff, min, -max);

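/*
 * Interior interpolation: when the vertical contrast |c - e| exceeds
 * temporal_diff0, combine the coef_hf kernel (applied vertically to prev2 and
 * next2) with the coef_lf taps on the current field; otherwise use the purely
 * spatial coef_sp kernel, as in FILTER_INTRA.
 */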
#define FILTER_LINE() \
    SPAT_CHECK() \
    if (FFABS(c - e) > temporal_diff0) { \
        interpol = (((coef_hf[0] * (prev2[0] + next2[0]) \
            - coef_hf[1] * (prev2[mrefs2] + next2[mrefs2] + prev2[prefs2] + next2[prefs2]) \
            + coef_hf[2] * (prev2[mrefs4] + next2[mrefs4] + prev2[prefs4] + next2[prefs4])) >> 2) \
            + coef_lf[0] * (c + e) - coef_lf[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
    } else { \
        interpol = (coef_sp[0] * (c + e) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
    }

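/*
 * Interpolation for lines near the top and bottom of the frame, where the
 * wider taps of FILTER_LINE would reach outside the picture: average c and e,
 * optionally applying SPAT_CHECK first.
 */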
#define FILTER_EDGE() \
    if (spat) { \
        SPAT_CHECK() \
    } \
    interpol = (c + e) >> 1;

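/*
 * Second half of the filter loop: clamp the interpolated value to d +/- diff,
 * clip it to the sample range and advance all pointers to the next column.
 * Closes the else branch and the loop opened by FILTER1.
 */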
#define FILTER2() \
            if (interpol > d + diff) \
                interpol = d + diff; \
            else if (interpol < d - diff) \
                interpol = d - diff; \
 \
            dst[0] = av_clip(interpol, 0, clip_max); \
        } \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }

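/*
 * The macros above are instantiated twice: on uint8_t pointers for 8-bit
 * formats and on uint16_t pointers for higher bit depths.  The appropriate
 * set is selected in ff_bwdif_init_filter_line() below.
 */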
static void filter_intra(void *dst1, void *cur1, int w, int prefs, int mrefs,
                         int prefs3, int mrefs3, int parity, int clip_max)
{
    uint8_t *dst = dst1;
    uint8_t *cur = cur1;
    int interpol, x;

    FILTER_INTRA()
}

static void filter_line_c(void *dst1, void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int prefs2, int mrefs2,
                          int prefs3, int mrefs3, int prefs4, int mrefs4,
                          int parity, int clip_max)
{
    uint8_t *dst   = dst1;
    uint8_t *prev  = prev1;
    uint8_t *cur   = cur1;
    uint8_t *next  = next1;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
    int interpol, x;

    FILTER1()
    FILTER_LINE()
    FILTER2()
}

static void filter_edge(void *dst1, void *prev1, void *cur1, void *next1,
                        int w, int prefs, int mrefs, int prefs2, int mrefs2,
                        int parity, int clip_max, int spat)
{
    uint8_t *dst   = dst1;
    uint8_t *prev  = prev1;
    uint8_t *cur   = cur1;
    uint8_t *next  = next1;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
    int interpol, x;

    FILTER1()
    FILTER_EDGE()
    FILTER2()
}

static void filter_intra_16bit(void *dst1, void *cur1, int w, int prefs, int mrefs,
                               int prefs3, int mrefs3, int parity, int clip_max)
{
    uint16_t *dst = dst1;
    uint16_t *cur = cur1;
    int interpol, x;

    FILTER_INTRA()
}

static void filter_line_c_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int prefs2, int mrefs2,
                                int prefs3, int mrefs3, int prefs4, int mrefs4,
                                int parity, int clip_max)
{
    uint16_t *dst   = dst1;
    uint16_t *prev  = prev1;
    uint16_t *cur   = cur1;
    uint16_t *next  = next1;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    int interpol, x;

    FILTER1()
    FILTER_LINE()
    FILTER2()
}

static void filter_edge_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                              int w, int prefs, int mrefs, int prefs2, int mrefs2,
                              int parity, int clip_max, int spat)
{
    uint16_t *dst   = dst1;
    uint16_t *prev  = prev1;
    uint16_t *cur   = cur1;
    uint16_t *next  = next1;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    int interpol, x;

    FILTER1()
    FILTER_EDGE()
    FILTER2()
}

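/*
 * Slice worker: each job deinterlaces a horizontal band of one plane.  Lines
 * of the field being reconstructed are interpolated (intra-only at the start
 * or end of a sequence, edge filter near the frame borders, full filter
 * elsewhere); lines of the existing field are copied through unchanged.
 */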
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    BWDIFContext *s = ctx->priv;
    YADIFContext *yadif = &s->yadif;
    ThreadData *td = arg;
    int linesize = yadif->cur->linesize[td->plane];
    int clip_max = (1 << (yadif->csp->comp[td->plane].depth)) - 1;
    int df = (yadif->csp->comp[td->plane].depth + 7) / 8;
    int refs = linesize / df;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    int y;

    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            uint8_t *prev = &yadif->prev->data[td->plane][y * linesize];
            uint8_t *cur  = &yadif->cur ->data[td->plane][y * linesize];
            uint8_t *next = &yadif->next->data[td->plane][y * linesize];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            if (yadif->current_field == YADIF_FIELD_END) {
                s->filter_intra(dst, cur, td->w, (y + df) < td->h ? refs : -refs,
                                y > (df - 1) ? -refs : refs,
                                (y + 3*df) < td->h ? 3 * refs : -refs,
                                y > (3*df - 1) ? -3 * refs : refs,
                                td->parity ^ td->tff, clip_max);
            } else if ((y < 4) || ((y + 5) > td->h)) {
                s->filter_edge(dst, prev, cur, next, td->w,
                               (y + df) < td->h ? refs : -refs,
                               y > (df - 1) ? -refs : refs,
                               refs << 1, -(refs << 1),
                               td->parity ^ td->tff, clip_max,
                               (y < 2) || ((y + 3) > td->h) ? 0 : 1);
            } else {
                s->filter_line(dst, prev, cur, next, td->w,
                               refs, -refs, refs << 1, -(refs << 1),
                               3 * refs, -3 * refs, refs << 2, -(refs << 2),
                               td->parity ^ td->tff, clip_max);
            }
        } else {
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &yadif->cur->data[td->plane][y * linesize], td->w * df);
        }
    }
    return 0;
}

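/*
 * Per-frame entry point installed as yadif->filter in config_props(): filter
 * every plane of the output picture, accounting for chroma subsampling, and
 * split the per-plane work across the available filter threads.
 */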
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    BWDIFContext *bwdif = ctx->priv;
    YADIFContext *yadif = &bwdif->yadif;
    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
    int i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;

        if (i == 1 || i == 2) {
            w = AV_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
            h = AV_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
        }

        td.w = w;
        td.h = h;
        td.plane = i;

        ff_filter_execute(ctx, filter_slice, &td, NULL,
                          FFMIN(h, ff_filter_get_nb_threads(ctx)));
    }
    if (yadif->current_field == YADIF_FIELD_END) {
        yadif->current_field = YADIF_FIELD_NORMAL;
    }

    emms_c();
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BWDIFContext *bwdif = ctx->priv;
    YADIFContext *yadif = &bwdif->yadif;

    av_frame_free(&yadif->prev);
    av_frame_free(&yadif->cur );
    av_frame_free(&yadif->next);
    ff_ccfifo_uninit(&yadif->cc_fifo);
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};

static int config_props(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    BWDIFContext *s = link->src->priv;
    YADIFContext *yadif = &s->yadif;
    int ret;

    link->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    link->w         = link->src->inputs[0]->w;
    link->h         = link->src->inputs[0]->h;

    if (yadif->mode & 1)
        link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2, 1});
    else
        link->frame_rate = ctx->inputs[0]->frame_rate;

    ret = ff_ccfifo_init(&yadif->cc_fifo, link->frame_rate, ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failure to setup CC FIFO queue\n");
        return ret;
    }

    if (link->w < 3 || link->h < 4) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or 4 lines is not supported\n");
        return AVERROR(EINVAL);
    }

    yadif->csp = av_pix_fmt_desc_get(link->format);
    yadif->filter = filter;
    ff_bwdif_init_filter_line(s, yadif->csp->comp[0].depth);

    return 0;
}

av_cold void ff_bwdif_init_filter_line(BWDIFContext *s, int bit_depth)
{
    if (bit_depth > 8) {
        s->filter_intra = filter_intra_16bit;
        s->filter_line  = filter_line_c_16bit;
        s->filter_edge  = filter_edge_16bit;
    } else {
        s->filter_intra = filter_intra;
        s->filter_line  = filter_line_c;
        s->filter_edge  = filter_edge;
    }

#if ARCH_X86
    ff_bwdif_init_x86(s, bit_depth);
#endif
}

#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

static const AVOption bwdif_options[] = {
    { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FIELD}, 0, 1, FLAGS, "mode"},
    CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
    CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),

    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
    CONST("tff",  "assume top field first",    YADIF_PARITY_TFF,  "parity"),
    CONST("bff",  "assume bottom field first", YADIF_PARITY_BFF,  "parity"),
    CONST("auto", "auto detect parity",        YADIF_PARITY_AUTO, "parity"),

    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       YADIF_DEINT_ALL,        "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),

    { NULL }
};

AVFILTER_DEFINE_CLASS(bwdif);

static const AVFilterPad avfilter_vf_bwdif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = ff_yadif_filter_frame,
    },
};

static const AVFilterPad avfilter_vf_bwdif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_props,
    },
};

const AVFilter ff_vf_bwdif = {
    .name          = "bwdif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
    .priv_size     = sizeof(BWDIFContext),
    .priv_class    = &bwdif_class,
    .uninit        = uninit,
    FILTER_INPUTS(avfilter_vf_bwdif_inputs),
    FILTER_OUTPUTS(avfilter_vf_bwdif_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};