vf_yadif.c
/*
 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"

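/* Parameters describing the plane currently being filtered; a pointer to this
 * struct is handed to filter_slice(), which derives its own slice bounds from
 * jobnr/nb_jobs. */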
typedef struct ThreadData {
    AVFrame *frame;
    int plane;
    int w, h;
    int parity;
    int tff;
} ThreadData;

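/* CHECK(j) scores one candidate interpolation direction: it compares three
 * pixel pairs along diagonal offset j between the line above (cur + mrefs) and
 * the line below (cur + prefs) and, when the score beats the current best,
 * keeps (cur[mrefs + j] + cur[prefs - j]) >> 1 as the spatial prediction.
 * The macro deliberately leaves two braces open; they are closed by the
 * "}} }}" sequences inside FILTER below. */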
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\

/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3. */

#define FILTER(start, end, is_not_edge) \
    for (x = start;  x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1; \
 \
        if (is_not_edge) {\
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
           spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
           spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }
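
/* For each output pixel, FILTER above (1) forms a temporal prediction d from
 * the two fields of the same parity, (2) forms an edge-directed spatial
 * prediction starting from (c + e) >> 1 and refined by CHECK(), and (3), unless
 * mode bit 1 is set, clamps the spatial prediction to d +/- diff, where diff
 * bounds how much the local neighbourhood is changing over time. */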

static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* The function is called with the pointers already pointing to data[3] and
     * with 6 subtracted from the width.  This allows the FILTER macro to be
     * called so that it processes all the pixels normally.  A constant value of
     * true for is_not_edge lets the compiler ignore the if statement. */
    FILTER(0, w, 1)
}
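
/* In practice filter_slice() passes dst + 3 * df and a reduced width here, so
 * the x-3 .. x+3 reads of the spatial check always stay inside the row; the
 * border pixels it skips are handled by filter_edges() below. */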

#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    const int edge = MAX_ALIGN - 1;
    int offset = FFMAX(w - edge, 3);

    /* Only edge pixels need to be processed here.  A constant value of false
     * for is_not_edge should let the compiler ignore the whole branch. */
    FILTER(0, FFMIN(3, w), 0)

    dst  = (uint8_t*)dst1  + offset;
    prev = (uint8_t*)prev1 + offset;
    cur  = (uint8_t*)cur1  + offset;
    next = (uint8_t*)next1 + offset;
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(offset, w - 3, 1)
    offset = FFMAX(offset, w - 3);
    FILTER(offset, w, 0)
}
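
/* filter_edges() covers what filter_line() is not given: the first three pixels
 * of the row, the right-hand tail starting at `offset`, and the final three
 * pixels, using is_not_edge = 0 wherever the spatial check would otherwise read
 * outside the row. */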


static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
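    /* prefs and mrefs arrive in bytes; halve them since this variant indexes
     * uint16_t pointers (one element = 2 bytes). */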
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, w, 1)
}

static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;

    const int edge = MAX_ALIGN / 2 - 1;
    int offset = FFMAX(w - edge, 3);

    mrefs /= 2;
    prefs /= 2;

    FILTER(0, FFMIN(3, w), 0)

    dst   = (uint16_t*)dst1  + offset;
    prev  = (uint16_t*)prev1 + offset;
    cur   = (uint16_t*)cur1  + offset;
    next  = (uint16_t*)next1 + offset;
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(offset, w - 3, 1)
    offset = FFMAX(offset, w - 3);
    FILTER(offset, w, 0)
}

static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    YADIFContext *s = ctx->priv;
    ThreadData *td  = arg;
    int refs = s->cur->linesize[td->plane];
    int df = (s->csp->comp[td->plane].depth + 7) / 8;
    int pix_3 = 3 * df;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    int y;
    int edge = 3 + MAX_ALIGN / df - 1;

    /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
     * we need to call the c variant which avoids this for border pixels
     */
    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            uint8_t *prev = &s->prev->data[td->plane][y * refs];
            uint8_t *cur  = &s->cur ->data[td->plane][y * refs];
            uint8_t *next = &s->next->data[td->plane][y * refs];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            int     mode  = y == 1 || y + 2 == td->h ? 2 : s->mode;
            s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
                           next + pix_3, td->w - edge,
                           y + 1 < td->h ? refs : -refs,
                           y ? -refs : refs,
                           td->parity ^ td->tff, mode);
            s->filter_edges(dst, prev, cur, next, td->w,
                            y + 1 < td->h ? refs : -refs,
                            y ? -refs : refs,
                            td->parity ^ td->tff, mode);
        } else {
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &s->cur->data[td->plane][y * refs], td->w * df);
        }
    }
    return 0;
}
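
/* Only the lines of the field being reconstructed ((y ^ parity) & 1) are
 * interpolated; lines of the kept field are copied through with memcpy().
 * Rows 1 and h-2 are forced to mode 2, which disables the check that would
 * otherwise read two rows above/below and fall outside the picture. */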

static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
    int i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;

        if (i == 1 || i == 2) {
            w = AV_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
            h = AV_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
        }

        td.w     = w;
        td.h     = h;
        td.plane = i;

        ff_filter_execute(ctx, filter_slice, &td, NULL,
                          FFMIN(h, ff_filter_get_nb_threads(ctx)));
    }

    emms_c();
}

static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    av_frame_free(&yadif->prev);
    av_frame_free(&yadif->cur );
    av_frame_free(&yadif->next);
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_NONE
};

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    YADIFContext *s = ctx->priv;

    outlink->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    outlink->w         = ctx->inputs[0]->w;
    outlink->h         = ctx->inputs[0]->h;

    if (s->mode & 1)
        outlink->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                       (AVRational){2, 1});

    if (outlink->w < 3 || outlink->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        return AVERROR(EINVAL);
    }

    s->csp = av_pix_fmt_desc_get(outlink->format);
    s->filter = filter;
    if (s->csp->comp[0].depth > 8) {
        s->filter_line  = filter_line_c_16bit;
        s->filter_edges = filter_edges_16bit;
    } else {
        s->filter_line  = filter_line_c;
        s->filter_edges = filter_edges;
    }

    if (ARCH_X86)
        ff_yadif_init_x86(s);

    return 0;
}


static const AVClass yadif_class = {
    .class_name = "yadif",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = ff_yadif_filter_frame,
    },
};

static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_output,
    },
};

const AVFilter ff_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
    .priv_size     = sizeof(YADIFContext),
    .priv_class    = &yadif_class,
    .uninit        = uninit,
    FILTER_INPUTS(avfilter_vf_yadif_inputs),
    FILTER_OUTPUTS(avfilter_vf_yadif_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
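
/* Illustrative usage of the filter defined above (the option names mode, parity
 * and deint come from ff_yadif_options in yadif_common.c; the input and output
 * file names are placeholders):
 *
 *     ffmpeg -i interlaced.ts -vf yadif=mode=send_field:parity=auto:deint=interlaced out.mp4
 *
 * mode=send_field emits one frame per field (doubling the output frame rate),
 * parity selects or auto-detects the field order, and deint=interlaced leaves
 * frames not flagged as interlaced untouched. */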