vf_yadif_cuda.c
/*
 * Copyright (C) 2018 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/cuda_check.h"
#include "internal.h"
#include "yadif.h"

#include "cuda/load_helper.h"

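/*
 * The deinterlacing kernels live in a separately compiled CUDA source and are
 * embedded into the library as a PTX blob; ff_cuda_load_module() loads this
 * blob into the filter's CUDA context at output-configuration time.
 */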
extern const unsigned char ff_vf_yadif_cuda_ptx_data[];
extern const unsigned int ff_vf_yadif_cuda_ptx_len;

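/*
 * Filter private context.  The generic yadif code operates on the embedded
 * YADIFContext (which is why it comes first); the remaining members hold the
 * CUDA device state and the per-format kernel handles resolved from the
 * loaded module.
 */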
typedef struct DeintCUDAContext {
    YADIFContext yadif;

    AVCUDADeviceContext *hwctx;
    AVBufferRef         *device_ref;
    AVBufferRef         *input_frames_ref;
    AVHWFramesContext   *input_frames;

    CUcontext   cu_ctx;
    CUstream    stream;
    CUmodule    cu_module;
    CUfunction  cu_func_uchar;
    CUfunction  cu_func_uchar2;
    CUfunction  cu_func_ushort;
    CUfunction  cu_func_ushort2;
} DeintCUDAContext;

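/*
 * Kernel launches use a fixed 32x16 thread block; DIV_UP() rounds the grid up
 * so that every destination pixel is covered.  For a 1920x1080 luma plane the
 * grid is DIV_UP(1920, 32) x DIV_UP(1080, 16) = 60x68 blocks.
 */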
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
#define ALIGN_UP(a, b) (((a) + (b) - 1) & ~((b) - 1))
#define BLOCKX 32
#define BLOCKY 16

#define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, s->hwctx->internal->cuda_dl, x)

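/*
 * Wrap the previous/current/next source planes in pitch-linear 2D texture
 * objects, launch the selected kernel over the destination plane, then
 * destroy the textures again.  Source reads go through the texture objects;
 * the destination is written directly through its device pointer.
 */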
static CUresult call_kernel(AVFilterContext *ctx, CUfunction func,
                            CUdeviceptr prev, CUdeviceptr cur, CUdeviceptr next,
                            CUarray_format format, int channels,
                            int src_width,  // Width is pixels per channel
                            int src_height, // Height is pixels per channel
                            int src_pitch,  // Pitch is bytes
                            CUdeviceptr dst,
                            int dst_width,  // Width is pixels per channel
                            int dst_height, // Height is pixels per channel
                            int dst_pitch,  // Pitch is pixels per channel
                            int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    CudaFunctions *cu = s->hwctx->internal->cuda_dl;
    CUtexObject tex_prev = 0, tex_cur = 0, tex_next = 0;
    int ret;
    int skip_spatial_check = s->yadif.mode & 2;

    void *args[] = { &dst, &tex_prev, &tex_cur, &tex_next,
                     &dst_width, &dst_height, &dst_pitch,
                     &src_width, &src_height, &parity, &tff,
                     &skip_spatial_check };

    CUDA_TEXTURE_DESC tex_desc = {
        .filterMode = CU_TR_FILTER_MODE_POINT,
        .flags = CU_TRSF_READ_AS_INTEGER,
    };

    CUDA_RESOURCE_DESC res_desc = {
        .resType = CU_RESOURCE_TYPE_PITCH2D,
        .res.pitch2D.format = format,
        .res.pitch2D.numChannels = channels,
        .res.pitch2D.width = src_width,
        .res.pitch2D.height = src_height,
        .res.pitch2D.pitchInBytes = src_pitch,
    };

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)prev;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_prev, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)cur;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_cur, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)next;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_next, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuLaunchKernel(func,
                                      DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1,
                                      BLOCKX, BLOCKY, 1,
                                      0, s->stream, args, NULL));

exit:
    if (tex_prev)
        CHECK_CU(cu->cuTexObjectDestroy(tex_prev));
    if (tex_cur)
        CHECK_CU(cu->cuTexObjectDestroy(tex_cur));
    if (tex_next)
        CHECK_CU(cu->cuTexObjectDestroy(tex_next));

    return ret;
}

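/*
 * YADIFContext callback: deinterlace one output frame.  Each plane is
 * processed as a whole, so packed components (e.g. the interleaved U/V plane
 * of NV12) map onto the 2-channel kernel variants, and chroma plane
 * dimensions are derived from the luma size via the log2 chroma shifts.
 */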
static void filter(AVFilterContext *ctx, AVFrame *dst,
                   int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu = s->hwctx->internal->cuda_dl;
    CUcontext dummy;
    int i, ret;

    ret = CHECK_CU(cu->cuCtxPushCurrent(s->cu_ctx));
    if (ret < 0)
        return;

    for (i = 0; i < y->csp->nb_components; i++) {
        CUfunction func;
        CUarray_format format;
        int pixel_size, channels;
        const AVComponentDescriptor *comp = &y->csp->comp[i];

        if (comp->plane < i) {
            // We process planes as a whole, so don't reprocess
            // them for additional components
            continue;
        }

        pixel_size = (comp->depth + comp->shift) / 8;
        channels = comp->step / pixel_size;
        if (pixel_size > 2 || channels > 2) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        switch (pixel_size) {
        case 1:
            func = channels == 1 ? s->cu_func_uchar : s->cu_func_uchar2;
            format = CU_AD_FORMAT_UNSIGNED_INT8;
            break;
        case 2:
            func = channels == 1 ? s->cu_func_ushort : s->cu_func_ushort2;
            format = CU_AD_FORMAT_UNSIGNED_INT16;
            break;
        default:
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        av_log(ctx, AV_LOG_TRACE,
               "Deinterlacing plane %d: pixel_size: %d channels: %d\n",
               comp->plane, pixel_size, channels);
        call_kernel(ctx, func,
                    (CUdeviceptr)y->prev->data[i],
                    (CUdeviceptr)y->cur->data[i],
                    (CUdeviceptr)y->next->data[i],
                    format, channels,
                    AV_CEIL_RSHIFT(y->cur->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(y->cur->height, i ? y->csp->log2_chroma_h : 0),
                    y->cur->linesize[i],
                    (CUdeviceptr)dst->data[i],
                    AV_CEIL_RSHIFT(dst->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(dst->height, i ? y->csp->log2_chroma_h : 0),
                    dst->linesize[i] / comp->step,
                    parity, tff);
    }

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    return;
}

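/*
 * Unload the CUDA module (if one was loaded) and drop every reference this
 * filter holds: the cached YADIF frames, the device context and the input
 * frames context.
 */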
static av_cold void deint_cuda_uninit(AVFilterContext *ctx)
{
    CUcontext dummy;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;

    if (s->hwctx && s->cu_module) {
        CudaFunctions *cu = s->hwctx->internal->cuda_dl;
        CHECK_CU(cu->cuCtxPushCurrent(s->cu_ctx));
        CHECK_CU(cu->cuModuleUnload(s->cu_module));
        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    av_frame_free(&y->prev);
    av_frame_free(&y->cur);
    av_frame_free(&y->next);

    av_buffer_unref(&s->device_ref);
    s->hwctx = NULL;
    av_buffer_unref(&s->input_frames_ref);
    s->input_frames = NULL;
}

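/*
 * The input must carry a hardware frames context; keep a reference to it so
 * config_output() can reuse the same device when it builds the output pool.
 */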
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DeintCUDAContext *s = ctx->priv;

    if (!inlink->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    s->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    if (!s->input_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create a reference to the "
               "input frames context.\n");
        return AVERROR(ENOMEM);
    }
    s->input_frames = (AVHWFramesContext*)s->input_frames_ref->data;

    return 0;
}

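/*
 * Output configuration: allocate a CUDA frame pool matching the input
 * sw_format, halve the output time base (and double the frame rate in the
 * frame-per-field modes), then load the PTX module and resolve the four
 * kernel entry points.
 */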
static int config_output(AVFilterLink *link)
{
    AVHWFramesContext *output_frames;
    AVFilterContext *ctx = link->src;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu;
    int ret = 0;
    CUcontext dummy;

    av_assert0(s->input_frames);
    s->device_ref = av_buffer_ref(s->input_frames->device_ref);
    if (!s->device_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create a reference to the "
               "device context.\n");
        return AVERROR(ENOMEM);
    }
    s->hwctx = ((AVHWDeviceContext*)s->device_ref->data)->hwctx;
    s->cu_ctx = s->hwctx->cuda_ctx;
    s->stream = s->hwctx->stream;
    cu = s->hwctx->internal->cuda_dl;

    link->hw_frames_ctx = av_hwframe_ctx_alloc(s->device_ref);
    if (!link->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        ret = AVERROR(ENOMEM);
        goto exit;
    }

    output_frames = (AVHWFramesContext*)link->hw_frames_ctx->data;

    output_frames->format    = AV_PIX_FMT_CUDA;
    output_frames->sw_format = s->input_frames->sw_format;
    output_frames->width     = ctx->inputs[0]->w;
    output_frames->height    = ctx->inputs[0]->h;

    output_frames->initial_pool_size = 4;

    ret = ff_filter_init_hw_frames(ctx, link, 10);
    if (ret < 0)
        goto exit;

    ret = av_hwframe_ctx_init(link->hw_frames_ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise CUDA frame "
               "context for output: %d\n", ret);
        goto exit;
    }

    link->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    link->w         = ctx->inputs[0]->w;
    link->h         = ctx->inputs[0]->h;

    if (y->mode & 1)
        link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                    (AVRational){2, 1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        ret = AVERROR(EINVAL);
        goto exit;
    }

    y->csp = av_pix_fmt_desc_get(output_frames->sw_format);
    y->filter = filter;

    ret = CHECK_CU(cu->cuCtxPushCurrent(s->cu_ctx));
    if (ret < 0)
        goto exit;

    ret = ff_cuda_load_module(ctx, s->hwctx, &s->cu_module,
                              ff_vf_yadif_cuda_ptx_data, ff_vf_yadif_cuda_ptx_len);
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, "yadif_uchar"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "yadif_uchar2"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "yadif_ushort"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "yadif_ushort2"));
    if (ret < 0)
        goto exit;

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));

    return ret;
}

static const AVClass yadif_cuda_class = {
    .class_name = "yadif_cuda",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

static const AVFilterPad deint_cuda_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = ff_yadif_filter_frame,
        .config_props  = config_input,
    },
};

static const AVFilterPad deint_cuda_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_output,
    },
};

const AVFilter ff_vf_yadif_cuda = {
    .name           = "yadif_cuda",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlace CUDA frames"),
    .priv_size      = sizeof(DeintCUDAContext),
    .priv_class     = &yadif_cuda_class,
    .uninit         = deint_cuda_uninit,
    FILTER_INPUTS(deint_cuda_inputs),
    FILTER_OUTPUTS(deint_cuda_outputs),
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
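/*
 * Example invocation, keeping decode, deinterlace and encode on the GPU;
 * the option names (mode/parity/deint) come from ff_yadif_options and are
 * shared with the software yadif filter, e.g.:
 *
 *   ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i in.ts \
 *          -vf yadif_cuda=mode=send_field -c:v h264_nvenc out.mp4
 */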