FFmpeg
hwcontext_cuda.c
Go to the documentation of this file.
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
18 
#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
#include "hwcontext_internal.h"
#include "hwcontext_cuda_internal.h"
#if CONFIG_VULKAN
#include "hwcontext_vulkan.h"
#endif
#include "cuda_check.h"
#include "imgutils.h"
#include "mem.h"
#include "pixdesc.h"
#include "pixfmt.h"
32 
/* Alignment (in bytes) used when sizing buffers and laying out frame planes. */
#define CUDA_FRAME_ALIGNMENT 256
34 
/**
 * Per-frames-context private state, allocated via HWContextType.frames_priv_size.
 */
typedef struct CUDAFramesContext {
    // log2 chroma subsampling shifts of sw_format, filled in cuda_frames_init()
    // by av_pix_fmt_get_chroma_sub_sample(); shift_height is used by
    // cuda_transfer_data() to size the chroma-plane copies.
    int shift_width, shift_height;
} CUDAFramesContext;
39 static const enum AVPixelFormat supported_formats[] = {
49 #if CONFIG_VULKAN
51 #endif
52 };
53 
/* Evaluate a CUDA driver-API call, logging failures against device_ctx.
 * Requires `device_ctx` and `cu` to be in scope at every use site. */
#define CHECK_CU(x) FF_CUDA_CHECK_DL(device_ctx, cu, x)
55 
57  const void *hwconfig,
58  AVHWFramesConstraints *constraints)
59 {
60  int i;
61 
63  sizeof(*constraints->valid_sw_formats));
64  if (!constraints->valid_sw_formats)
65  return AVERROR(ENOMEM);
66 
67  for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
68  constraints->valid_sw_formats[i] = supported_formats[i];
69  constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;
70 
71  constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
72  if (!constraints->valid_hw_formats)
73  return AVERROR(ENOMEM);
74 
75  constraints->valid_hw_formats[0] = AV_PIX_FMT_CUDA;
76  constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
77 
78  return 0;
79 }
80 
81 static void cuda_buffer_free(void *opaque, uint8_t *data)
82 {
83  AVHWFramesContext *ctx = opaque;
84  AVHWDeviceContext *device_ctx = ctx->device_ctx;
85  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
86  CudaFunctions *cu = hwctx->internal->cuda_dl;
87 
88  CUcontext dummy;
89 
90  CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
91 
92  CHECK_CU(cu->cuMemFree((CUdeviceptr)data));
93 
94  CHECK_CU(cu->cuCtxPopCurrent(&dummy));
95 }
96 
97 static AVBufferRef *cuda_pool_alloc(void *opaque, int size)
98 {
99  AVHWFramesContext *ctx = opaque;
100  AVHWDeviceContext *device_ctx = ctx->device_ctx;
101  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
102  CudaFunctions *cu = hwctx->internal->cuda_dl;
103 
104  AVBufferRef *ret = NULL;
105  CUcontext dummy = NULL;
106  CUdeviceptr data;
107  int err;
108 
109  err = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
110  if (err < 0)
111  return NULL;
112 
113  err = CHECK_CU(cu->cuMemAlloc(&data, size));
114  if (err < 0)
115  goto fail;
116 
117  ret = av_buffer_create((uint8_t*)data, size, cuda_buffer_free, ctx, 0);
118  if (!ret) {
119  CHECK_CU(cu->cuMemFree(data));
120  goto fail;
121  }
122 
123 fail:
124  CHECK_CU(cu->cuCtxPopCurrent(&dummy));
125  return ret;
126 }
127 
129 {
130  CUDAFramesContext *priv = ctx->internal->priv;
131  int i;
132 
133  for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
134  if (ctx->sw_format == supported_formats[i])
135  break;
136  }
137  if (i == FF_ARRAY_ELEMS(supported_formats)) {
138  av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
140  return AVERROR(ENOSYS);
141  }
142 
144 
145  if (!ctx->pool) {
147  if (size < 0)
148  return size;
149 
151  if (!ctx->internal->pool_internal)
152  return AVERROR(ENOMEM);
153  }
154 
155  return 0;
156 }
157 
159 {
160  int res;
161 
162  frame->buf[0] = av_buffer_pool_get(ctx->pool);
163  if (!frame->buf[0])
164  return AVERROR(ENOMEM);
165 
166  res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
167  ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
168  if (res < 0)
169  return res;
170 
171  // YUV420P is a special case.
172  // Nvenc expects the U/V planes in swapped order from how ffmpeg expects them, also chroma is half-aligned
173  if (ctx->sw_format == AV_PIX_FMT_YUV420P) {
174  frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
175  frame->data[2] = frame->data[1];
176  frame->data[1] = frame->data[2] + frame->linesize[2] * ctx->height / 2;
177  }
178 
179  frame->format = AV_PIX_FMT_CUDA;
180  frame->width = ctx->width;
181  frame->height = ctx->height;
182 
183  return 0;
184 }
185 
188  enum AVPixelFormat **formats)
189 {
190  enum AVPixelFormat *fmts;
191 
192  fmts = av_malloc_array(2, sizeof(*fmts));
193  if (!fmts)
194  return AVERROR(ENOMEM);
195 
196  fmts[0] = ctx->sw_format;
197  fmts[1] = AV_PIX_FMT_NONE;
198 
199  *formats = fmts;
200 
201  return 0;
202 }
203 
205  const AVFrame *src)
206 {
207  CUDAFramesContext *priv = ctx->internal->priv;
208  AVHWDeviceContext *device_ctx = ctx->device_ctx;
209  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
210  CudaFunctions *cu = hwctx->internal->cuda_dl;
211 
212  CUcontext dummy;
213  int i, ret;
214 
215  ret = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
216  if (ret < 0)
217  return ret;
218 
219  for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
220  CUDA_MEMCPY2D cpy = {
221  .srcPitch = src->linesize[i],
222  .dstPitch = dst->linesize[i],
223  .WidthInBytes = FFMIN(src->linesize[i], dst->linesize[i]),
224  .Height = src->height >> ((i == 0 || i == 3) ? 0 : priv->shift_height),
225  };
226 
227  if (src->hw_frames_ctx) {
228  cpy.srcMemoryType = CU_MEMORYTYPE_DEVICE;
229  cpy.srcDevice = (CUdeviceptr)src->data[i];
230  } else {
231  cpy.srcMemoryType = CU_MEMORYTYPE_HOST;
232  cpy.srcHost = src->data[i];
233  }
234 
235  if (dst->hw_frames_ctx) {
236  cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
237  cpy.dstDevice = (CUdeviceptr)dst->data[i];
238  } else {
239  cpy.dstMemoryType = CU_MEMORYTYPE_HOST;
240  cpy.dstHost = dst->data[i];
241  }
242 
243  ret = CHECK_CU(cu->cuMemcpy2DAsync(&cpy, hwctx->stream));
244  if (ret < 0)
245  goto exit;
246  }
247 
248  if (!dst->hw_frames_ctx) {
249  ret = CHECK_CU(cu->cuStreamSynchronize(hwctx->stream));
250  if (ret < 0)
251  goto exit;
252  }
253 
254 exit:
255  CHECK_CU(cu->cuCtxPopCurrent(&dummy));
256 
257  return 0;
258 }
259 
260 static void cuda_device_uninit(AVHWDeviceContext *device_ctx)
261 {
262  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
263 
264  if (hwctx->internal) {
265  CudaFunctions *cu = hwctx->internal->cuda_dl;
266 
267  if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
269  CHECK_CU(cu->cuDevicePrimaryCtxRelease(hwctx->internal->cuda_device));
270  else
271  CHECK_CU(cu->cuCtxDestroy(hwctx->cuda_ctx));
272 
273  hwctx->cuda_ctx = NULL;
274  }
275 
276  cuda_free_functions(&hwctx->internal->cuda_dl);
277  }
278 
279  av_freep(&hwctx->internal);
280 }
281 
283 {
284  AVCUDADeviceContext *hwctx = ctx->hwctx;
285  int ret;
286 
287  if (!hwctx->internal) {
288  hwctx->internal = av_mallocz(sizeof(*hwctx->internal));
289  if (!hwctx->internal)
290  return AVERROR(ENOMEM);
291  }
292 
293  if (!hwctx->internal->cuda_dl) {
294  ret = cuda_load_functions(&hwctx->internal->cuda_dl, ctx);
295  if (ret < 0) {
296  av_log(ctx, AV_LOG_ERROR, "Could not dynamically load CUDA\n");
297  goto error;
298  }
299  }
300 
301  return 0;
302 
303 error:
304  cuda_device_uninit(ctx);
305  return ret;
306 }
307 
308 static int cuda_context_init(AVHWDeviceContext *device_ctx, int flags) {
309  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
310  CudaFunctions *cu;
311  CUcontext dummy;
312  int ret, dev_active = 0;
313  unsigned int dev_flags = 0;
314 
315  const unsigned int desired_flags = CU_CTX_SCHED_BLOCKING_SYNC;
316 
317  cu = hwctx->internal->cuda_dl;
318 
319  hwctx->internal->flags = flags;
320 
321  if (flags & AV_CUDA_USE_PRIMARY_CONTEXT) {
322  ret = CHECK_CU(cu->cuDevicePrimaryCtxGetState(hwctx->internal->cuda_device,
323  &dev_flags, &dev_active));
324  if (ret < 0)
325  return ret;
326 
327  if (dev_active && dev_flags != desired_flags) {
328  av_log(device_ctx, AV_LOG_ERROR, "Primary context already active with incompatible flags.\n");
329  return AVERROR(ENOTSUP);
330  } else if (dev_flags != desired_flags) {
331  ret = CHECK_CU(cu->cuDevicePrimaryCtxSetFlags(hwctx->internal->cuda_device,
332  desired_flags));
333  if (ret < 0)
334  return ret;
335  }
336 
337  ret = CHECK_CU(cu->cuDevicePrimaryCtxRetain(&hwctx->cuda_ctx,
338  hwctx->internal->cuda_device));
339  if (ret < 0)
340  return ret;
341  } else {
342  ret = CHECK_CU(cu->cuCtxCreate(&hwctx->cuda_ctx, desired_flags,
343  hwctx->internal->cuda_device));
344  if (ret < 0)
345  return ret;
346 
347  CHECK_CU(cu->cuCtxPopCurrent(&dummy));
348  }
349 
350  hwctx->internal->is_allocated = 1;
351 
352  // Setting stream to NULL will make functions automatically use the default CUstream
353  hwctx->stream = NULL;
354 
355  return 0;
356 }
357 
358 static int cuda_device_create(AVHWDeviceContext *device_ctx,
359  const char *device,
360  AVDictionary *opts, int flags)
361 {
362  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
363  CudaFunctions *cu;
364  int ret, device_idx = 0;
365 
366  if (device)
367  device_idx = strtol(device, NULL, 0);
368 
369  if (cuda_device_init(device_ctx) < 0)
370  goto error;
371 
372  cu = hwctx->internal->cuda_dl;
373 
374  ret = CHECK_CU(cu->cuInit(0));
375  if (ret < 0)
376  goto error;
377 
378  ret = CHECK_CU(cu->cuDeviceGet(&hwctx->internal->cuda_device, device_idx));
379  if (ret < 0)
380  goto error;
381 
382  ret = cuda_context_init(device_ctx, flags);
383  if (ret < 0)
384  goto error;
385 
386  return 0;
387 
388 error:
389  cuda_device_uninit(device_ctx);
390  return AVERROR_UNKNOWN;
391 }
392 
393 static int cuda_device_derive(AVHWDeviceContext *device_ctx,
394  AVHWDeviceContext *src_ctx,
395  int flags) {
396  AVCUDADeviceContext *hwctx = device_ctx->hwctx;
397  CudaFunctions *cu;
398  const char *src_uuid = NULL;
399  int ret, i, device_count;
400 
401 #if CONFIG_VULKAN
402  VkPhysicalDeviceIDProperties vk_idp = {
403  .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
404  };
405 #endif
406 
407  switch (src_ctx->type) {
408 #if CONFIG_VULKAN
410  AVVulkanDeviceContext *vkctx = src_ctx->hwctx;
411  VkPhysicalDeviceProperties2 vk_dev_props = {
412  .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
413  .pNext = &vk_idp,
414  };
415  vkGetPhysicalDeviceProperties2(vkctx->phys_dev, &vk_dev_props);
416  src_uuid = vk_idp.deviceUUID;
417  break;
418  }
419 #endif
420  default:
421  return AVERROR(ENOSYS);
422  }
423 
424  if (!src_uuid) {
425  av_log(device_ctx, AV_LOG_ERROR,
426  "Failed to get UUID of source device.\n");
427  goto error;
428  }
429 
430  if (cuda_device_init(device_ctx) < 0)
431  goto error;
432 
433  cu = hwctx->internal->cuda_dl;
434 
435  ret = CHECK_CU(cu->cuInit(0));
436  if (ret < 0)
437  goto error;
438 
439  ret = CHECK_CU(cu->cuDeviceGetCount(&device_count));
440  if (ret < 0)
441  goto error;
442 
443  hwctx->internal->cuda_device = -1;
444  for (i = 0; i < device_count; i++) {
445  CUdevice dev;
446  CUuuid uuid;
447 
448  ret = CHECK_CU(cu->cuDeviceGet(&dev, i));
449  if (ret < 0)
450  goto error;
451 
452  ret = CHECK_CU(cu->cuDeviceGetUuid(&uuid, dev));
453  if (ret < 0)
454  goto error;
455 
456  if (memcmp(src_uuid, uuid.bytes, sizeof (uuid.bytes)) == 0) {
457  hwctx->internal->cuda_device = dev;
458  break;
459  }
460  }
461 
462  if (hwctx->internal->cuda_device == -1) {
463  av_log(device_ctx, AV_LOG_ERROR, "Could not derive CUDA device.\n");
464  goto error;
465  }
466 
467  ret = cuda_context_init(device_ctx, flags);
468  if (ret < 0)
469  goto error;
470 
471  return 0;
472 
473 error:
474  cuda_device_uninit(device_ctx);
475  return AVERROR_UNKNOWN;
476 }
477 
480  .name = "CUDA",
481 
482  .device_hwctx_size = sizeof(AVCUDADeviceContext),
483  .frames_priv_size = sizeof(CUDAFramesContext),
484 
485  .device_create = cuda_device_create,
486  .device_derive = cuda_device_derive,
487  .device_init = cuda_device_init,
488  .device_uninit = cuda_device_uninit,
489  .frames_get_constraints = cuda_frames_get_constraints,
490  .frames_init = cuda_frames_init,
491  .frames_get_buffer = cuda_get_buffer,
492  .transfer_get_formats = cuda_transfer_get_formats,
493  .transfer_data_to = cuda_transfer_data,
494  .transfer_data_from = cuda_transfer_data,
495 
496  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE },
497 };
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:61
#define NULL
Definition: coverity.c:32
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
Memory handling functions.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:486
AVCUDADeviceContextInternal * internal
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array...
Definition: imgutils.c:411
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:229
static int cuda_frames_get_constraints(AVHWDeviceContext *ctx, const void *hwconfig, AVHWFramesConstraints *constraints)
static int cuda_frames_init(AVHWFramesContext *ctx)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static void error(const char *err)
#define src
Definition: vp8dsp.c:254
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:634
#define AV_PIX_FMT_P016
Definition: pixfmt.h:447
#define AV_PIX_FMT_P010
Definition: pixfmt.h:446
AVBufferPool * pool_internal
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
enum AVHWDeviceType type
uint8_t
static int cuda_device_derive(AVHWDeviceContext *device_ctx, AVHWDeviceContext *src_ctx, int flags)
static enum AVPixelFormat supported_formats[]
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
Definition: hwcontext.h:92
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:410
ptrdiff_t size
Definition: opengl_enc.c:100
#define CHECK_CU(x)
#define av_log(a,...)
static int cuda_device_create(AVHWDeviceContext *device_ctx, const char *device, AVDictionary *opts, int flags)
static void cuda_buffer_free(void *opaque, uint8_t *data)
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters...
Definition: imgutils.c:431
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:375
API-specific header for AV_HWDEVICE_TYPE_VULKAN.
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:79
static int cuda_device_init(AVHWDeviceContext *ctx)
#define fail()
Definition: checkasm.h:122
AVDictionary * opts
Definition: movenc.c:50
static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
#define FFMIN(a, b)
Definition: common.h:96
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
Definition: hwcontext.h:149
static int cuda_transfer_get_formats(AVHWFramesContext *ctx, enum AVHWFrameTransferDirection dir, enum AVPixelFormat **formats)
static AVBufferRef * cuda_pool_alloc(void *opaque, int size)
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Main Vulkan context, allocated as AVHWDeviceContext.hwctx.
FFmpeg internal API for CUDA.
int dummy
Definition: motion.c:64
HW acceleration through CUDA.
Definition: pixfmt.h:235
AVBufferPool * av_buffer_pool_init2(int size, void *opaque, AVBufferRef *(*alloc)(void *opaque, int size), void(*pool_free)(void *opaque))
Allocate and initialize a buffer pool with a more complex allocator.
Definition: buffer.c:219
#define FF_ARRAY_ELEMS(a)
#define CUDA_FRAME_ALIGNMENT
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
This struct describes the constraints on hardware frames attached to a given device with a hardware-s...
Definition: hwcontext.h:433
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static int cuda_transfer_data(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src)
const HWContextType ff_hwcontext_type_cuda
uint8_t * data
The data buffer.
Definition: buffer.h:89
This struct is allocated as AVHWDeviceContext.hwctx.
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
refcounted data buffer API
enum AVPixelFormat * valid_hw_formats
A list of possible values for format in the hw_frames_ctx, terminated by AV_PIX_FMT_NONE.
Definition: hwcontext.h:438
static int cuda_context_init(AVHWDeviceContext *device_ctx, int flags)
AVHWFramesInternal * internal
Private data used internally by libavutil.
Definition: hwcontext.h:134
#define flags(name, subs,...)
Definition: cbs_av1.c:564
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
A reference to a data buffer.
Definition: buffer.h:81
Vulkan hardware images.
Definition: pixfmt.h:356
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
#define AV_CUDA_USE_PRIMARY_CONTEXT
Use primary device context instead of creating a new one.
common internal and external API header
static void cuda_device_uninit(AVHWDeviceContext *device_ctx)
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVHWFrameTransferDirection
Definition: hwcontext.h:395
pixel format definitions
AVBufferPool * pool
A pool from which the frames are allocated by av_hwframe_get_buffer().
Definition: hwcontext.h:190
enum AVPixelFormat * valid_sw_formats
A list of possible values for sw_format in the hw_frames_ctx, terminated by AV_PIX_FMT_NONE.
Definition: hwcontext.h:445
VkPhysicalDevice phys_dev
Physical device.
int height
Definition: frame.h:353
#define av_freep(p)
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:335
#define av_malloc_array(a, b)
formats
Definition: signature.h:48
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:222
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:374