#if !NVDECAPI_CHECK_VERSION(9, 0)
#define cudaVideoSurfaceFormat_YUV444 2
#define cudaVideoSurfaceFormat_YUV444_16Bit 3
#endif

#if NVDECAPI_CHECK_VERSION(11, 0)
#define CUVID_HAS_AV1_SUPPORT
#endif

/* ... */

#define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, ctx->cudl, x)

static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT *format)
{
    /* ... */
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    /* ... */
    int old_width  = avctx->width;
    int old_height = avctx->height;
    /* ... */

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width  = cuinfo.ulWidth  = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    /* apply cropping */
    cuinfo.display_area.left   = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top    = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right  = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    if (ctx->resize_expr) {
        avctx->width  = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width  = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    /* target dimensions are rounded up to the next even value */
    cuinfo.ulTargetWidth  = avctx->width  = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    cuinfo.target_rect.left   = 0;
    cuinfo.target_rect.top    = 0;
    cuinfo.target_rect.right  = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    chroma_444 = format->chroma_format == cudaVideoChromaFormat_444;

    switch (format->bit_depth_luma_minus8) {
    /* ... per-bit-depth cases elided ... */
    }

    if (!caps || !caps->bIsSupported) {
        /* ... */
               format->bit_depth_luma_minus8 + 8);
        /* ... */
    }
    /* ... */

    if (surface_fmt < 0) {
        /* ... */
    }
    /* ... */

    avctx->pix_fmt = surface_fmt;

    /* ... */
    if (avctx->hw_frames_ctx) {
        /* ... */
    }
    /* ... */

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    /* ... */
    ctx->progressive_sequence = format->progressive_sequence;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        /* ... */

    /* ... */
    if (format->video_signal_description.video_full_range_flag)
        /* ... */

    /* ... */
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    /* ... */
    avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }
    /* ... */

    /* ... */
        && avctx->coded_width == format->coded_width
        && avctx->coded_height == format->coded_height
        && avctx->width == old_width
        && avctx->height == old_height
        && ctx->chroma_format == format->chroma_format
        /* ... */

    if (ctx->cudecoder) {
        /* ... */
        if (ctx->internal_error < 0)
            /* ... */
    }
    /* ... */

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            /* ... */
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        /* ... */
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        /* ... */
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType    = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    /* ... */
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
    /* ... */
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
    /* ... */
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags     = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8      = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode     = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        /* ... */

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        /* ... */

    if (!hwframe_ctx->pool) {
        /* ... */
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width     = avctx->width;
        hwframe_ctx->height    = avctx->height;
        /* ... */
    }
    /* ... */
}
static int CUDAAPI cuvid_handle_picture_decode(void *opaque, CUVIDPICPARAMS *picparams)
{
    /* ... */
    ctx->key_frame[picparams->CurrPicIdx] = picparams->intra_pic_flag;
    /* ... */
}
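The three CUDAAPI callbacks in this file (cuvid_handle_video_sequence, cuvid_handle_picture_decode, cuvid_handle_picture_display) are driven by the NVDEC parser. Below is a minimal sketch, not the verbatim cuvid.c code, of how such callbacks are registered through CUVIDPARSERPARAMS before cuvidCreateVideoParser is called; the helper name wire_parser_callbacks and the ulMaxDisplayDelay value are illustrative assumptions, while the field names come from nvcuvid.h.

/* Sketch (assumed helper, not the verbatim cuvid.c code) of wiring the parser callbacks. */
static int wire_parser_callbacks(AVCodecContext *avctx, CuvidContext *ctx)
{
    memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));

    ctx->cuparseinfo.CodecType              = ctx->codec_type;            /* probed earlier */
    ctx->cuparseinfo.ulMaxNumDecodeSurfaces = ctx->nb_surfaces;
    ctx->cuparseinfo.ulMaxDisplayDelay      = 4;                          /* illustrative value */
    ctx->cuparseinfo.pUserData              = avctx;                      /* becomes the opaque argument */
    ctx->cuparseinfo.pfnSequenceCallback    = cuvid_handle_video_sequence;
    ctx->cuparseinfo.pfnDecodePicture       = cuvid_handle_picture_decode;
    ctx->cuparseinfo.pfnDisplayPicture      = cuvid_handle_picture_display;
    ctx->cuparseinfo.pExtVideoInfo          = ctx->cuparse_ext;           /* carries raw_seqhdr_data */

    return CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
}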
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    /* ... */
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET cupkt;
    /* ... */

    if (is_flush && avpkt && avpkt->size)
        /* ... */

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt && avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload      = avpkt->data;
        /* ... */
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            /* ... */
            cupkt.timestamp = avpkt->pts;
        /* ... */
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
        /* ... */
    }
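After the packet structure is filled, the original function submits it to the parser while the CUDA context is current; that part is elided in the fragments above. A reduced sketch of the step, assuming the same ctx, cuda_ctx, dummy and cupkt variables as in this function:

    /* sketch only: hand the packet to the parser with the CUDA context pushed */
    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        return ret;

    ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &cupkt));

    CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));   /* pop even if parsing failed */
    return ret;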
static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    /* ... */
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUdeviceptr mapped_frame = 0;
    int ret = 0, eret = 0;
    /* ... */

        unsigned int pitch = 0;
        /* ... */

        memset(&params, 0, sizeof(params));
        params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
        /* ... */
        params.top_field_first   = parsed_frame.dispinfo.top_field_first;
        /* ... */

            CUDA_MEMCPY2D cpy = {
                .srcMemoryType = CU_MEMORYTYPE_DEVICE,
                .dstMemoryType = CU_MEMORYTYPE_DEVICE,
                .srcDevice     = mapped_frame,
                .dstDevice     = (CUdeviceptr)frame->data[i],
                /* ... */
            };
            /* ... */

            ret = CHECK_CU(ctx->cudl->cuMemcpy2DAsync(&cpy, device_hwctx->stream));
        /* ... */

        unsigned int offset = 0;
        /* ... */

        frame->pts += pts_diff;
        /* ... */
}

static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    /* ... */
    } else if (ret < 0) {
        /* ... */
    }
    /* ... */
}
static av_cold int cuvid_decode_end(AVCodecContext *avctx)
{
    /* ... */
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    /* ... */

    ctx->cudl->cuCtxPushCurrent(cuda_ctx);
    /* ... */
    ctx->cudl->cuCtxPopCurrent(&dummy);
    /* ... */

    cuvid_free_functions(&ctx->cvdl);
    /* ... */
}
static int cuvid_test_capabilities(AVCodecContext *avctx, const CUVIDPARSERPARAMS *cuparseinfo,
                                   int probed_width, int probed_height, int bit_depth)
{
    /* ... */
    CUVIDDECODECAPS *caps;
    int res8 = 0, res10 = 0, res12 = 0;

    if (!ctx->cvdl->cuvidGetDecoderCaps) {
        av_log(avctx, AV_LOG_WARNING, "Used Nvidia driver is too old to perform a capability check.\n");
        /* ... */
#if defined(_WIN32) || defined(__CYGWIN__)
        /* ... */
#endif
            /* ... */
            ". Continuing blind.\n");
        ctx->caps8.bIsSupported = ctx->caps10.bIsSupported = 1;
        /* ... */
        ctx->caps12.bIsSupported = 0;
        /* ... */
    }

    /* ... */
        = cuparseinfo->CodecType;
    /* ... */
        = cudaVideoChromaFormat_420;

    ctx->caps8.nBitDepthMinus8  = 0;
    ctx->caps10.nBitDepthMinus8 = 2;
    ctx->caps12.nBitDepthMinus8 = 4;
    /* ... */

    av_log(avctx, AV_LOG_VERBOSE, "8 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           /* ... */);
    av_log(avctx, AV_LOG_VERBOSE, "10 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           /* ... */);
    av_log(avctx, AV_LOG_VERBOSE, "12 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           /* ... */);
    /* ... */

    if (!ctx->caps8.bIsSupported) {
        /* ... */
    }
    /* ... */

    if (!caps->bIsSupported) {
        /* ... */
    }

    if (probed_width > caps->nMaxWidth || probed_width < caps->nMinWidth) {
        /* ... */
               probed_width, caps->nMinWidth, caps->nMaxWidth);
        /* ... */
    }

    if (probed_height > caps->nMaxHeight || probed_height < caps->nMinHeight) {
        /* ... */
               probed_height, caps->nMinHeight, caps->nMaxHeight);
        /* ... */
    }

    if ((probed_width * probed_height) / 256 > caps->nMaxMBCount) {
        /* ... */
               (int)(probed_width * probed_height) / 256, caps->nMaxMBCount);
        /* ... */
    }
    /* ... */
}
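As a quick sanity check on the last comparison: the probed picture area is divided by 256, i.e. one 16x16 macroblock, so a 3840x2160 probe corresponds to 3840 * 2160 / 256 = 32400 macroblocks, and the decoder is rejected if that exceeds the driver-reported caps->nMaxMBCount.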
static av_cold int cuvid_decode_init(AVCodecContext *avctx)
{
    /* ... */
    CUVIDSOURCEDATAPACKET seq_pkt;
    CUcontext cuda_ctx = NULL;
    /* ... */
    int probed_bit_depth = 8;
    /* ... */
        probed_bit_depth = probe_desc->comp[0].depth;
    /* ... */

    ret = cuvid_load_functions(&ctx->cvdl, avctx);
    /* ... */

    device_hwctx = device_ctx->hwctx;
    /* ... */

    memset(&seq_pkt, 0, sizeof(seq_pkt));
    /* ... */

    /* one block per enabled wrapper decoder; bodies elided */
#if CONFIG_H264_CUVID_DECODER
    /* ... */
#if CONFIG_HEVC_CUVID_DECODER
    /* ... */
#if CONFIG_MJPEG_CUVID_DECODER
    /* ... */
#if CONFIG_MPEG1_CUVID_DECODER
    /* ... */
#if CONFIG_MPEG2_CUVID_DECODER
    /* ... */
#if CONFIG_MPEG4_CUVID_DECODER
    /* ... */
#if CONFIG_VP8_CUVID_DECODER
    /* ... */
#if CONFIG_VP9_CUVID_DECODER
    /* ... */
#if CONFIG_VC1_CUVID_DECODER
    /* ... */
#if CONFIG_AV1_CUVID_DECODER && defined(CUVID_HAS_AV1_SUPPORT)
    /* ... */

    /* ... */
        + FFMAX(extradata_size - (int)sizeof(ctx->cuparse_ext->raw_seqhdr_data), 0));
    /* ... */

    if (extradata_size > 0)
        memcpy(ctx->cuparse_ext->raw_seqhdr_data, extradata, extradata_size);
    ctx->cuparse_ext->format.seqhdr_data_length = extradata_size;
    /* ... */

    seq_pkt.payload      = ctx->cuparse_ext->raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext->format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        /* ... */
    }
    /* ... */
}
static void cuvid_flush(AVCodecContext *avctx)
{
    /* ... */
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt = { 0 };
    /* ... */

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    /* ... */

    if (!ctx->frame_queue) {
        /* ... */
    }

    if (ctx->cudecoder) {
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
        /* ... */
    }

    if (ctx->cuparser) {
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
        /* ... */
    }

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    /* ... */

    seq_pkt.payload      = ctx->cuparse_ext->raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext->format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        /* ... */
    }
    /* ... */

    ctx->prev_pts = INT64_MIN;
    ctx->decoder_flushing = 0;
    /* ... */
}
#define OFFSET(x) offsetof(CuvidContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "deint",    "Set deinterlacing mode",           OFFSET(deint_mode), AV_OPT_TYPE_INT,   { .i64 = cudaVideoDeinterlaceMode_Weave    }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
    { "weave",    "Weave deinterlacing (do nothing)", 0,                  AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave    }, 0, 0, VD, "deint" },
    { "bob",      "Bob deinterlacing",                0,                  AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob      }, 0, 0, VD, "deint" },
    { "adaptive", "Adaptive deinterlacing",           0,                  AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
    /* ... */
};
#define DEFINE_CUVID_CODEC(x, X, bsf_name) \
    static const AVClass x##_cuvid_class = { \
        .class_name = #x "_cuvid", \
        .item_name  = av_default_item_name, \
        .option     = options, \
        .version    = LIBAVUTIL_VERSION_INT, \
    }; \
    AVCodec ff_##x##_cuvid_decoder = { \
        .name           = #x "_cuvid", \
        .long_name      = NULL_IF_CONFIG_SMALL("Nvidia CUVID " #X " decoder"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = AV_CODEC_ID_##X, \
        .priv_data_size = sizeof(CuvidContext), \
        .priv_class     = &x##_cuvid_class, \
        .init           = cuvid_decode_init, \
        .close          = cuvid_decode_end, \
        .decode         = cuvid_decode_frame, \
        .receive_frame  = cuvid_output_frame, \
        .flush          = cuvid_flush, \
        .bsfs           = bsf_name, \
        .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
                                                        /* ... */ \
                                                        AV_PIX_FMT_NONE }, \
        .hw_configs     = cuvid_hw_configs, \
        .wrapper_name   = "cuvid", \
    };

/* one DEFINE_CUVID_CODEC() invocation per enabled wrapper (arguments elided): */
#if CONFIG_AV1_CUVID_DECODER && defined(CUVID_HAS_AV1_SUPPORT)
    /* ... */
#if CONFIG_HEVC_CUVID_DECODER
    /* ... */
#if CONFIG_H264_CUVID_DECODER
    /* ... */
#if CONFIG_MJPEG_CUVID_DECODER
    /* ... */
#if CONFIG_MPEG1_CUVID_DECODER
    /* ... */
#if CONFIG_MPEG2_CUVID_DECODER
    /* ... */
#if CONFIG_MPEG4_CUVID_DECODER
    /* ... */
#if CONFIG_VP8_CUVID_DECODER
    /* ... */
#if CONFIG_VP9_CUVID_DECODER
    /* ... */
#if CONFIG_VC1_CUVID_DECODER
    /* ... */
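Each of the #if blocks above expands the macro once for the matching codec. A hedged illustration for H.264 follows; the third argument is the bitstream-filter list applied before decoding, and since the exact string used in cuvid.c is not visible in these fragments, the one below is only a plausible placeholder. The resulting decoder is selected as -c:v h264_cuvid.

#if CONFIG_H264_CUVID_DECODER
/* illustrative invocation; the real bsf string in cuvid.c may differ */
DEFINE_CUVID_CODEC(h264, H264, "h264_mp4toannexb")
#endif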