FFmpeg
nvdec.c
1 /*
2  * HW decode acceleration through NVDEC
3  *
4  * Copyright (c) 2016 Anton Khirnov
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 
25 #include "libavutil/common.h"
26 #include "libavutil/error.h"
27 #include "libavutil/hwcontext.h"
28 #include "libavutil/hwcontext_cuda_internal.h"
29 #include "libavutil/cuda_check.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/pixfmt.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "nvdec.h"
36 #include "internal.h"
37 
38 #if !NVDECAPI_CHECK_VERSION(9, 0)
39 #define cudaVideoSurfaceFormat_YUV444 2
40 #define cudaVideoSurfaceFormat_YUV444_16Bit 3
41 #endif
42 
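/*
 * Per-decoder state: the CUVID decoder handle, the CUDA context and stream it
 * runs on, and the dynamically loaded CUDA/CUVID function tables. It also
 * keeps a reference to the "real" CUDA frames context that mapped output
 * frames are attached to (see nvdec_retrieve_data() below).
 */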
43 typedef struct NVDECDecoder {
44  CUvideodecoder decoder;
45 
46  AVBufferRef *hw_device_ref;
47  AVBufferRef *real_hw_frames_ref;
48  CUcontext cuda_ctx;
49  CUstream stream;
50 
51  CudaFunctions *cudl;
52  CuvidFunctions *cvdl;
53 } NVDECDecoder;
54 
55 typedef struct NVDECFramePool {
56  unsigned int dpb_size;
57  unsigned int nb_allocated;
58 } NVDECFramePool;
59 
60 #define CHECK_CU(x) FF_CUDA_CHECK_DL(logctx, decoder->cudl, x)
61 
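/* Map an FFmpeg codec ID to the corresponding cudaVideoCodec value;
 * returns -1 for codecs NVDEC does not handle. */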
62 static int map_avcodec_id(enum AVCodecID id)
63 {
64  switch (id) {
65 #if CONFIG_AV1_NVDEC_HWACCEL
66  case AV_CODEC_ID_AV1: return cudaVideoCodec_AV1;
67 #endif
68  case AV_CODEC_ID_H264: return cudaVideoCodec_H264;
69  case AV_CODEC_ID_HEVC: return cudaVideoCodec_HEVC;
70  case AV_CODEC_ID_MJPEG: return cudaVideoCodec_JPEG;
71  case AV_CODEC_ID_MPEG1VIDEO: return cudaVideoCodec_MPEG1;
72  case AV_CODEC_ID_MPEG2VIDEO: return cudaVideoCodec_MPEG2;
73  case AV_CODEC_ID_MPEG4: return cudaVideoCodec_MPEG4;
74  case AV_CODEC_ID_VC1: return cudaVideoCodec_VC1;
75  case AV_CODEC_ID_VP8: return cudaVideoCodec_VP8;
76  case AV_CODEC_ID_VP9: return cudaVideoCodec_VP9;
77  case AV_CODEC_ID_WMV3: return cudaVideoCodec_VC1;
78  }
79  return -1;
80 }
81 
82 static int map_chroma_format(enum AVPixelFormat pix_fmt)
83 {
84  int shift_h = 0, shift_v = 0;
85 
86  if (av_pix_fmt_count_planes(pix_fmt) == 1)
87  return cudaVideoChromaFormat_Monochrome;
88 
89  av_pix_fmt_get_chroma_sub_sample(pix_fmt, &shift_h, &shift_v);
90 
91  if (shift_h == 1 && shift_v == 1)
92  return cudaVideoChromaFormat_420;
93  else if (shift_h == 1 && shift_v == 0)
94  return cudaVideoChromaFormat_422;
95  else if (shift_h == 0 && shift_v == 0)
96  return cudaVideoChromaFormat_444;
97 
98  return -1;
99 }
100 
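/*
 * Query cuvidGetDecoderCaps() (when the driver provides it) and reject
 * streams whose codec/chroma/bit-depth combination is unsupported or whose
 * dimensions fall outside the reported limits. The macroblock count is
 * computed as width * height / 256, i.e. assuming 16x16 macroblocks.
 */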
101 static int nvdec_test_capabilities(NVDECDecoder *decoder,
102  CUVIDDECODECREATEINFO *params, void *logctx)
103 {
104  int ret;
105  CUVIDDECODECAPS caps = { 0 };
106 
107  caps.eCodecType = params->CodecType;
108  caps.eChromaFormat = params->ChromaFormat;
109  caps.nBitDepthMinus8 = params->bitDepthMinus8;
110 
111  if (!decoder->cvdl->cuvidGetDecoderCaps) {
112  av_log(logctx, AV_LOG_WARNING, "Used Nvidia driver is too old to perform a capability check.\n");
113  av_log(logctx, AV_LOG_WARNING, "The minimum required version is "
114 #if defined(_WIN32) || defined(__CYGWIN__)
115  "378.66"
116 #else
117  "378.13"
118 #endif
119  ". Continuing blind.\n");
120  return 0;
121  }
122 
123  ret = CHECK_CU(decoder->cvdl->cuvidGetDecoderCaps(&caps));
124  if (ret < 0)
125  return ret;
126 
127  av_log(logctx, AV_LOG_VERBOSE, "NVDEC capabilities:\n");
128  av_log(logctx, AV_LOG_VERBOSE, "format supported: %s, max_mb_count: %d\n",
129  caps.bIsSupported ? "yes" : "no", caps.nMaxMBCount);
130  av_log(logctx, AV_LOG_VERBOSE, "min_width: %d, max_width: %d\n",
131  caps.nMinWidth, caps.nMaxWidth);
132  av_log(logctx, AV_LOG_VERBOSE, "min_height: %d, max_height: %d\n",
133  caps.nMinHeight, caps.nMaxHeight);
134 
135  if (!caps.bIsSupported) {
136  av_log(logctx, AV_LOG_ERROR, "Hardware is lacking required capabilities\n");
137  return AVERROR(EINVAL);
138  }
139 
140  if (params->ulWidth > caps.nMaxWidth || params->ulWidth < caps.nMinWidth) {
141  av_log(logctx, AV_LOG_ERROR, "Video width %d not within range from %d to %d\n",
142  (int)params->ulWidth, caps.nMinWidth, caps.nMaxWidth);
143  return AVERROR(EINVAL);
144  }
145 
146  if (params->ulHeight > caps.nMaxHeight || params->ulHeight < caps.nMinHeight) {
147  av_log(logctx, AV_LOG_ERROR, "Video height %d not within range from %d to %d\n",
148  (int)params->ulHeight, caps.nMinHeight, caps.nMaxHeight);
149  return AVERROR(EINVAL);
150  }
151 
152  if ((params->ulWidth * params->ulHeight) / 256 > caps.nMaxMBCount) {
153  av_log(logctx, AV_LOG_ERROR, "Video macroblock count %d exceeds maximum of %d\n",
154  (int)(params->ulWidth * params->ulHeight) / 256, caps.nMaxMBCount);
155  return AVERROR(EINVAL);
156  }
157 
158  return 0;
159 }
160 
161 static void nvdec_decoder_free(void *opaque, uint8_t *data)
162 {
163  NVDECDecoder *decoder = (NVDECDecoder*)data;
164 
165  if (decoder->decoder) {
166  void *logctx = decoder->hw_device_ref->data;
167  CUcontext dummy;
168  CHECK_CU(decoder->cudl->cuCtxPushCurrent(decoder->cuda_ctx));
169  CHECK_CU(decoder->cvdl->cuvidDestroyDecoder(decoder->decoder));
170  CHECK_CU(decoder->cudl->cuCtxPopCurrent(&dummy));
171  }
172 
173  av_buffer_unref(&decoder->real_hw_frames_ref);
174  av_buffer_unref(&decoder->hw_device_ref);
175 
176  cuvid_free_functions(&decoder->cvdl);
177 
178  av_freep(&decoder);
179 }
180 
181 static int nvdec_decoder_create(AVBufferRef **out, AVBufferRef *hw_device_ref,
182  CUVIDDECODECREATEINFO *params, void *logctx)
183 {
184  AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)hw_device_ref->data;
185  AVCUDADeviceContext *device_hwctx = hw_device_ctx->hwctx;
186 
187  AVBufferRef *decoder_ref;
188  NVDECDecoder *decoder;
189 
190  CUcontext dummy;
191  int ret;
192 
193  decoder = av_mallocz(sizeof(*decoder));
194  if (!decoder)
195  return AVERROR(ENOMEM);
196 
197  decoder_ref = av_buffer_create((uint8_t*)decoder, sizeof(*decoder),
198  nvdec_decoder_free, NULL, AV_BUFFER_FLAG_READONLY);
199  if (!decoder_ref) {
200  av_freep(&decoder);
201  return AVERROR(ENOMEM);
202  }
203 
204  decoder->hw_device_ref = av_buffer_ref(hw_device_ref);
205  if (!decoder->hw_device_ref) {
206  ret = AVERROR(ENOMEM);
207  goto fail;
208  }
209  decoder->cuda_ctx = device_hwctx->cuda_ctx;
210  decoder->cudl = device_hwctx->internal->cuda_dl;
211  decoder->stream = device_hwctx->stream;
212 
213  ret = cuvid_load_functions(&decoder->cvdl, logctx);
214  if (ret < 0) {
215  av_log(logctx, AV_LOG_ERROR, "Failed loading nvcuvid.\n");
216  goto fail;
217  }
218 
219  ret = CHECK_CU(decoder->cudl->cuCtxPushCurrent(decoder->cuda_ctx));
220  if (ret < 0)
221  goto fail;
222 
223  ret = nvdec_test_capabilities(decoder, params, logctx);
224  if (ret < 0) {
225  CHECK_CU(decoder->cudl->cuCtxPopCurrent(&dummy));
226  goto fail;
227  }
228 
229  ret = CHECK_CU(decoder->cvdl->cuvidCreateDecoder(&decoder->decoder, params));
230 
231  CHECK_CU(decoder->cudl->cuCtxPopCurrent(&dummy));
232 
233  if (ret < 0) {
234  goto fail;
235  }
236 
237  *out = decoder_ref;
238 
239  return 0;
240 fail:
241  av_buffer_unref(&decoder_ref);
242  return ret;
243 }
244 
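/*
 * Allocator for the decoder surface pool: each pool entry is just an integer
 * index into the NVDEC decode picture buffer, handed out until dpb_size
 * surfaces are in use. No actual frame memory is allocated here.
 */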
245 static AVBufferRef *nvdec_decoder_frame_alloc(void *opaque, size_t size)
246 {
247  NVDECFramePool *pool = opaque;
248  AVBufferRef *ret;
249 
250  if (pool->nb_allocated >= pool->dpb_size)
251  return NULL;
252 
253  ret = av_buffer_alloc(sizeof(unsigned int));
254  if (!ret)
255  return NULL;
256 
257  *(unsigned int*)ret->data = pool->nb_allocated++;
258 
259  return ret;
260 }
261 
262 int ff_nvdec_decode_uninit(AVCodecContext *avctx)
263 {
264  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
265 
266  av_freep(&ctx->bitstream);
267  av_freep(&ctx->bitstream_internal);
268  ctx->bitstream_len = 0;
269  ctx->bitstream_allocated = 0;
270 
271  av_freep(&ctx->slice_offsets);
272  ctx->nb_slices = 0;
273  ctx->slice_offsets_allocated = 0;
274 
275  av_buffer_unref(&ctx->decoder_ref);
276  av_buffer_pool_uninit(&ctx->decoder_pool);
277 
278  return 0;
279 }
280 
281 static void nvdec_free_dummy(struct AVHWFramesContext *ctx)
282 {
283  av_buffer_pool_uninit(&ctx->pool);
284 }
285 
286 static AVBufferRef *nvdec_alloc_dummy(size_t size)
287 {
288  return av_buffer_create(NULL, 0, NULL, NULL, 0);
289 }
290 
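/*
 * Create and initialize an AVHWFramesContext for the decoder. In "dummy" mode
 * the pool hands out empty buffers, presumably so placeholder hardware frames
 * can be allocated without committing CUDA memory; the real CUDA frames
 * context lives in NVDECDecoder.real_hw_frames_ref and is only attached to a
 * frame once it is mapped in nvdec_retrieve_data().
 */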
291 static int nvdec_init_hwframes(AVCodecContext *avctx, AVBufferRef **out_frames_ref, int dummy)
292 {
293  AVHWFramesContext *frames_ctx;
294  int ret;
295 
296  ret = avcodec_get_hw_frames_parameters(avctx,
297  avctx->hw_device_ctx,
298  avctx->hwaccel->pix_fmt,
299  out_frames_ref);
300  if (ret < 0)
301  return ret;
302 
303  frames_ctx = (AVHWFramesContext*)(*out_frames_ref)->data;
304 
305  if (dummy) {
306  // Copied from ff_decode_get_hw_frames_ctx for compatibility
307  frames_ctx->initial_pool_size += 3;
308 
309  frames_ctx->free = nvdec_free_dummy;
310  frames_ctx->pool = av_buffer_pool_init(0, nvdec_alloc_dummy);
311 
312  if (!frames_ctx->pool) {
313  av_buffer_unref(out_frames_ref);
314  return AVERROR(ENOMEM);
315  }
316  } else {
317  // This is normally not used to actually allocate frames from
318  frames_ctx->initial_pool_size = 0;
319  }
320 
321  ret = av_hwframe_ctx_init(*out_frames_ref);
322  if (ret < 0) {
323  av_buffer_unref(out_frames_ref);
324  return ret;
325  }
326 
327  return 0;
328 }
329 
330 int ff_nvdec_decode_init(AVCodecContext *avctx)
331 {
332  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
333 
334  NVDECDecoder *decoder;
335  AVBufferRef *real_hw_frames_ref;
336  NVDECFramePool *pool;
337  AVHWFramesContext *frames_ctx;
338  const AVPixFmtDescriptor *sw_desc;
339 
340  CUVIDDECODECREATEINFO params = { 0 };
341 
342  cudaVideoSurfaceFormat output_format;
343  int cuvid_codec_type, cuvid_chroma_format, chroma_444;
344  int ret = 0;
345 
346  sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
347  if (!sw_desc)
348  return AVERROR_BUG;
349 
350  cuvid_codec_type = map_avcodec_id(avctx->codec_id);
351  if (cuvid_codec_type < 0) {
352  av_log(avctx, AV_LOG_ERROR, "Unsupported codec ID\n");
353  return AVERROR_BUG;
354  }
355 
356  cuvid_chroma_format = map_chroma_format(avctx->sw_pix_fmt);
357  if (cuvid_chroma_format < 0) {
358  av_log(avctx, AV_LOG_ERROR, "Unsupported chroma format\n");
359  return AVERROR(ENOSYS);
360  }
361  chroma_444 = ctx->supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
362 
363  if (!avctx->hw_frames_ctx) {
364  ret = nvdec_init_hwframes(avctx, &avctx->hw_frames_ctx, 1);
365  if (ret < 0)
366  return ret;
367 
368  ret = nvdec_init_hwframes(avctx, &real_hw_frames_ref, 0);
369  if (ret < 0)
370  return ret;
371  } else {
372  real_hw_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
373  if (!real_hw_frames_ref)
374  return AVERROR(ENOMEM);
375  }
376 
377  switch (sw_desc->comp[0].depth) {
378  case 8:
379  output_format = chroma_444 ? cudaVideoSurfaceFormat_YUV444 :
380  cudaVideoSurfaceFormat_NV12;
381  break;
382  case 10:
383  case 12:
384  output_format = chroma_444 ? cudaVideoSurfaceFormat_YUV444_16Bit :
385  cudaVideoSurfaceFormat_P016;
386  break;
387  default:
388  av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth\n");
389  av_buffer_unref(&real_hw_frames_ref);
390  return AVERROR(ENOSYS);
391  }
392 
393  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
394 
395  params.ulWidth = avctx->coded_width;
396  params.ulHeight = avctx->coded_height;
397  params.ulTargetWidth = avctx->coded_width;
398  params.ulTargetHeight = avctx->coded_height;
399  params.bitDepthMinus8 = sw_desc->comp[0].depth - 8;
400  params.OutputFormat = output_format;
401  params.CodecType = cuvid_codec_type;
402  params.ChromaFormat = cuvid_chroma_format;
403  params.ulNumDecodeSurfaces = frames_ctx->initial_pool_size;
404  params.ulNumOutputSurfaces = frames_ctx->initial_pool_size;
405 
406  ret = nvdec_decoder_create(&ctx->decoder_ref, frames_ctx->device_ref, &params, avctx);
407  if (ret < 0) {
408  if (params.ulNumDecodeSurfaces > 32) {
409  av_log(avctx, AV_LOG_WARNING, "Using more than 32 (%d) decode surfaces might cause nvdec to fail.\n",
410  (int)params.ulNumDecodeSurfaces);
411  av_log(avctx, AV_LOG_WARNING, "Try lowering the amount of threads. Using %d right now.\n",
412  avctx->thread_count);
413  }
414  av_buffer_unref(&real_hw_frames_ref);
415  return ret;
416  }
417 
418  decoder = (NVDECDecoder*)ctx->decoder_ref->data;
419  decoder->real_hw_frames_ref = real_hw_frames_ref;
420  real_hw_frames_ref = NULL;
421 
422  pool = av_mallocz(sizeof(*pool));
423  if (!pool) {
424  ret = AVERROR(ENOMEM);
425  goto fail;
426  }
427  pool->dpb_size = frames_ctx->initial_pool_size;
428 
429  ctx->decoder_pool = av_buffer_pool_init2(sizeof(int), pool,
430  nvdec_decoder_frame_alloc, av_free);
431  if (!ctx->decoder_pool) {
432  ret = AVERROR(ENOMEM);
433  goto fail;
434  }
435 
436  return 0;
437 fail:
438  ff_nvdec_decode_uninit(avctx);
439  return ret;
440 }
441 
442 static void nvdec_fdd_priv_free(void *priv)
443 {
444  NVDECFrame *cf = priv;
445 
446  if (!cf)
447  return;
448 
449  av_buffer_unref(&cf->idx_ref);
450  av_buffer_unref(&cf->ref_idx_ref);
451  av_buffer_unref(&cf->decoder_ref);
452 
453  av_freep(&priv);
454 }
455 
456 static void nvdec_unmap_mapped_frame(void *opaque, uint8_t *data)
457 {
458  NVDECFrame *unmap_data = (NVDECFrame*)data;
459  NVDECDecoder *decoder = (NVDECDecoder*)unmap_data->decoder_ref->data;
460  void *logctx = decoder->hw_device_ref->data;
461  CUdeviceptr devptr = (CUdeviceptr)opaque;
462  int ret;
463  CUcontext dummy;
464 
465  ret = CHECK_CU(decoder->cudl->cuCtxPushCurrent(decoder->cuda_ctx));
466  if (ret < 0)
467  goto finish;
468 
469  CHECK_CU(decoder->cvdl->cuvidUnmapVideoFrame(decoder->decoder, devptr));
470 
471  CHECK_CU(decoder->cudl->cuCtxPopCurrent(&dummy));
472 
473 finish:
474  av_buffer_unref(&unmap_data->idx_ref);
475  av_buffer_unref(&unmap_data->decoder_ref);
476  av_buffer_unref(&unmap_data->ref_idx_ref);
477  av_free(unmap_data);
478 }
479 
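/*
 * post_process callback installed on each frame: map the decoded NVDEC
 * surface into CUDA device memory with cuvidMapVideoFrame(), point
 * frame->data[] at the mapped planes using the returned pitch, attach the
 * real CUDA frames context, and register nvdec_unmap_mapped_frame() through
 * frame->buf[1] so the surface is unmapped when the frame is freed.
 */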
480 static int nvdec_retrieve_data(void *logctx, AVFrame *frame)
481 {
482  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
483  NVDECFrame *cf = (NVDECFrame*)fdd->hwaccel_priv;
484  NVDECDecoder *decoder = (NVDECDecoder*)cf->decoder_ref->data;
485 
486  AVHWFramesContext *hwctx = (AVHWFramesContext *)frame->hw_frames_ctx->data;
487 
488  CUVIDPROCPARAMS vpp = { 0 };
489  NVDECFrame *unmap_data = NULL;
490 
491  CUcontext dummy;
492  CUdeviceptr devptr;
493 
494  unsigned int pitch, i;
495  unsigned int offset = 0;
496  int shift_h = 0, shift_v = 0;
497  int ret = 0;
498 
499  vpp.progressive_frame = 1;
500  vpp.output_stream = decoder->stream;
501 
502  ret = CHECK_CU(decoder->cudl->cuCtxPushCurrent(decoder->cuda_ctx));
503  if (ret < 0)
504  return ret;
505 
506  ret = CHECK_CU(decoder->cvdl->cuvidMapVideoFrame(decoder->decoder,
507  cf->idx, &devptr,
508  &pitch, &vpp));
509  if (ret < 0)
510  goto finish;
511 
512  unmap_data = av_mallocz(sizeof(*unmap_data));
513  if (!unmap_data) {
514  ret = AVERROR(ENOMEM);
515  goto copy_fail;
516  }
517 
518  frame->buf[1] = av_buffer_create((uint8_t *)unmap_data, sizeof(*unmap_data),
519  nvdec_unmap_mapped_frame, (void*)devptr,
520  AV_BUFFER_FLAG_READONLY);
521  if (!frame->buf[1]) {
522  ret = AVERROR(ENOMEM);
523  goto copy_fail;
524  }
525 
526  av_buffer_unref(&frame->hw_frames_ctx);
527  frame->hw_frames_ctx = av_buffer_ref(decoder->real_hw_frames_ref);
528  if (!frame->hw_frames_ctx) {
529  ret = AVERROR(ENOMEM);
530  goto copy_fail;
531  }
532 
533  unmap_data->idx = cf->idx;
534  unmap_data->idx_ref = av_buffer_ref(cf->idx_ref);
535  unmap_data->decoder_ref = av_buffer_ref(cf->decoder_ref);
536 
537  av_pix_fmt_get_chroma_sub_sample(hwctx->sw_format, &shift_h, &shift_v);
538  for (i = 0; frame->linesize[i]; i++) {
539  frame->data[i] = (uint8_t*)(devptr + offset);
540  frame->linesize[i] = pitch;
541  offset += pitch * (frame->height >> (i ? shift_v : 0));
542  }
543 
544  goto finish;
545 
546 copy_fail:
547  if (!frame->buf[1]) {
548  CHECK_CU(decoder->cvdl->cuvidUnmapVideoFrame(decoder->decoder, devptr));
549  av_freep(&unmap_data);
550  } else {
551  av_buffer_unref(&frame->buf[1]);
552  }
553 
554 finish:
555  CHECK_CU(decoder->cudl->cuCtxPopCurrent(&dummy));
556  return ret;
557 }
558 
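/*
 * Per-frame setup: allocate an NVDECFrame, take a surface index from the
 * decoder pool and register nvdec_retrieve_data() as the post-process hook,
 * so mapping the surface is deferred until the frame is actually output.
 */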
559 int ff_nvdec_start_frame(AVCodecContext *avctx, AVFrame *frame)
560 {
561  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
562  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
563  NVDECFrame *cf = NULL;
564  int ret;
565 
566  ctx->bitstream_len = 0;
567  ctx->nb_slices = 0;
568 
569  if (fdd->hwaccel_priv)
570  return 0;
571 
572  cf = av_mallocz(sizeof(*cf));
573  if (!cf)
574  return AVERROR(ENOMEM);
575 
576  cf->decoder_ref = av_buffer_ref(ctx->decoder_ref);
577  if (!cf->decoder_ref) {
578  ret = AVERROR(ENOMEM);
579  goto fail;
580  }
581 
582  cf->idx_ref = av_buffer_pool_get(ctx->decoder_pool);
583  if (!cf->idx_ref) {
584  av_log(avctx, AV_LOG_ERROR, "No decoder surfaces left\n");
585  ret = AVERROR(ENOMEM);
586  goto fail;
587  }
588  cf->ref_idx = cf->idx = *(unsigned int*)cf->idx_ref->data;
589 
590  fdd->hwaccel_priv = cf;
591  fdd->hwaccel_priv_free = nvdec_fdd_priv_free;
592  fdd->post_process = nvdec_retrieve_data;
593 
594  return 0;
595 fail:
596  nvdec_fdd_priv_free(cf);
597  return ret;
598 
599 }
600 
601 int ff_nvdec_start_frame_sep_ref(AVCodecContext *avctx, AVFrame *frame, int has_sep_ref)
602 {
603  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
604  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
605  NVDECFrame *cf;
606  int ret;
607 
608  ret = ff_nvdec_start_frame(avctx, frame);
609  if (ret < 0)
610  return ret;
611 
612  cf = fdd->hwaccel_priv;
613 
614  if (has_sep_ref) {
615  if (!cf->ref_idx_ref) {
616  cf->ref_idx_ref = av_buffer_pool_get(ctx->decoder_pool);
617  if (!cf->ref_idx_ref) {
618  av_log(avctx, AV_LOG_ERROR, "No decoder surfaces left\n");
619  ret = AVERROR(ENOMEM);
620  goto fail;
621  }
622  }
623  cf->ref_idx = *(unsigned int*)cf->ref_idx_ref->data;
624  } else {
625  av_buffer_unref(&cf->ref_idx_ref);
626  cf->ref_idx = cf->idx;
627  }
628 
629  return 0;
630 fail:
631  nvdec_fdd_priv_free(cf);
632  return ret;
633 }
634 
635 int ff_nvdec_end_frame(AVCodecContext *avctx)
636 {
637  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
638  NVDECDecoder *decoder = (NVDECDecoder*)ctx->decoder_ref->data;
639  void *logctx = avctx;
640  CUVIDPICPARAMS *pp = &ctx->pic_params;
641 
642  CUcontext dummy;
643 
644  int ret = 0;
645 
646  pp->nBitstreamDataLen = ctx->bitstream_len;
647  pp->pBitstreamData = ctx->bitstream;
648  pp->nNumSlices = ctx->nb_slices;
649  pp->pSliceDataOffsets = ctx->slice_offsets;
650 
651  ret = CHECK_CU(decoder->cudl->cuCtxPushCurrent(decoder->cuda_ctx));
652  if (ret < 0)
653  return ret;
654 
655  ret = CHECK_CU(decoder->cvdl->cuvidDecodePicture(decoder->decoder, &ctx->pic_params));
656  if (ret < 0)
657  goto finish;
658 
659 finish:
660  CHECK_CU(decoder->cudl->cuCtxPopCurrent(&dummy));
661 
662  return ret;
663 }
664 
665 int ff_nvdec_simple_end_frame(AVCodecContext *avctx)
666 {
667  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
668  int ret = ff_nvdec_end_frame(avctx);
669  ctx->bitstream = NULL;
670  return ret;
671 }
672 
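/*
 * Accumulate one slice: record its offset relative to ctx->bitstream and add
 * its size to the running total. This relies on all slices of a frame
 * pointing into the same contiguous packet buffer, with the first slice
 * defining the base pointer.
 */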
673 int ff_nvdec_simple_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
674  uint32_t size)
675 {
676  NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
677  void *tmp;
678 
679  tmp = av_fast_realloc(ctx->slice_offsets, &ctx->slice_offsets_allocated,
680  (ctx->nb_slices + 1) * sizeof(*ctx->slice_offsets));
681  if (!tmp)
682  return AVERROR(ENOMEM);
683  ctx->slice_offsets = tmp;
684 
685  if (!ctx->bitstream)
686  ctx->bitstream = (uint8_t*)buffer;
687 
688  ctx->slice_offsets[ctx->nb_slices] = buffer - ctx->bitstream;
689  ctx->bitstream_len += size;
690  ctx->nb_slices++;
691 
692  return 0;
693 }
694 
695 int ff_nvdec_frame_params(AVCodecContext *avctx,
696  AVBufferRef *hw_frames_ctx,
697  int dpb_size,
698  int supports_444)
699 {
700  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
701  const AVPixFmtDescriptor *sw_desc;
702  int cuvid_codec_type, cuvid_chroma_format, chroma_444;
703 
704  sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
705  if (!sw_desc)
706  return AVERROR_BUG;
707 
708  cuvid_codec_type = map_avcodec_id(avctx->codec_id);
709  if (cuvid_codec_type < 0) {
710  av_log(avctx, AV_LOG_ERROR, "Unsupported codec ID\n");
711  return AVERROR_BUG;
712  }
713 
714  cuvid_chroma_format = map_chroma_format(avctx->sw_pix_fmt);
715  if (cuvid_chroma_format < 0) {
716  av_log(avctx, AV_LOG_VERBOSE, "Unsupported chroma format\n");
717  return AVERROR(EINVAL);
718  }
719  chroma_444 = supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
720 
721  frames_ctx->format = AV_PIX_FMT_CUDA;
722  frames_ctx->width = (avctx->coded_width + 1) & ~1;
723  frames_ctx->height = (avctx->coded_height + 1) & ~1;
724  /*
725  * We add two extra frames to the pool to account for deinterlacing filters
726  * holding onto their frames.
727  */
728  frames_ctx->initial_pool_size = dpb_size + 2;
729 
730  switch (sw_desc->comp[0].depth) {
731  case 8:
732  frames_ctx->sw_format = chroma_444 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_NV12;
733  break;
734  case 10:
735  frames_ctx->sw_format = chroma_444 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_P010;
736  break;
737  case 12:
738  frames_ctx->sw_format = chroma_444 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_P016;
739  break;
740  default:
741  return AVERROR(EINVAL);
742  }
743 
744  return 0;
745 }
746 
747 int ff_nvdec_get_ref_idx(AVFrame *frame)
748 {
749  FrameDecodeData *fdd;
750  NVDECFrame *cf;
751 
752  if (!frame || !frame->private_ref)
753  return -1;
754 
755  fdd = (FrameDecodeData*)frame->private_ref->data;
756  cf = (NVDECFrame*)fdd->hwaccel_priv;
757  if (!cf)
758  return -1;
759 
760  return cf->ref_idx;
761 }
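The functions above are not called by applications directly; libavcodec selects the NVDEC hwaccel internally once a decoder is opened with a CUDA device attached and get_format() picks AV_PIX_FMT_CUDA. The sketch below shows one minimal way to do that through the public API. It is illustrative only: the helper names open_cuda_decoder() and get_cuda_format() are not part of FFmpeg, and error reporting is kept to a minimum.

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/log.h>
#include <libavutil/pixfmt.h>

/* Prefer the CUDA pixel format when the decoder offers it; libavcodec then
 * initializes the NVDEC hwaccel implemented in this file. */
static enum AVPixelFormat get_cuda_format(AVCodecContext *avctx,
                                          const enum AVPixelFormat *fmts)
{
    for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_CUDA)
            return *p;
    }
    av_log(avctx, AV_LOG_WARNING, "CUDA pixel format not offered.\n");
    return AV_PIX_FMT_NONE;
}

/* Open a decoder for codec_id with a CUDA hardware device attached. */
static AVCodecContext *open_cuda_decoder(enum AVCodecID codec_id)
{
    const AVCodec *codec = avcodec_find_decoder(codec_id);
    AVCodecContext *avctx = NULL;
    AVBufferRef *device_ref = NULL;

    if (!codec || !(avctx = avcodec_alloc_context3(codec)))
        return NULL;

    if (av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_CUDA,
                               NULL, NULL, 0) < 0)
        goto fail;

    avctx->hw_device_ctx = device_ref; /* avctx now owns this reference */
    avctx->get_format    = get_cuda_format;

    if (avcodec_open2(avctx, codec, NULL) < 0)
        goto fail;
    return avctx;

fail:
    avcodec_free_context(&avctx);
    return NULL;
}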