FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <string.h>
25 #include <sys/types.h>
26 
27 #include <mfx/mfxvideo.h>
28 
29 #include "libavutil/common.h"
30 #include "libavutil/hwcontext.h"
31 #include "libavutil/hwcontext_qsv.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/log.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/time.h"
38 #include "libavutil/imgutils.h"
39 
40 #include "avcodec.h"
41 #include "internal.h"
42 #include "decode.h"
43 #include "qsv.h"
44 #include "qsv_internal.h"
45 #include "qsvdec.h"
46 
47 const AVCodecHWConfigInternal *const ff_qsv_hw_configs[] = {
48  &(const AVCodecHWConfigInternal) {
49  .public = {
50  .pix_fmt = AV_PIX_FMT_QSV,
51  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
52  AV_CODEC_HW_CONFIG_METHOD_AD_HOC,
53  .device_type = AV_HWDEVICE_TYPE_QSV,
54  },
55  .hwaccel = NULL,
56  },
57  NULL
58 };
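/* A sketch of what this table means for callers (as far as the flags above
 * indicate): each QSV decoder advertises AV_PIX_FMT_QSV output either via
 * the hw_frames_ctx interface or via the ad-hoc method, i.e. an AVQSVContext
 * passed through AVCodecContext.hwaccel_context; plain system-memory output
 * needs no entry here because it is the default decode path. */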
59 
60 static int ff_qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferPool *pool)
61 {
62  int ret = 0;
63 
64  ff_decode_frame_props(avctx, frame);
65 
66  frame->width = avctx->width;
67  frame->height = avctx->height;
68 
69  switch (avctx->pix_fmt) {
70  case AV_PIX_FMT_NV12:
71  frame->linesize[0] = FFALIGN(avctx->width, 128);
72  break;
73  case AV_PIX_FMT_P010:
74  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
75  break;
76  default:
77  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
78  return AVERROR(EINVAL);
79  }
80 
81  frame->linesize[1] = frame->linesize[0];
82  frame->buf[0] = av_buffer_pool_get(pool);
83  if (!frame->buf[0])
84  return AVERROR(ENOMEM);
85 
86  frame->data[0] = frame->buf[0]->data;
87  frame->data[1] = frame->data[0] +
88  frame->linesize[0] * FFALIGN(avctx->height, 64);
89 
90  ret = ff_attach_decode_data(frame);
91  if (ret < 0)
92  return ret;
93 
94  return 0;
95 }
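/* Worked example for the layout above, assuming a hypothetical 1920x1080
 * NV12 stream (numbers chosen only for illustration): linesize[0] =
 * FFALIGN(1920, 128) = 1920 bytes, and the UV plane starts at
 * 1920 * FFALIGN(1080, 64) = 1920 * 1088 = 2088960 bytes into buf[0], so one
 * decoded frame lives in a single contiguous allocation, which is why this
 * is called a "continuous buffer". */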
96 
97 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
98  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
99 {
100  int ret;
101 
102  if (q->gpu_copy == MFX_GPUCOPY_ON &&
103  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
104  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
105  "only works in system memory mode.\n");
106  q->gpu_copy = MFX_GPUCOPY_OFF;
107  }
108  if (session) {
109  q->session = session;
110  } else if (hw_frames_ref) {
111  if (q->internal_qs.session) {
112  MFXClose(q->internal_qs.session);
113  q->internal_qs.session = NULL;
114  }
115  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
116 
117  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
118  if (!q->frames_ctx.hw_frames_ctx)
119  return AVERROR(ENOMEM);
120 
121  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
122  &q->frames_ctx, q->load_plugins,
123  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
124  q->gpu_copy);
125  if (ret < 0) {
126  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
127  return ret;
128  }
129 
130  q->session = q->internal_qs.session;
131  } else if (hw_device_ref) {
132  if (q->internal_qs.session) {
133  MFXClose(q->internal_qs.session);
134  q->internal_qs.session = NULL;
135  }
136 
137  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
138  hw_device_ref, q->load_plugins, q->gpu_copy);
139  if (ret < 0)
140  return ret;
141 
142  q->session = q->internal_qs.session;
143  } else {
144  if (!q->internal_qs.session) {
145  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
146  q->load_plugins, q->gpu_copy);
147  if (ret < 0)
148  return ret;
149  }
150 
151  q->session = q->internal_qs.session;
152  }
153 
154  /* make sure the decoder is uninitialized */
155  MFXVideoDECODE_Close(q->session);
156 
157  return 0;
158 }
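/* Session selection above follows a fixed precedence: a session supplied by
 * the caller through AVQSVContext wins; otherwise an internal session is
 * created from hw_frames_ref, then from hw_device_ref, and finally a
 * self-contained internal session is the fallback. The trailing
 * MFXVideoDECODE_Close() only makes sure no previously initialized decoder
 * is still attached to whichever session was picked. */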
159 
160 static inline unsigned int qsv_fifo_item_size(void)
161 {
162  return sizeof(mfxSyncPoint*) + sizeof(QSVFrame*);
163 }
164 
165 static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo)
166 {
167  return av_fifo_size(fifo) / qsv_fifo_item_size();
168 }
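/* Each async_fifo entry is a (QSVFrame *, mfxSyncPoint *) pair written
 * back-to-back by qsv_decode(), so qsv_fifo_size() counts the decoded but
 * not yet returned frames currently in flight. */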
169 
170 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
171 {
172  mfxSession session = NULL;
173  int iopattern = 0;
174  int ret;
175  enum AVPixelFormat pix_fmts[3] = {
176  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
177  pix_fmt, /* system memory format obtained from bitstream parser */
178  AV_PIX_FMT_NONE };
179 
180  ret = ff_get_format(avctx, pix_fmts);
181  if (ret < 0) {
182  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
183  return ret;
184  }
185 
186  if (!q->async_fifo) {
187  q->async_fifo = av_fifo_alloc(q->async_depth * qsv_fifo_item_size());
188  if (!q->async_fifo)
189  return AVERROR(ENOMEM);
190  }
191 
192  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
193  AVQSVContext *user_ctx = avctx->hwaccel_context;
194  session = user_ctx->session;
195  iopattern = user_ctx->iopattern;
196  q->ext_buffers = user_ctx->ext_buffers;
197  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
198  }
199 
200  if (avctx->hw_frames_ctx) {
201  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
202  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
203 
204  if (!iopattern) {
205  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
206  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
207  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
208  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
209  }
210  }
211 
212  if (!iopattern)
213  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
214  q->iopattern = iopattern;
215 
216  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
217 
218  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
219  if (ret < 0) {
220  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
221  return ret;
222  }
223 
224  param->IOPattern = q->iopattern;
225  param->AsyncDepth = q->async_depth;
226  param->ExtParam = q->ext_buffers;
227  param->NumExtParam = q->nb_ext_buffers;
228 
229  return 0;
230  }
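/* Rough summary of the preinit step: ff_get_format() offers the caller
 * either AV_PIX_FMT_QSV (opaque/video-memory surfaces) or the software
 * format parsed from the bitstream, and the IOPattern is then taken from
 * the user context or derived from the type of the supplied frames context,
 * falling back to system memory when neither is present. */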
231 
232 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
233 {
234  int ret;
235 
236  avctx->width = param->mfx.FrameInfo.CropW;
237  avctx->height = param->mfx.FrameInfo.CropH;
238  avctx->coded_width = param->mfx.FrameInfo.Width;
239  avctx->coded_height = param->mfx.FrameInfo.Height;
240  avctx->level = param->mfx.CodecLevel;
241  avctx->profile = param->mfx.CodecProfile;
242  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
243  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
244 
245  ret = MFXVideoDECODE_Init(q->session, param);
246  if (ret < 0)
247  return ff_qsv_print_error(avctx, ret,
248  "Error initializing the MFX video decoder");
249 
250  q->frame_info = param->mfx.FrameInfo;
251 
252  if (!avctx->hw_frames_ctx)
253  q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
254  FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
255  return 0;
256 }
257 
258 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
259 {
260  int ret;
261 
262  mfxBitstream bs = { 0 };
263 
264  if (avpkt->size) {
265  bs.Data = avpkt->data;
266  bs.DataLength = avpkt->size;
267  bs.MaxLength = bs.DataLength;
268  bs.TimeStamp = avpkt->pts;
269  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
270  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
271  } else
272  return AVERROR_INVALIDDATA;
273 
274 
275  if(!q->session) {
276  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
277  if (ret < 0)
278  return ret;
279  }
280 
281  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
282  if (ret < 0)
283  return ret;
284 
285  param->mfx.CodecId = ret;
286  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
287  if (MFX_ERR_MORE_DATA == ret) {
288  return AVERROR(EAGAIN);
289  }
290  if (ret < 0)
291  return ff_qsv_print_error(avctx, ret,
292  "Error decoding stream header");
293 
294  return 0;
295 }
296 
297 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
298 {
299  int ret;
300 
301  if (q->pool)
302  ret = ff_qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
303  else
304  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
305 
306  if (ret < 0)
307  return ret;
308 
309  if (frame->frame->format == AV_PIX_FMT_QSV) {
310  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
311  } else {
312  frame->surface.Info = q->frame_info;
313 
314  frame->surface.Data.PitchLow = frame->frame->linesize[0];
315  frame->surface.Data.Y = frame->frame->data[0];
316  frame->surface.Data.UV = frame->frame->data[1];
317  }
318 
319  if (q->frames_ctx.mids) {
320  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
321  if (ret < 0)
322  return ret;
323 
324  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
325  }
326  frame->surface.Data.ExtParam = &frame->ext_param;
327  frame->surface.Data.NumExtParam = 1;
328  frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
329  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
330  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
331 
332  frame->used = 1;
333 
334  return 0;
335 }
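/* For AV_PIX_FMT_QSV frames the mfxFrameSurface1 comes straight from
 * frame->data[3] (filled in by the QSV frames context); for system-memory
 * frames the surface is assembled by hand from the AVFrame planes and pitch.
 * The attached mfxExtDecodedFrameInfo buffer is how the frame type
 * (IDR/I/P/B) is read back after decoding in qsv_decode(). */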
336 
337 static void qsv_clear_unused_frames(QSVContext *q)
338 {
339  QSVFrame *cur = q->work_frames;
340  while (cur) {
341  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
342  cur->used = 0;
343  av_frame_unref(cur->frame);
344  }
345  cur = cur->next;
346  }
347 }
348 
349 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
350 {
351  QSVFrame *frame, **last;
352  int ret;
353 
354  qsv_clear_unused_frames(q);
355 
356  frame = q->work_frames;
357  last = &q->work_frames;
358  while (frame) {
359  if (!frame->used) {
360  ret = alloc_frame(avctx, q, frame);
361  if (ret < 0)
362  return ret;
363  *surf = &frame->surface;
364  return 0;
365  }
366 
367  last = &frame->next;
368  frame = frame->next;
369  }
370 
371  frame = av_mallocz(sizeof(*frame));
372  if (!frame)
373  return AVERROR(ENOMEM);
374  frame->frame = av_frame_alloc();
375  if (!frame->frame) {
376  av_freep(&frame);
377  return AVERROR(ENOMEM);
378  }
379  *last = frame;
380 
381  ret = alloc_frame(avctx, q, frame);
382  if (ret < 0)
383  return ret;
384 
385  *surf = &frame->surface;
386 
387  return 0;
388 }
389 
390 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
391 {
392  QSVFrame *cur = q->work_frames;
393  while (cur) {
394  if (surf == &cur->surface)
395  return cur;
396  cur = cur->next;
397  }
398  return NULL;
399 }
400 
401 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
402  AVFrame *frame, int *got_frame,
403  AVPacket *avpkt)
404 {
405  QSVFrame *out_frame;
406  mfxFrameSurface1 *insurf;
407  mfxFrameSurface1 *outsurf;
408  mfxSyncPoint *sync;
409  mfxBitstream bs = { { { 0 } } };
410  int ret;
411 
412  if (avpkt->size) {
413  bs.Data = avpkt->data;
414  bs.DataLength = avpkt->size;
415  bs.MaxLength = bs.DataLength;
416  bs.TimeStamp = avpkt->pts;
417  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
418  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
419  }
420 
421  sync = av_mallocz(sizeof(*sync));
422  if (!sync) {
423  av_freep(&sync);
424  return AVERROR(ENOMEM);
425  }
426 
427  do {
428  ret = get_surface(avctx, q, &insurf);
429  if (ret < 0) {
430  av_freep(&sync);
431  return ret;
432  }
433 
434  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
435  insurf, &outsurf, sync);
436  if (ret == MFX_WRN_DEVICE_BUSY)
437  av_usleep(500);
438 
439  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
440 
441  if (ret != MFX_ERR_NONE &&
442  ret != MFX_ERR_MORE_DATA &&
443  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
444  ret != MFX_ERR_MORE_SURFACE) {
445  av_freep(&sync);
446  return ff_qsv_print_error(avctx, ret,
447  "Error during QSV decoding.");
448  }
449 
450  /* make sure we do not enter an infinite loop if the SDK
451  * did not consume any data and did not return anything */
452  if (!*sync && !bs.DataOffset) {
453  bs.DataOffset = avpkt->size;
454  ++q->zero_consume_run;
455  if (q->zero_consume_run > 1)
456  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
457  } else if (!*sync && bs.DataOffset) {
458  ++q->buffered_count;
459  } else {
460  q->zero_consume_run = 0;
461  }
462 
463  if (*sync) {
464  QSVFrame *out_frame = find_frame(q, outsurf);
465 
466  if (!out_frame) {
467  av_log(avctx, AV_LOG_ERROR,
468  "The returned surface does not correspond to any frame\n");
469  av_freep(&sync);
470  return AVERROR_BUG;
471  }
472 
473  out_frame->queued = 1;
474  av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
475  av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL);
476  } else {
477  av_freep(&sync);
478  }
479 
480  if ((qsv_fifo_size(q->async_fifo) >= q->async_depth) ||
481  (!avpkt->size && av_fifo_size(q->async_fifo))) {
482  AVFrame *src_frame;
483 
484  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
485  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
486  out_frame->queued = 0;
487 
488  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
489  do {
490  ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
491  } while (ret == MFX_WRN_IN_EXECUTION);
492  }
493 
494  av_freep(&sync);
495 
496  src_frame = out_frame->frame;
497 
498  ret = av_frame_ref(frame, src_frame);
499  if (ret < 0)
500  return ret;
501 
502  outsurf = &out_frame->surface;
503 
504 #if FF_API_PKT_PTS
505 FF_DISABLE_DEPRECATION_WARNINGS
506  frame->pkt_pts = outsurf->Data.TimeStamp;
507 FF_ENABLE_DEPRECATION_WARNINGS
508 #endif
509  frame->pts = outsurf->Data.TimeStamp;
510 
511  frame->repeat_pict =
512  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
513  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
514  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
515  frame->top_field_first =
516  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
517  frame->interlaced_frame =
518  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
519  frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
520  // Marking only IDR frames as key frames is correct for H.264 only; for HEVC, IRAP frames are key frames.
521  if (avctx->codec_id == AV_CODEC_ID_H264)
522  frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
523 
524  /* update the surface properties */
525  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
526  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
527 
528  *got_frame = 1;
529  }
530 
531  return bs.DataOffset;
532 }
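/* Decode flow in qsv_decode(), roughly: DecodeFrameAsync() is retried while
 * the device is busy or asks for more surfaces, each returned sync point is
 * queued together with its QSVFrame, and a frame is only synced and handed
 * to the caller once async_depth frames are queued or the stream is being
 * flushed (empty input packet). The return value is the number of input
 * bytes the SDK consumed, i.e. bs.DataOffset. */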
533 
534 int ff_qsv_decode_close(QSVContext *q)
535 {
536  QSVFrame *cur = q->work_frames;
537 
538  if (q->session)
539  MFXVideoDECODE_Close(q->session);
540 
541  while (q->async_fifo && av_fifo_size(q->async_fifo)) {
542  QSVFrame *out_frame;
543  mfxSyncPoint *sync;
544 
545  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
546  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
547 
548  av_freep(&sync);
549  }
550 
551  while (cur) {
552  q->work_frames = cur->next;
553  av_frame_free(&cur->frame);
554  av_freep(&cur);
555  cur = q->work_frames;
556  }
557 
558  av_fifo_free(q->async_fifo);
559  q->async_fifo = NULL;
560 
561  ff_qsv_close_internal_session(&q->internal_qs);
562 
563  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
564  av_buffer_unref(&q->frames_ctx.mids_buf);
565  av_buffer_pool_uninit(&q->pool);
566 
567  return 0;
568 }
569 
570 int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
571  AVFrame *frame, int *got_frame, AVPacket *pkt)
572 {
573  int ret;
574  mfxVideoParam param = { 0 };
575  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
576 
577  if (!pkt->size)
578  return qsv_decode(avctx, q, frame, got_frame, pkt);
579 
580  /* TODO: flush delayed frames on reinit */
581 
582  // sw_pix_fmt and coded_width/height must be set before calling ff_get_format();
583  // assume sw_pix_fmt is NV12 and coded_width/height is 1280x720 for now.
584  // The assumption may be wrong, but it is corrected once the header has been decoded.
585  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
586  pix_fmt = q->orig_pix_fmt;
587  if (!avctx->coded_width)
588  avctx->coded_width = 1280;
589  if (!avctx->coded_height)
590  avctx->coded_height = 720;
591 
592  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
593 
594  if (ret >= 0 && (q->orig_pix_fmt != ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC) ||
595  avctx->coded_width != param.mfx.FrameInfo.Width ||
596  avctx->coded_height != param.mfx.FrameInfo.Height)) {
597  AVPacket zero_pkt = {0};
598 
599  if (q->buffered_count) {
600  q->reinit_flag = 1;
601  /* decode zero-size pkt to flush the buffered pkt before reinit */
602  q->buffered_count--;
603  return qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
604  }
605  q->reinit_flag = 0;
606 
607  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
608 
609  avctx->coded_width = param.mfx.FrameInfo.Width;
610  avctx->coded_height = param.mfx.FrameInfo.Height;
611 
612  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
613  if (ret < 0)
614  goto reinit_fail;
615  q->initialized = 0;
616  }
617 
618  if (!q->initialized) {
619  ret = qsv_decode_init_context(avctx, q, &param);
620  if (ret < 0)
621  goto reinit_fail;
622  q->initialized = 1;
623  }
624 
625  return qsv_decode(avctx, q, frame, got_frame, pkt);
626 
627 reinit_fail:
628  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
629  return ret;
630 }
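/* Reinitialization, as handled above: when DecodeHeader() reports a new
 * FourCC or resolution, frames still buffered inside the SDK are drained
 * first by feeding zero-size packets (reinit_flag), and only afterwards are
 * the preinit step and MFXVideoDECODE_Init() redone with the new
 * parameters. */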
631 
632 void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
633 {
634  ff_qsv_decode_close(q);
635  q->initialized = 0;
636 }
637 
638 enum LoadPlugin {
639  LOAD_PLUGIN_NONE,
640  LOAD_PLUGIN_HEVC_SW,
641  LOAD_PLUGIN_HEVC_HW,
642 };
643 
644 typedef struct QSVDecContext {
645  AVClass *class;
646  QSVContext qsv;
647 
648  int load_plugin;
649 
650  AVFifoBuffer *packet_fifo;
651 
652  AVPacket buffer_pkt;
653 } QSVDecContext;
654 
655 static void qsv_clear_buffers(QSVDecContext *s)
656 {
657  AVPacket pkt;
658  while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
659  av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
660  av_packet_unref(&pkt);
661  }
662 
663  av_packet_unref(&s->buffer_pkt);
664 }
665 
666 static av_cold int qsv_decode_close(AVCodecContext *avctx)
667 {
668  QSVDecContext *s = avctx->priv_data;
669 
670  av_freep(&s->qsv.load_plugins);
671 
672  ff_qsv_decode_close(&s->qsv);
673 
674  qsv_clear_buffers(s);
675 
676  av_fifo_free(s->packet_fifo);
677 
678  return 0;
679 }
680 
681 static av_cold int qsv_decode_init(AVCodecContext *avctx)
682 {
683  QSVDecContext *s = avctx->priv_data;
684  int ret;
685  const char *uid = NULL;
686 
687  if (avctx->codec_id == AV_CODEC_ID_VP8) {
688  uid = "f622394d8d87452f878c51f2fc9b4131";
689  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
690  uid = "a922394d8d87452f878c51f2fc9b4131";
691  }
692  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
693  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
694  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
695 
696  if (s->qsv.load_plugins[0]) {
697  av_log(avctx, AV_LOG_WARNING,
698  "load_plugins is not empty, but load_plugin is not set to 'none'. "
699  "The load_plugin value will be ignored.\n");
700  } else {
701  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
702  uid = uid_hevcdec_sw;
703  else
704  uid = uid_hevcdec_hw;
705  }
706  }
707  if (uid) {
708  av_freep(&s->qsv.load_plugins);
709  s->qsv.load_plugins = av_strdup(uid);
710  if (!s->qsv.load_plugins)
711  return AVERROR(ENOMEM);
712  }
713 
714  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
715  s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
716  if (!s->packet_fifo) {
717  ret = AVERROR(ENOMEM);
718  goto fail;
719  }
720 
721  return 0;
722 fail:
723  qsv_decode_close(avctx);
724  return ret;
725 }
726 
727 static int qsv_decode_frame(AVCodecContext *avctx, void *data,
728  int *got_frame, AVPacket *avpkt)
729 {
730  QSVDecContext *s = avctx->priv_data;
731  AVFrame *frame = data;
732  int ret;
733 
734  /* buffer the input packet */
735  if (avpkt->size) {
736  AVPacket input_ref;
737 
738  if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
739  ret = av_fifo_realloc2(s->packet_fifo,
740  av_fifo_size(s->packet_fifo) + sizeof(input_ref));
741  if (ret < 0)
742  return ret;
743  }
744 
745  ret = av_packet_ref(&input_ref, avpkt);
746  if (ret < 0)
747  return ret;
748  av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
749  }
750 
751  /* process buffered data */
752  while (!*got_frame) {
753  /* prepare the input data */
754  if (s->buffer_pkt.size <= 0) {
755  /* no more data */
756  if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
757  return avpkt->size ? avpkt->size : ff_qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
758  /* while reinit is in progress, do not read from the fifo; keep buffer_pkt */
759  if (!s->qsv.reinit_flag) {
760  av_packet_unref(&s->buffer_pkt);
761  av_fifo_generic_read(s->packet_fifo, &s->buffer_pkt, sizeof(s->buffer_pkt), NULL);
762  }
763  }
764 
765  ret = ff_qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
766  if (ret < 0){
767  /* Drop buffer_pkt if the packet failed to decode; otherwise the
768  decoder would keep retrying the same failing packet. */
769  av_packet_unref(&s->buffer_pkt);
770  return ret;
771  }
772  if (s->qsv.reinit_flag)
773  continue;
774 
775  s->buffer_pkt.size -= ret;
776  s->buffer_pkt.data += ret;
777  }
778 
779  return avpkt->size;
780 }
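/* The wrapper keeps its own packet fifo because a single input packet may
 * need several decode calls to be consumed: buffer_pkt is advanced by the
 * number of bytes each ff_qsv_process_data() call reports as consumed, and
 * the next packet is popped from packet_fifo only once buffer_pkt is empty
 * and no reinit is pending. */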
781 
782 static void qsv_decode_flush(AVCodecContext *avctx)
783 {
784  QSVDecContext *s = avctx->priv_data;
785 
786  qsv_clear_buffers(s);
787  ff_qsv_decode_flush(avctx, &s->qsv);
788 }
789 
790 #define OFFSET(x) offsetof(QSVDecContext, x)
791 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
792 
793 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
794 static const AVClass x##_qsv_class = { \
795  .class_name = #x "_qsv", \
796  .item_name = av_default_item_name, \
797  .option = opt, \
798  .version = LIBAVUTIL_VERSION_INT, \
799 }; \
800 AVCodec ff_##x##_qsv_decoder = { \
801  .name = #x "_qsv", \
802  .long_name = NULL_IF_CONFIG_SMALL(#X " video (Intel Quick Sync Video acceleration)"), \
803  .priv_data_size = sizeof(QSVDecContext), \
804  .type = AVMEDIA_TYPE_VIDEO, \
805  .id = AV_CODEC_ID_##X, \
806  .init = qsv_decode_init, \
807  .decode = qsv_decode_frame, \
808  .flush = qsv_decode_flush, \
809  .close = qsv_decode_close, \
810  .bsfs = bsf_name, \
811  .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
812  .priv_class = &x##_qsv_class, \
813  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
814  AV_PIX_FMT_P010, \
815  AV_PIX_FMT_QSV, \
816  AV_PIX_FMT_NONE }, \
817  .hw_configs = ff_qsv_hw_configs, \
818  .wrapper_name = "qsv", \
819 }; \
820 
821 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
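/* Illustrative expansion (paraphrased, not the literal preprocessor output):
 * DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb") defines an AVClass
 * named h264_qsv_class plus an AVCodec ff_h264_qsv_decoder with
 * .name = "h264_qsv", .id = AV_CODEC_ID_H264, the shared options[] table,
 * and the h264_mp4toannexb bitstream filter applied to incoming packets
 * before they reach qsv_decode_frame(). */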
822 
823 #if CONFIG_HEVC_QSV_DECODER
824 static const AVOption hevc_options[] = {
825  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
826 
827  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, "load_plugin" },
828  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, "load_plugin" },
829  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, "load_plugin" },
830  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, "load_plugin" },
831 
832  { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
833  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
834 
835  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
836  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
837  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
838  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
839  { NULL },
840 };
841 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
842 #endif
843 
844 static const AVOption options[] = {
845  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
846 
847  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
848  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
849  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
850  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
851  { NULL },
852 };
853 
854 #if CONFIG_H264_QSV_DECODER
855 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
856 #endif
857 
858 #if CONFIG_MPEG2_QSV_DECODER
859 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
860 #endif
861 
862 #if CONFIG_VC1_QSV_DECODER
863 DEFINE_QSV_DECODER(vc1, VC1, NULL)
864 #endif
865 
866 #if CONFIG_MJPEG_QSV_DECODER
867 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
868 #endif
869 
870 #if CONFIG_VP8_QSV_DECODER
871 DEFINE_QSV_DECODER(vp8, VP8, NULL)
872 #endif
873 
874 #if CONFIG_VP9_QSV_DECODER
875 DEFINE_QSV_DECODER(vp9, VP9, NULL)
876 #endif
877 
878 #if CONFIG_AV1_QSV_DECODER
879 DEFINE_QSV_DECODER(av1, AV1, NULL)
880 #endif
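/* Typical use of these wrappers (illustrative, not part of this file): the
 * ffmpeg CLI selects them explicitly, e.g. "ffmpeg -hwaccel qsv -c:v
 * h264_qsv -i input.mp4 ...", while API users request the "h264_qsv" (or
 * similar) decoder by name and may pass an AVQSVContext via hwaccel_context
 * or set hw_device_ctx / hw_frames_ctx, which qsv_decode_preinit() picks up
 * as shown above. */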