FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <stdint.h>
25 #include <string.h>
26 #include <sys/types.h>
27 
28 #include <mfx/mfxvideo.h>
29 
30 #include "libavutil/common.h"
31 #include "libavutil/fifo.h"
32 #include "libavutil/frame.h"
33 #include "libavutil/hwcontext.h"
34 #include "libavutil/hwcontext_qsv.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/log.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/time.h"
40 #include "libavutil/imgutils.h"
41 
42 #include "avcodec.h"
43 #include "internal.h"
44 #include "decode.h"
45 #include "hwconfig.h"
46 #include "qsv.h"
47 #include "qsv_internal.h"
48 
49 static const AVRational mfx_tb = { 1, 90000 };
50 
51 #define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
52  MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
53  av_rescale_q(pts, pts_tb, mfx_tb) : pts)
54 
55 #define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
56  AV_NOPTS_VALUE : pts_tb.num ? \
57  av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
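/*
 * NOTE: libmfx expresses timestamps in a fixed 90 kHz timebase (mfx_tb above),
 * while packets carry pts in avctx->pkt_timebase. These macros rescale between
 * the two and map AV_NOPTS_VALUE <-> MFX_TIMESTAMP_UNKNOWN; when no packet
 * timebase is set, the value is passed through unchanged. For example, a pts
 * of 40 in a 1/1000 (millisecond) timebase becomes 40 * 90000 / 1000 = 3600
 * MFX ticks.
 */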
58 
59 typedef struct QSVContext {
60  // the session used for decoding
61  mfxSession session;
62 
63  // the session we allocated internally, in case the caller did not provide
64  // one
65  QSVSession internal_qs;
66 
67  QSVFramesContext frames_ctx;
68 
69  /**
70  * a linked list of frames currently being used by QSV
71  */
72  QSVFrame *work_frames;
73 
74  AVFifoBuffer *async_fifo;
75  int zero_consume_run;
76  int buffered_count;
77  int reinit_flag;
78 
79  enum AVPixelFormat orig_pix_fmt;
80  uint32_t fourcc;
81  mfxFrameInfo frame_info;
82  AVBufferPool *pool;
83 
84  int initialized;
85 
86  // options set by the caller
87  int async_depth;
88  int iopattern;
89  int gpu_copy;
90 
91  char *load_plugins;
92 
93  mfxExtBuffer **ext_buffers;
94  int nb_ext_buffers;
95 } QSVContext;
96 
97 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
98  &(const AVCodecHWConfigInternal) {
99  .public = {
100  .pix_fmt = AV_PIX_FMT_QSV,
101  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
102  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
103  .device_type = AV_HWDEVICE_TYPE_QSV,
104  },
105  .hwaccel = NULL,
106  },
107  NULL
108 };
109 
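/*
 * NOTE: for system-memory output, frames are carved out of a single buffer
 * pool allocation below: the luma stride is aligned to 128 bytes and the
 * chroma plane is placed after a height aligned to 64 lines, so Y and UV live
 * in one contiguous buffer. The alignment values presumably match what the
 * MediaSDK GPU-copy path expects; only NV12 and P010 layouts are handled here.
 */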
110 static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
111  AVBufferPool *pool)
112 {
113  int ret = 0;
114 
115  ff_decode_frame_props(avctx, frame);
116 
117  frame->width = avctx->width;
118  frame->height = avctx->height;
119 
120  switch (avctx->pix_fmt) {
121  case AV_PIX_FMT_NV12:
122  frame->linesize[0] = FFALIGN(avctx->width, 128);
123  break;
124  case AV_PIX_FMT_P010:
125  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
126  break;
127  default:
128  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
129  return AVERROR(EINVAL);
130  }
131 
132  frame->linesize[1] = frame->linesize[0];
133  frame->buf[0] = av_buffer_pool_get(pool);
134  if (!frame->buf[0])
135  return AVERROR(ENOMEM);
136 
137  frame->data[0] = frame->buf[0]->data;
138  frame->data[1] = frame->data[0] +
139  frame->linesize[0] * FFALIGN(avctx->height, 64);
140 
141  ret = ff_attach_decode_data(frame);
142  if (ret < 0)
143  return ret;
144 
145  return 0;
146 }
147 
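/*
 * NOTE: session selection order, as implemented below: an explicit session
 * passed by the caller wins; otherwise a session is derived from
 * hw_frames_ctx, then from hw_device_ctx, and finally a purely internal
 * session is created. Any previously created internal session is closed
 * before a new one is derived.
 */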
148 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
149  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
150 {
151  int ret;
152 
153  if (q->gpu_copy == MFX_GPUCOPY_ON &&
154  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
155  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
156  "only works in system memory mode.\n");
157  q->gpu_copy = MFX_GPUCOPY_OFF;
158  }
159  if (session) {
160  q->session = session;
161  } else if (hw_frames_ref) {
162  if (q->internal_qs.session) {
163  MFXClose(q->internal_qs.session);
164  q->internal_qs.session = NULL;
165  }
166  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
167 
168  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
169  if (!q->frames_ctx.hw_frames_ctx)
170  return AVERROR(ENOMEM);
171 
172  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
173  &q->frames_ctx, q->load_plugins,
174  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
175  q->gpu_copy);
176  if (ret < 0) {
177  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
178  return ret;
179  }
180 
181  q->session = q->internal_qs.session;
182  } else if (hw_device_ref) {
183  if (q->internal_qs.session) {
184  MFXClose(q->internal_qs.session);
185  q->internal_qs.session = NULL;
186  }
187 
188  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
189  hw_device_ref, q->load_plugins, q->gpu_copy);
190  if (ret < 0)
191  return ret;
192 
193  q->session = q->internal_qs.session;
194  } else {
195  if (!q->internal_qs.session) {
196  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
197  q->load_plugins, q->gpu_copy);
198  if (ret < 0)
199  return ret;
200  }
201 
202  q->session = q->internal_qs.session;
203  }
204 
205  /* make sure the decoder is uninitialized */
206  MFXVideoDECODE_Close(q->session);
207 
208  return 0;
209 }
210 
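/*
 * NOTE: async_fifo stores one item per frame that has been submitted to the
 * decoder but not yet synced: a QSVFrame pointer followed by a pointer to its
 * mfxSyncPoint. These helpers convert between the fifo's byte size and the
 * number of such items.
 */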
211 static inline unsigned int qsv_fifo_item_size(void)
212 {
213  return sizeof(mfxSyncPoint*) + sizeof(QSVFrame*);
214 }
215 
216 static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo)
217 {
218  return av_fifo_size(fifo) / qsv_fifo_item_size();
219 }
220 
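/*
 * NOTE: preinit runs ff_get_format() to choose between AV_PIX_FMT_QSV and the
 * software format, allocates the async fifo, adopts a caller-provided session
 * and ext buffers from AVQSVContext if present, derives a frames context from
 * hw_device_ctx when only a device was supplied, picks the IOPattern (opaque,
 * video or system memory) and finally creates the MFX session.
 */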
221 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
222 {
223  mfxSession session = NULL;
224  int iopattern = 0;
225  int ret;
226  enum AVPixelFormat pix_fmts[3] = {
227  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
228  pix_fmt, /* system memory format obtained from bitstream parser */
229  AV_PIX_FMT_NONE };
230 
231  ret = ff_get_format(avctx, pix_fmts);
232  if (ret < 0) {
233  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
234  return ret;
235  }
236 
237  if (!q->async_fifo) {
238  q->async_fifo = av_fifo_alloc(q->async_depth * qsv_fifo_item_size());
239  if (!q->async_fifo)
240  return AVERROR(ENOMEM);
241  }
242 
243  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
244  AVQSVContext *user_ctx = avctx->hwaccel_context;
245  session = user_ctx->session;
246  iopattern = user_ctx->iopattern;
247  q->ext_buffers = user_ctx->ext_buffers;
248  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
249  }
250 
251  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
252  AVHWFramesContext *hwframes_ctx;
253  AVQSVFramesContext *frames_hwctx;
254 
255  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
256 
257  if (!avctx->hw_frames_ctx) {
258  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
259  return AVERROR(ENOMEM);
260  }
261 
262  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
263  frames_hwctx = hwframes_ctx->hwctx;
264  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
265  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
266  hwframes_ctx->format = AV_PIX_FMT_QSV;
267  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
268  hwframes_ctx->initial_pool_size = 64 + avctx->extra_hw_frames;
269  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
270 
271  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
272 
273  if (ret < 0) {
274  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
275  av_buffer_unref(&avctx->hw_frames_ctx);
276  return ret;
277  }
278  }
279 
280  if (avctx->hw_frames_ctx) {
281  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
282  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
283 
284  if (!iopattern) {
285  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
286  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
287  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
288  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
289  }
290  }
291 
292  if (!iopattern)
293  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
294  q->iopattern = iopattern;
295 
296  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
297 
298  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
299  if (ret < 0) {
300  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
301  return ret;
302  }
303 
304  param->IOPattern = q->iopattern;
305  param->AsyncDepth = q->async_depth;
306  param->ExtParam = q->ext_buffers;
307  param->NumExtParam = q->nb_ext_buffers;
308 
309  return 0;
310  }
311 
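/*
 * NOTE: once the header has been parsed, the stream properties from
 * mfxVideoParam (dimensions, level, profile, field order, pixel format) are
 * copied into the AVCodecContext, the MFX decoder is initialized, and for
 * system-memory output (no hw_frames_ctx) the contiguous frame pool is
 * created.
 */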
312 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
313 {
314  int ret;
315 
316  avctx->width = param->mfx.FrameInfo.CropW;
317  avctx->height = param->mfx.FrameInfo.CropH;
318  avctx->coded_width = param->mfx.FrameInfo.Width;
319  avctx->coded_height = param->mfx.FrameInfo.Height;
320  avctx->level = param->mfx.CodecLevel;
321  avctx->profile = param->mfx.CodecProfile;
322  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
323  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
324 
325  ret = MFXVideoDECODE_Init(q->session, param);
326  if (ret < 0)
327  return ff_qsv_print_error(avctx, ret,
328  "Error initializing the MFX video decoder");
329 
330  q->frame_info = param->mfx.FrameInfo;
331 
332  if (!avctx->hw_frames_ctx)
333  q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
334  FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
335  return 0;
336 }
337 
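/*
 * NOTE: the header parser maps the AVCodecID to an MFX codec id and calls
 * MFXVideoDECODE_DecodeHeader() with a temporary mfxExtVideoSignalInfo ext
 * buffer attached, which is used to fill color range, primaries, transfer and
 * matrix on the AVCodecContext. MFX_ERR_MORE_DATA is turned into EAGAIN so the
 * caller can feed more input.
 */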
338 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
339  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
340  mfxVideoParam *param)
341 {
342  int ret;
343  mfxExtVideoSignalInfo video_signal_info = { 0 };
344  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
345  mfxBitstream bs = { 0 };
346 
347  if (avpkt->size) {
348  bs.Data = avpkt->data;
349  bs.DataLength = avpkt->size;
350  bs.MaxLength = bs.DataLength;
351  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
352  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
353  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
354  } else
355  return AVERROR_INVALIDDATA;
356 
357 
358  if(!q->session) {
359  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
360  if (ret < 0)
361  return ret;
362  }
363 
364  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
365  if (ret < 0)
366  return ret;
367 
368  param->mfx.CodecId = ret;
369  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
370  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
371  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
372  // so do not append this buffer to the existing buffer array
373  param->ExtParam = header_ext_params;
374  param->NumExtParam = 1;
375  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
376  if (MFX_ERR_MORE_DATA == ret) {
377  return AVERROR(EAGAIN);
378  }
379  if (ret < 0)
380  return ff_qsv_print_error(avctx, ret,
381  "Error decoding stream header");
382 
383  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
384 
385  if (video_signal_info.ColourDescriptionPresent) {
386  avctx->color_primaries = video_signal_info.ColourPrimaries;
387  avctx->color_trc = video_signal_info.TransferCharacteristics;
388  avctx->colorspace = video_signal_info.MatrixCoefficients;
389  }
390 
391  param->ExtParam = q->ext_buffers;
392  param->NumExtParam = q->nb_ext_buffers;
393 
394  return 0;
395 }
396 
397 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
398 {
399  int ret;
400 
401  if (q->pool)
402  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
403  else
404  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
405 
406  if (ret < 0)
407  return ret;
408 
409  if (frame->frame->format == AV_PIX_FMT_QSV) {
410  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
411  } else {
412  frame->surface.Info = q->frame_info;
413 
414  frame->surface.Data.PitchLow = frame->frame->linesize[0];
415  frame->surface.Data.Y = frame->frame->data[0];
416  frame->surface.Data.UV = frame->frame->data[1];
417  }
418 
419  if (q->frames_ctx.mids) {
420  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
421  if (ret < 0)
422  return ret;
423 
424  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
425  }
426  frame->surface.Data.ExtParam = &frame->ext_param;
427  frame->surface.Data.NumExtParam = 1;
428  frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
429  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
430  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
431 
432  frame->used = 1;
433 
434  return 0;
435 }
436 
437 static void qsv_clear_unused_frames(QSVContext *q)
438 {
439  QSVFrame *cur = q->work_frames;
440  while (cur) {
441  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
442  cur->used = 0;
443  av_frame_unref(cur->frame);
444  }
445  cur = cur->next;
446  }
447 }
448 
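/*
 * NOTE: surfaces handed to DecodeFrameAsync() come from the work_frames list:
 * an unused entry is recycled if available, otherwise a new QSVFrame is
 * appended. Entries are only reclaimed once the SDK has unlocked them and they
 * are no longer queued in async_fifo (see qsv_clear_unused_frames()).
 */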
449 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
450 {
451  QSVFrame *frame, **last;
452  int ret;
453 
454  qsv_clear_unused_frames(q);
455 
456  frame = q->work_frames;
457  last = &q->work_frames;
458  while (frame) {
459  if (!frame->used) {
460  ret = alloc_frame(avctx, q, frame);
461  if (ret < 0)
462  return ret;
463  *surf = &frame->surface;
464  return 0;
465  }
466 
467  last = &frame->next;
468  frame = frame->next;
469  }
470 
471  frame = av_mallocz(sizeof(*frame));
472  if (!frame)
473  return AVERROR(ENOMEM);
474  frame->frame = av_frame_alloc();
475  if (!frame->frame) {
476  av_freep(&frame);
477  return AVERROR(ENOMEM);
478  }
479  *last = frame;
480 
481  ret = alloc_frame(avctx, q, frame);
482  if (ret < 0)
483  return ret;
484 
485  *surf = &frame->surface;
486 
487  return 0;
488 }
489 
490 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
491 {
492  QSVFrame *cur = q->work_frames;
493  while (cur) {
494  if (surf == &cur->surface)
495  return cur;
496  cur = cur->next;
497  }
498  return NULL;
499 }
500 
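/*
 * NOTE: one qsv_decode() call performs a single DecodeFrameAsync() submission,
 * retrying while the device is busy or more surfaces are requested. A produced
 * (frame, sync point) pair is queued in async_fifo; output is only synced and
 * returned once async_depth pairs are queued, or when flushing with an empty
 * packet. The return value is the number of bitstream bytes consumed
 * (bs.DataOffset).
 */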
501 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
502  AVFrame *frame, int *got_frame,
503  const AVPacket *avpkt)
504 {
505  QSVFrame *out_frame;
506  mfxFrameSurface1 *insurf;
507  mfxFrameSurface1 *outsurf;
508  mfxSyncPoint *sync;
509  mfxBitstream bs = { { { 0 } } };
510  int ret;
511 
512  if (avpkt->size) {
513  bs.Data = avpkt->data;
514  bs.DataLength = avpkt->size;
515  bs.MaxLength = bs.DataLength;
516  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
517  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
518  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
519  }
520 
521  sync = av_mallocz(sizeof(*sync));
522  if (!sync) {
523  av_freep(&sync);
524  return AVERROR(ENOMEM);
525  }
526 
527  do {
528  ret = get_surface(avctx, q, &insurf);
529  if (ret < 0) {
530  av_freep(&sync);
531  return ret;
532  }
533 
534  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
535  insurf, &outsurf, sync);
536  if (ret == MFX_WRN_DEVICE_BUSY)
537  av_usleep(500);
538 
539  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
540 
541  if (ret != MFX_ERR_NONE &&
542  ret != MFX_ERR_MORE_DATA &&
543  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
544  ret != MFX_ERR_MORE_SURFACE) {
545  av_freep(&sync);
546  return ff_qsv_print_error(avctx, ret,
547  "Error during QSV decoding.");
548  }
549 
550  /* make sure we do not enter an infinite loop if the SDK
551  * did not consume any data and did not return anything */
552  if (!*sync && !bs.DataOffset) {
553  bs.DataOffset = avpkt->size;
554  ++q->zero_consume_run;
555  if (q->zero_consume_run > 1)
556  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
557  } else if (!*sync && bs.DataOffset) {
558  ++q->buffered_count;
559  } else {
560  q->zero_consume_run = 0;
561  }
562 
563  if (*sync) {
564  QSVFrame *out_frame = find_frame(q, outsurf);
565 
566  if (!out_frame) {
567  av_log(avctx, AV_LOG_ERROR,
568  "The returned surface does not correspond to any frame\n");
569  av_freep(&sync);
570  return AVERROR_BUG;
571  }
572 
573  out_frame->queued = 1;
574  av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
575  av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL);
576  } else {
577  av_freep(&sync);
578  }
579 
580  if ((qsv_fifo_size(q->async_fifo) >= q->async_depth) ||
581  (!avpkt->size && av_fifo_size(q->async_fifo))) {
582  AVFrame *src_frame;
583 
584  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
585  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
586  out_frame->queued = 0;
587 
588  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
589  do {
590  ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
591  } while (ret == MFX_WRN_IN_EXECUTION);
592  }
593 
594  av_freep(&sync);
595 
596  src_frame = out_frame->frame;
597 
598  ret = av_frame_ref(frame, src_frame);
599  if (ret < 0)
600  return ret;
601 
602  outsurf = &out_frame->surface;
603 
604  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
605 
606  frame->repeat_pict =
607  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
608  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
609  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
610  frame->top_field_first =
611  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
612  frame->interlaced_frame =
613  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
614  frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
615  // Treating only IDR frames as key frames is suitable for H.264 only; for HEVC, all IRAPs are key frames.
616  if (avctx->codec_id == AV_CODEC_ID_H264)
617  frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
618 
619  /* update the surface properties */
620  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
621  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
622 
623  *got_frame = 1;
624  }
625 
626  return bs.DataOffset;
627 }
628 
629 static void qsv_decode_close_qsvcontext(QSVContext *q)
630 {
631  QSVFrame *cur = q->work_frames;
632 
633  if (q->session)
634  MFXVideoDECODE_Close(q->session);
635 
636  while (q->async_fifo && av_fifo_size(q->async_fifo)) {
637  QSVFrame *out_frame;
638  mfxSyncPoint *sync;
639 
640  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
641  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
642 
643  av_freep(&sync);
644  }
645 
646  while (cur) {
647  q->work_frames = cur->next;
648  av_frame_free(&cur->frame);
649  av_freep(&cur);
650  cur = q->work_frames;
651  }
652 
653  av_fifo_free(q->async_fifo);
654  q->async_fifo = NULL;
655 
656  ff_qsv_close_internal_session(&q->internal_qs);
657 
658  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
659  av_buffer_unref(&q->frames_ctx.mids_buf);
660  av_buffer_pool_uninit(&q->pool);
661 }
662 
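/*
 * NOTE: before decoding each packet, the header is (re)parsed. If the pixel
 * format or the coded dimensions reported by the header differ from the
 * current ones, buffered output is drained first (reinit_flag), then the
 * decoder is reinitialized with the new parameters before the packet is
 * decoded.
 */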
663 static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
664  AVFrame *frame, int *got_frame, const AVPacket *pkt)
665 {
666  int ret;
667  mfxVideoParam param = { 0 };
668  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
669 
670  if (!pkt->size)
671  return qsv_decode(avctx, q, frame, got_frame, pkt);
672 
673  /* TODO: flush delayed frames on reinit */
674 
675  // sw_pix_fmt and coded_width/height should be set for ff_get_format();
676  // assume sw_pix_fmt is NV12 and coded_width/height are 1280x720.
677  // The assumption may be wrong, but it is corrected after the header has been decoded.
678  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
679  pix_fmt = q->orig_pix_fmt;
680  if (!avctx->coded_width)
681  avctx->coded_width = 1280;
682  if (!avctx->coded_height)
683  avctx->coded_height = 720;
684 
685  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
686 
687  if (ret >= 0 && (q->orig_pix_fmt != ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC) ||
688  avctx->coded_width != param.mfx.FrameInfo.Width ||
689  avctx->coded_height != param.mfx.FrameInfo.Height)) {
690  AVPacket zero_pkt = {0};
691 
692  if (q->buffered_count) {
693  q->reinit_flag = 1;
694  /* decode zero-size pkt to flush the buffered pkt before reinit */
695  q->buffered_count--;
696  return qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
697  }
698  q->reinit_flag = 0;
699 
700  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
701 
702  avctx->coded_width = param.mfx.FrameInfo.Width;
703  avctx->coded_height = param.mfx.FrameInfo.Height;
704 
705  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
706  if (ret < 0)
707  goto reinit_fail;
708  q->initialized = 0;
709  }
710 
711  if (!q->initialized) {
712  ret = qsv_decode_init_context(avctx, q, &param);
713  if (ret < 0)
714  goto reinit_fail;
715  q->initialized = 1;
716  }
717 
718  return qsv_decode(avctx, q, frame, got_frame, pkt);
719 
720 reinit_fail:
721  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
722  return ret;
723 }
724 
725 enum LoadPlugin {
726  LOAD_PLUGIN_NONE,
727  LOAD_PLUGIN_HEVC_SW,
728  LOAD_PLUGIN_HEVC_HW,
729 };
730 
731 typedef struct QSVDecContext {
732  AVClass *class;
733  QSVContext qsv;
734 
735  int load_plugin;
736 
737  AVFifoBuffer *packet_fifo;
738 
739  AVPacket buffer_pkt;
740 } QSVDecContext;
741 
742 static void qsv_clear_buffers(QSVDecContext *s)
743 {
744  AVPacket pkt;
745  while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
746  av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
747  av_packet_unref(&pkt);
748  }
749 
750  av_packet_unref(&s->buffer_pkt);
751 }
752 
753 static av_cold int qsv_decode_close(AVCodecContext *avctx)
754 {
755  QSVDecContext *s = avctx->priv_data;
756 
757  av_freep(&s->qsv.load_plugins);
758 
759  qsv_decode_close_qsvcontext(&s->qsv);
760 
761  qsv_clear_buffers(s);
762 
763  av_fifo_free(s->packet_fifo);
764 
765  return 0;
766 }
767 
768 static av_cold int qsv_decode_init(AVCodecContext *avctx)
769 {
770  QSVDecContext *s = avctx->priv_data;
771  int ret;
772  const char *uid = NULL;
773 
774  if (avctx->codec_id == AV_CODEC_ID_VP8) {
775  uid = "f622394d8d87452f878c51f2fc9b4131";
776  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
777  uid = "a922394d8d87452f878c51f2fc9b4131";
778  }
779  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
780  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
781  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
782 
783  if (s->qsv.load_plugins[0]) {
784  av_log(avctx, AV_LOG_WARNING,
785  "load_plugins is not empty, but load_plugin is not set to 'none'."
786  "The load_plugin value will be ignored.\n");
787  } else {
788  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
789  uid = uid_hevcdec_sw;
790  else
791  uid = uid_hevcdec_hw;
792  }
793  }
794  if (uid) {
795  av_freep(&s->qsv.load_plugins);
796  s->qsv.load_plugins = av_strdup(uid);
797  if (!s->qsv.load_plugins)
798  return AVERROR(ENOMEM);
799  }
800 
801  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
802  s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
803  if (!s->packet_fifo) {
804  ret = AVERROR(ENOMEM);
805  goto fail;
806  }
807 
808  if (!avctx->pkt_timebase.num)
809  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
810 
811  return 0;
812 fail:
813  qsv_decode_close(avctx);
814  return ret;
815 }
816 
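/*
 * NOTE: incoming packets are referenced into packet_fifo and consumed one at a
 * time through buffer_pkt; qsv_process_data() may consume a packet only
 * partially, in which case the remaining bytes stay in buffer_pkt for the next
 * iteration. At EOF the empty input packet is passed through, which flushes
 * the frames still delayed inside the SDK.
 */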
817 static int qsv_decode_frame(AVCodecContext *avctx, void *data,
818  int *got_frame, AVPacket *avpkt)
819 {
820  QSVDecContext *s = avctx->priv_data;
821  AVFrame *frame = data;
822  int ret;
823 
824  /* buffer the input packet */
825  if (avpkt->size) {
826  AVPacket input_ref;
827 
828  if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
829  ret = av_fifo_realloc2(s->packet_fifo,
830  av_fifo_size(s->packet_fifo) + sizeof(input_ref));
831  if (ret < 0)
832  return ret;
833  }
834 
835  ret = av_packet_ref(&input_ref, avpkt);
836  if (ret < 0)
837  return ret;
838  av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
839  }
840 
841  /* process buffered data */
842  while (!*got_frame) {
843  /* prepare the input data */
844  if (s->buffer_pkt.size <= 0) {
845  /* no more data */
846  if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
847  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
848  /* while a reinit is in progress, do not read from the fifo; keep the current buffer_pkt */
849  if (!s->qsv.reinit_flag) {
850  av_packet_unref(&s->buffer_pkt);
851  av_fifo_generic_read(s->packet_fifo, &s->buffer_pkt, sizeof(s->buffer_pkt), NULL);
852  }
853  }
854 
855  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
856  if (ret < 0){
857  /* Drop buffer_pkt if decoding the packet failed. Otherwise,
858  the decoder would keep trying to decode the same failing packet. */
859  av_packet_unref(&s->buffer_pkt);
860  return ret;
861  }
862  if (s->qsv.reinit_flag)
863  continue;
864 
865  s->buffer_pkt.size -= ret;
866  s->buffer_pkt.data += ret;
867  }
868 
869  return avpkt->size;
870 }
871 
872 static void qsv_decode_flush(AVCodecContext *avctx)
873 {
874  QSVDecContext *s = avctx->priv_data;
875 
876  qsv_clear_buffers(s);
877 
878  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
879  s->qsv.initialized = 0;
880 }
881 
882 #define OFFSET(x) offsetof(QSVDecContext, x)
883 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
884 
885 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
886 static const AVClass x##_qsv_class = { \
887  .class_name = #x "_qsv", \
888  .item_name = av_default_item_name, \
889  .option = opt, \
890  .version = LIBAVUTIL_VERSION_INT, \
891 }; \
892 const AVCodec ff_##x##_qsv_decoder = { \
893  .name = #x "_qsv", \
894  .long_name = NULL_IF_CONFIG_SMALL(#X " video (Intel Quick Sync Video acceleration)"), \
895  .priv_data_size = sizeof(QSVDecContext), \
896  .type = AVMEDIA_TYPE_VIDEO, \
897  .id = AV_CODEC_ID_##X, \
898  .init = qsv_decode_init, \
899  .decode = qsv_decode_frame, \
900  .flush = qsv_decode_flush, \
901  .close = qsv_decode_close, \
902  .bsfs = bsf_name, \
903  .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
904  .priv_class = &x##_qsv_class, \
905  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
906  AV_PIX_FMT_P010, \
907  AV_PIX_FMT_QSV, \
908  AV_PIX_FMT_NONE }, \
909  .hw_configs = qsv_hw_configs, \
910  .wrapper_name = "qsv", \
911 }; \
912 
913 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
914 
915 #if CONFIG_HEVC_QSV_DECODER
916 static const AVOption hevc_options[] = {
917  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
918 
919  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, "load_plugin" },
920  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, "load_plugin" },
921  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, "load_plugin" },
922  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, "load_plugin" },
923 
924  { "load_plugins", "A :-separate list of hexadecimal plugin UIDs to load in an internal session",
925  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
926 
927  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
928  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
929  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
930  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
931  { NULL },
932 };
933 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
934 #endif
935 
936 static const AVOption options[] = {
937  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
938 
939  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
940  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
941  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
942  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
943  { NULL },
944 };
945 
946 #if CONFIG_H264_QSV_DECODER
947 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
948 #endif
949 
950 #if CONFIG_MPEG2_QSV_DECODER
951 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
952 #endif
953 
954 #if CONFIG_VC1_QSV_DECODER
955 DEFINE_QSV_DECODER(vc1, VC1, NULL)
956 #endif
957 
958 #if CONFIG_MJPEG_QSV_DECODER
959 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
960 #endif
961 
962 #if CONFIG_VP8_QSV_DECODER
963 DEFINE_QSV_DECODER(vp8, VP8, NULL)
964 #endif
965 
966 #if CONFIG_VP9_QSV_DECODER
967 DEFINE_QSV_DECODER(vp9, VP9, NULL)
968 #endif
969 
970 #if CONFIG_AV1_QSV_DECODER
971 DEFINE_QSV_DECODER(av1, AV1, NULL)
972 #endif