/*
 * Intel MediaSDK QSV codec-independent code
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time.h"
#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "internal.h"
#include "decode.h"
#include "qsv.h"
#include "qsv_internal.h"
#include "qsvdec.h"

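/* HW configurations advertised for the QSV decoders: decoding to
 * AV_PIX_FMT_QSV surfaces on an AV_HWDEVICE_TYPE_QSV device, either through
 * an AVHWFramesContext or through the ad-hoc AVQSVContext/hwaccel_context
 * path. */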
const AVCodecHWConfigInternal *ff_qsv_hw_configs[] = {
    &(const AVCodecHWConfigInternal) {
        .public = {
            .pix_fmt     = AV_PIX_FMT_QSV,
            .methods     = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
                           AV_CODEC_HW_CONFIG_METHOD_AD_HOC,
            .device_type = AV_HWDEVICE_TYPE_QSV,
        },
        .hwaccel = NULL,
    },
    NULL
};
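
/* Lay out an NV12 or P010 frame in one contiguous buffer taken from @pool:
 * the luma pitch is aligned to 128 bytes and the chroma plane is placed as if
 * the luma height were padded to a multiple of 64 lines, presumably to match
 * the alignment the SDK expects for its GPU-assisted copy path.
 * Illustrative example: for 1920x1080 NV12, linesize[0] = FFALIGN(1920, 128)
 * = 1920 and data[1] = data[0] + 1920 * FFALIGN(1080, 64) = data[0] +
 * 1920 * 1088. */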

static int ff_qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
                                        AVBufferPool *pool)
{
    int ret = 0;

    av_frame_unref(frame);

    frame->width  = avctx->width;
    frame->height = avctx->height;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_NV12:
        frame->linesize[0] = FFALIGN(avctx->width, 128);
        break;
    case AV_PIX_FMT_P010:
        frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
        return AVERROR(EINVAL);
    }

    frame->linesize[1] = frame->linesize[0];
    frame->buf[0]      = av_buffer_pool_get(pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    frame->data[0] = frame->buf[0]->data;
    frame->data[1] = frame->data[0] +
                     frame->linesize[0] * FFALIGN(avctx->height, 64);

    ret = ff_attach_decode_data(frame);
    if (ret < 0)
        return ret;

    return 0;
}
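
/* Pick the mfxSession to decode with, in priority order: a session supplied
 * by the caller, one derived from an AVHWFramesContext, one derived from an
 * AVHWDeviceContext, or an internally created session.  The decoder is then
 * closed on that session so it can be (re)initialized later. */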

static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
                            AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
{
    int ret;

    if (q->gpu_copy == MFX_GPUCOPY_ON &&
        !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
        av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
               "only works in system memory mode.\n");
        q->gpu_copy = MFX_GPUCOPY_OFF;
    }
    if (session) {
        q->session = session;
    } else if (hw_frames_ref) {
        if (q->internal_qs.session) {
            MFXClose(q->internal_qs.session);
            q->internal_qs.session = NULL;
        }
        av_buffer_unref(&q->frames_ctx.hw_frames_ctx);

        q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
        if (!q->frames_ctx.hw_frames_ctx)
            return AVERROR(ENOMEM);

        ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
                                         &q->frames_ctx, q->load_plugins,
                                         q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
                                         q->gpu_copy);
        if (ret < 0) {
            av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
            return ret;
        }

        q->session = q->internal_qs.session;
    } else if (hw_device_ref) {
        if (q->internal_qs.session) {
            MFXClose(q->internal_qs.session);
            q->internal_qs.session = NULL;
        }

        ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
                                         hw_device_ref, q->load_plugins, q->gpu_copy);
        if (ret < 0)
            return ret;

        q->session = q->internal_qs.session;
    } else {
        if (!q->internal_qs.session) {
            ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
                                               q->load_plugins, q->gpu_copy);
            if (ret < 0)
                return ret;
        }

        q->session = q->internal_qs.session;
    }

    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}
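
/* The async fifo stores (QSVFrame*, mfxSyncPoint*) pairs queued by
 * qsv_decode(), so one fifo item is the size of those two pointers. */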

static inline unsigned int qsv_fifo_item_size(void)
{
    return sizeof(mfxSyncPoint*) + sizeof(QSVFrame*);
}

static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo)
{
    return av_fifo_size(fifo) / qsv_fifo_item_size();
}
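
/* Setup that has to happen before (re)opening the decoder: negotiate the
 * output pixel format through ff_get_format(), allocate the async fifo,
 * choose the IOPattern (opaque/video/system memory) and create the MFX
 * session.  Only the mfxVideoParam fields that do not depend on the parsed
 * stream header are filled in here. */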

static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
{
    mfxSession session = NULL;
    int iopattern = 0;
    int ret;
    enum AVPixelFormat pix_fmts[3] = {
        AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
        pix_fmt,        /* system memory format obtained from bitstream parser */
        AV_PIX_FMT_NONE };

    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0) {
        q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
        return ret;
    }

    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc(q->async_depth * qsv_fifo_item_size());
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
        AVQSVContext *user_ctx = avctx->hwaccel_context;
        session           = user_ctx->session;
        iopattern         = user_ctx->iopattern;
        q->ext_buffers    = user_ctx->ext_buffers;
        q->nb_ext_buffers = user_ctx->nb_ext_buffers;
    }

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext    *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;

        if (!iopattern) {
            if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
                iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
            else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
                iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
        }
    }

    if (!iopattern)
        iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    q->iopattern = iopattern;

    ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");

    ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    param->IOPattern   = q->iopattern;
    param->AsyncDepth  = q->async_depth;
    param->ExtParam    = q->ext_buffers;
    param->NumExtParam = q->nb_ext_buffers;

    return 0;
}
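
/* Finish initialization once the stream header is known: export the stream
 * properties from @param to the AVCodecContext, initialize the MFX decoder
 * and, when decoding to system memory, create the buffer pool used by
 * ff_qsv_get_continuous_buffer(). */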

static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
{
    int ret;

    avctx->width        = param->mfx.FrameInfo.CropW;
    avctx->height       = param->mfx.FrameInfo.CropH;
    avctx->coded_width  = param->mfx.FrameInfo.Width;
    avctx->coded_height = param->mfx.FrameInfo.Height;
    avctx->level        = param->mfx.CodecLevel;
    avctx->profile      = param->mfx.CodecProfile;
    avctx->field_order  = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
    avctx->pix_fmt      = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);

    ret = MFXVideoDECODE_Init(q->session, param);
    if (ret < 0)
        return ff_qsv_print_error(avctx, ret,
                                  "Error initializing the MFX video decoder");

    q->frame_info = param->mfx.FrameInfo;

    if (!avctx->hw_frames_ctx)
        q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
                    FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
    return 0;
}
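
/* Parse the stream header with MFXVideoDECODE_DecodeHeader() to fill @param;
 * returns AVERROR(EAGAIN) when the SDK needs more input data. */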

static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
{
    int ret;

    mfxBitstream bs = { 0 };

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
        if (avctx->field_order == AV_FIELD_PROGRESSIVE)
            bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
    } else
        return AVERROR_INVALIDDATA;

    if (!q->session) {
        ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
        if (ret < 0)
            return ret;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param->mfx.CodecId = ret;
    ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
    if (MFX_ERR_MORE_DATA == ret) {
        return AVERROR(EAGAIN);
    }
    if (ret < 0)
        return ff_qsv_print_error(avctx, ret,
                                  "Error decoding stream header");

    return 0;
}
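
/* Bind an AVFrame to an mfxFrameSurface1: get a buffer (from the internal
 * pool or via ff_get_buffer()), point the surface at the frame's planes (or
 * copy the surface for AV_PIX_FMT_QSV frames) and attach the
 * mfxExtDecodedFrameInfo ext buffer used later for frame-type reporting. */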

static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
{
    int ret;

    if (q->pool)
        ret = ff_qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
    else
        ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);

    if (ret < 0)
        return ret;

    if (frame->frame->format == AV_PIX_FMT_QSV) {
        frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
    } else {
        frame->surface.Info = q->frame_info;

        frame->surface.Data.PitchLow = frame->frame->linesize[0];
        frame->surface.Data.Y        = frame->frame->data[0];
        frame->surface.Data.UV       = frame->frame->data[1];
    }

    if (q->frames_ctx.mids) {
        ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
        if (ret < 0)
            return ret;

        frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
    }
    frame->surface.Data.ExtParam    = &frame->ext_param;
    frame->surface.Data.NumExtParam = 1;
    frame->ext_param                = (mfxExtBuffer*)&frame->dec_info;
    frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
    frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);

    frame->used = 1;

    return 0;
}

static void qsv_clear_unused_frames(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
            cur->used = 0;
            av_frame_unref(cur->frame);
        }
        cur = cur->next;
    }
}
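
/* Return a free input surface for DecodeFrameAsync(), reusing an unused
 * entry of the work_frames list when possible and appending a new QSVFrame
 * otherwise. */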

static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
{
    QSVFrame *frame, **last;
    int ret;

    qsv_clear_unused_frames(q);

    frame = q->work_frames;
    last  = &q->work_frames;
    while (frame) {
        if (!frame->used) {
            ret = alloc_frame(avctx, q, frame);
            if (ret < 0)
                return ret;
            *surf = &frame->surface;
            return 0;
        }

        last  = &frame->next;
        frame = frame->next;
    }

    frame = av_mallocz(sizeof(*frame));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->frame = av_frame_alloc();
    if (!frame->frame) {
        av_freep(&frame);
        return AVERROR(ENOMEM);
    }
    *last = frame;

    ret = alloc_frame(avctx, q, frame);
    if (ret < 0)
        return ret;

    *surf = &frame->surface;

    return 0;
}

static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (surf == &cur->surface)
            return cur;
        cur = cur->next;
    }
    return NULL;
}
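
/* One decoding iteration: submit the packet with
 * MFXVideoDECODE_DecodeFrameAsync() (retrying while the device is busy or
 * more surfaces are requested), queue the returned (frame, sync point) pair,
 * and, once async_depth frames are queued or when flushing with an empty
 * packet, sync and output the oldest one.  The return value is the number of
 * bytes consumed from the packet (bs.DataOffset). */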

static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
        if (avctx->field_order == AV_FIELD_PROGRESSIVE)
            bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
    }

    sync = av_mallocz(sizeof(*sync));
    if (!sync) {
        av_freep(&sync);
        return AVERROR(ENOMEM);
    }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0) {
            av_freep(&sync);
            return ret;
        }

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(500);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_freep(&sync);
        return ff_qsv_print_error(avctx, ret,
                                  "Error during QSV decoding.");
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!*sync && !bs.DataOffset) {
        bs.DataOffset = avpkt->size;
        ++q->zero_consume_run;
        if (q->zero_consume_run > 1)
            ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
    } else if (!*sync && bs.DataOffset) {
        ++q->buffered_count;
    } else {
        q->zero_consume_run = 0;
    }

    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    if ((qsv_fifo_size(q->async_fifo) >= q->async_depth) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
            do {
                ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
            } while (ret == MFX_WRN_IN_EXECUTION);
        }

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = &out_frame->surface;

#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = outsurf->Data.TimeStamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
        frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
        // Marking IDR frames as key frames is only correct for H.264; for HEVC, IRAP frames are the key frames.
        if (avctx->codec_id == AV_CODEC_ID_H264)
            frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);

        /* update the surface properties */
        if (avctx->pix_fmt == AV_PIX_FMT_QSV)
            ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;

        *got_frame = 1;
    }

    return bs.DataOffset;
}
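
/* Free everything owned by the QSVContext: sync points still queued in the
 * async fifo, the work_frames list, the fifo itself, the internal MFX
 * session, the hw frames context references and the system-memory buffer
 * pool. */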

int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);

        av_freep(&sync);
    }

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    ff_qsv_close_internal_session(&q->internal_qs);

    av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
    av_buffer_unref(&q->frames_ctx.mids_buf);
    av_buffer_pool_uninit(&q->pool);

    return 0;
}
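
/* Entry point used by the per-codec QSV decoder wrappers: handles the lazy
 * header parsing and the reinitialization triggered by resolution or pixel
 * format changes, then hands the packet to qsv_decode().
 *
 * Illustrative call pattern only (the wrapper context "s" below is a made-up
 * name); a wrapper's decode callback is expected to do roughly:
 *
 *     int used = ff_qsv_process_data(avctx, &s->qsv, frame, &got_frame, pkt);
 *     if (used < 0)
 *         return used;
 *     // 'used' bytes of pkt were consumed; the wrapper keeps resubmitting
 *     // the remainder until the whole packet has been consumed.
 */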

int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
                        AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    int ret;
    mfxVideoParam param = { 0 };
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;

    if (!pkt->size)
        return qsv_decode(avctx, q, frame, got_frame, pkt);

    /* TODO: flush delayed frames on reinit */

    // sw_pix_fmt and coded_width/height should be set for ff_get_format();
    // assume sw_pix_fmt is NV12 and coded_width/height is 1280x720.
    // The assumption may not be correct, but it will be updated once the header is decoded.
    if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
        pix_fmt = q->orig_pix_fmt;
    if (!avctx->coded_width)
        avctx->coded_width = 1280;
    if (!avctx->coded_height)
        avctx->coded_height = 720;

    ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);

    if (ret >= 0 && (q->orig_pix_fmt != ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC) ||
        avctx->coded_width  != param.mfx.FrameInfo.Width ||
        avctx->coded_height != param.mfx.FrameInfo.Height)) {
        AVPacket zero_pkt = {0};

        if (q->buffered_count) {
            q->reinit_flag = 1;
            /* decode zero-size pkt to flush the buffered pkt before reinit */
            q->buffered_count--;
            return qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
        }
        q->reinit_flag = 0;

        q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);

        avctx->coded_width  = param.mfx.FrameInfo.Width;
        avctx->coded_height = param.mfx.FrameInfo.Height;

        ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
        if (ret < 0)
            goto reinit_fail;
        q->initialized = 0;
    }

    if (!q->initialized) {
        ret = qsv_decode_init(avctx, q, &param);
        if (ret < 0)
            goto reinit_fail;
        q->initialized = 1;
    }

    return qsv_decode(avctx, q, frame, got_frame, pkt);

reinit_fail:
    q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}

void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
{
    q->orig_pix_fmt = AV_PIX_FMT_NONE;
    q->initialized = 0;
}