FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <sys/types.h>
29 
30 #include <mfxvideo.h>
31 
32 #include "libavutil/common.h"
33 #include "libavutil/fifo.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/hwcontext_qsv.h"
37 #include "libavutil/mem.h"
38 #include "libavutil/log.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixfmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/film_grain_params.h"
44 
45 #include "avcodec.h"
46 #include "codec_internal.h"
47 #include "internal.h"
48 #include "decode.h"
49 #include "hwconfig.h"
50 #include "qsv.h"
51 #include "qsv_internal.h"
52 
53 #if QSV_ONEVPL
54 #include <mfxdispatcher.h>
55 #else
56 #define MFXUnload(a) do { } while(0)
57 #endif
58 
59 static const AVRational mfx_tb = { 1, 90000 };
60 
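/*
 * Timestamps on the MFX side use a fixed 90 kHz timebase (mfx_tb). The macros
 * below convert between the caller's pkt_timebase and that timebase, mapping
 * AV_NOPTS_VALUE <-> MFX_TIMESTAMP_UNKNOWN and passing values through
 * unchanged when no packet timebase is set.
 */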
61 #define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
62  MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
63  av_rescale_q(pts, pts_tb, mfx_tb) : pts)
64 
65 #define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
66  AV_NOPTS_VALUE : pts_tb.num ? \
67  av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
68 
69 typedef struct QSVAsyncFrame {
70  mfxSyncPoint *sync;
71  QSVFrame *frame;
72 } QSVAsyncFrame;
73 
74 typedef struct QSVContext {
75  // the session used for decoding
76  mfxSession session;
77  mfxVersion ver;
78 
79  // the session we allocated internally, in case the caller did not provide
80  // one
81  QSVSession internal_qs;
82 
83  QSVFramesContext frames_ctx;
84 
85  /**
86  * a linked list of frames currently being used by QSV
87  */
88  QSVFrame *work_frames;
89 
90  AVFifo *async_fifo;
91  int zero_consume_run;
92  int reinit_flag;
93 
94  enum AVPixelFormat orig_pix_fmt;
95  uint32_t fourcc;
96  mfxFrameInfo frame_info;
97  AVBufferPool *pool;
98  int suggest_pool_size;
99  int initialized;
100 
101  // options set by the caller
102  int async_depth;
103  int iopattern;
104  int gpu_copy;
105 
106  char *load_plugins;
107 
108  mfxExtBuffer **ext_buffers;
109  int nb_ext_buffers;
110 } QSVContext;
111 
112 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
113  &(const AVCodecHWConfigInternal) {
114  .public = {
115  .pix_fmt = AV_PIX_FMT_QSV,
116  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
117  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
118  .device_type = AV_HWDEVICE_TYPE_QSV,
119  },
120  .hwaccel = NULL,
121  },
122  NULL
123 };
124 
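/*
 * Fill an AVFrame from the decoder's internal buffer pool (system-memory path).
 * Linesizes are aligned to 128 bytes and the chroma plane is placed after
 * FFALIGN(height, 64) lines, matching the pool size allocated in
 * qsv_decode_init_context().
 */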
125 static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
126  AVBufferPool *pool)
127 {
128  int ret = 0;
129 
130  ret = ff_decode_frame_props(avctx, frame);
131 
132  frame->width = avctx->width;
133  frame->height = avctx->height;
134 
135  switch (avctx->pix_fmt) {
136  case AV_PIX_FMT_NV12:
137  frame->linesize[0] = FFALIGN(avctx->width, 128);
138  break;
139  case AV_PIX_FMT_P010:
140  case AV_PIX_FMT_YUYV422:
141  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
142  break;
143  case AV_PIX_FMT_Y210:
144  case AV_PIX_FMT_VUYX:
145  frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
146  break;
147  default:
148  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
149  return AVERROR(EINVAL);
150  }
151 
152  frame->buf[0] = av_buffer_pool_get(pool);
153  if (!frame->buf[0])
154  return AVERROR(ENOMEM);
155 
156  frame->data[0] = frame->buf[0]->data;
157  if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
158  avctx->pix_fmt == AV_PIX_FMT_P010) {
159  frame->linesize[1] = frame->linesize[0];
160  frame->data[1] = frame->data[0] +
161  frame->linesize[0] * FFALIGN(avctx->height, 64);
162  }
163 
164  ret = ff_attach_decode_data(frame);
165  if (ret < 0)
166  return ret;
167 
168  return 0;
169 }
170 
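/*
 * Pick the MFX session to decode with, in priority order: a caller-provided
 * session, one derived from hw_frames_ctx, one derived from hw_device_ctx, or
 * an internally created session. The runtime version is queried afterwards and
 * any previously initialized decoder is closed.
 */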
171 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
172  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
173 {
174  int ret;
175 
176  if (q->gpu_copy == MFX_GPUCOPY_ON &&
177  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
178  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
179  "only works in system memory mode.\n");
180  q->gpu_copy = MFX_GPUCOPY_OFF;
181  }
182  if (session) {
183  q->session = session;
184  } else if (hw_frames_ref) {
185  if (q->internal_qs.session) {
186  MFXClose(q->internal_qs.session);
187  q->internal_qs.session = NULL;
188  }
189  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
190 
191  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
192  if (!q->frames_ctx.hw_frames_ctx)
193  return AVERROR(ENOMEM);
194 
195  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
196  &q->frames_ctx, q->load_plugins,
197 #if QSV_HAVE_OPAQUE
198  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
199 #else
200  0,
201 #endif
202  q->gpu_copy);
203  if (ret < 0) {
204  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
205  return ret;
206  }
207 
208  q->session = q->internal_qs.session;
209  } else if (hw_device_ref) {
210  if (q->internal_qs.session) {
211  MFXClose(q->internal_qs.session);
212  q->internal_qs.session = NULL;
213  }
214 
215  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
216  hw_device_ref, q->load_plugins, q->gpu_copy);
217  if (ret < 0)
218  return ret;
219 
220  q->session = q->internal_qs.session;
221  } else {
222  if (!q->internal_qs.session) {
223  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
224  q->load_plugins, q->gpu_copy);
225  if (ret < 0)
226  return ret;
227  }
228 
229  q->session = q->internal_qs.session;
230  }
231 
232  if (MFXQueryVersion(q->session, &q->ver) != MFX_ERR_NONE) {
233  av_log(avctx, AV_LOG_ERROR, "Error querying the session version.\n");
234  q->session = NULL;
235 
236  if (q->internal_qs.session) {
237  MFXClose(q->internal_qs.session);
238  q->internal_qs.session = NULL;
239  }
240 
241  if (q->internal_qs.loader) {
242  MFXUnload(q->internal_qs.loader);
243  q->internal_qs.loader = NULL;
244  }
245 
246  return AVERROR_EXTERNAL;
247  }
248 
249  /* make sure the decoder is uninitialized */
250  MFXVideoDECODE_Close(q->session);
251 
252  return 0;
253 }
254 
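/*
 * One-time setup before decoding: negotiate the output pixel format via
 * ff_get_format(), create a QSV frame pool when only a device context was
 * supplied, derive the IOPattern from the frame pool type, and initialize the
 * MFX session. The resulting settings are written into *param.
 */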
255 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
256 {
257  mfxSession session = NULL;
258  int iopattern = 0;
259  int ret;
260  enum AVPixelFormat pix_fmts[3] = {
261  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
262  pix_fmt, /* system memory format obtained from bitstream parser */
263  AV_PIX_FMT_NONE };
264 
265  ret = ff_get_format(avctx, pix_fmts);
266  if (ret < 0) {
267  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
268  return ret;
269  }
270 
271  if (!q->async_fifo) {
272  q->async_fifo = av_fifo_alloc2(q->async_depth, sizeof(QSVAsyncFrame), 0);
273  if (!q->async_fifo)
274  return AVERROR(ENOMEM);
275  }
276 
277  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
278  AVQSVContext *user_ctx = avctx->hwaccel_context;
279  session = user_ctx->session;
280  iopattern = user_ctx->iopattern;
281  q->ext_buffers = user_ctx->ext_buffers;
282  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
283  }
284 
285  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
286  AVHWFramesContext *hwframes_ctx;
287  AVQSVFramesContext *frames_hwctx;
288 
289  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
290 
291  if (!avctx->hw_frames_ctx) {
292  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
293  return AVERROR(ENOMEM);
294  }
295 
296  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
297  frames_hwctx = hwframes_ctx->hwctx;
298  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
299  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
300  hwframes_ctx->format = AV_PIX_FMT_QSV;
301  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
302  hwframes_ctx->initial_pool_size = q->suggest_pool_size + 16 + avctx->extra_hw_frames;
303  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
304 
305  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
306 
307  if (ret < 0) {
308  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
309  av_buffer_unref(&avctx->hw_frames_ctx);
310  return ret;
311  }
312  }
313 
314  if (avctx->hw_frames_ctx) {
315  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
316  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
317 
318  if (!iopattern) {
319 #if QSV_HAVE_OPAQUE
320  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
321  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
322  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
323  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
324 #else
325  if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
326  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
327 #endif
328  }
329  }
330 
331  if (!iopattern)
332  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
333  q->iopattern = iopattern;
334 
335  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
336 
337  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
338  if (ret < 0) {
339  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
340  return ret;
341  }
342 
343  param->IOPattern = q->iopattern;
344  param->AsyncDepth = q->async_depth;
345  param->ExtParam = q->ext_buffers;
346  param->NumExtParam = q->nb_ext_buffers;
347 
348  return 0;
349 }
350 
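/*
 * Propagate the parsed stream parameters (dimensions, profile/level, field
 * order, pixel format) to the AVCodecContext, initialize the MFX decoder and,
 * when decoding to system memory, allocate the continuous buffer pool used by
 * qsv_get_continuous_buffer().
 */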
351 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
352 {
353  int ret;
354 
355  avctx->width = param->mfx.FrameInfo.CropW;
356  avctx->height = param->mfx.FrameInfo.CropH;
357  avctx->coded_width = param->mfx.FrameInfo.Width;
358  avctx->coded_height = param->mfx.FrameInfo.Height;
359  avctx->level = param->mfx.CodecLevel;
360  avctx->profile = param->mfx.CodecProfile;
361  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
362  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
363 
364  ret = MFXVideoDECODE_Init(q->session, param);
365  if (ret < 0)
366  return ff_qsv_print_error(avctx, ret,
367  "Error initializing the MFX video decoder");
368 
369  q->frame_info = param->mfx.FrameInfo;
370 
371  if (!avctx->hw_frames_ctx)
372  q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
373  FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
374  return 0;
375 }
376 
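/*
 * Parse the stream header with MFXVideoDECODE_DecodeHeader(), attaching an
 * mfxExtVideoSignalInfo buffer to pick up color range and color description.
 * Returns AVERROR(EAGAIN) when more bitstream data is needed.
 */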
377 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
378  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
379  mfxVideoParam *param)
380 {
381  int ret;
382  mfxExtVideoSignalInfo video_signal_info = { 0 };
383  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
384  mfxBitstream bs = { 0 };
385 
386  if (avpkt->size) {
387  bs.Data = avpkt->data;
388  bs.DataLength = avpkt->size;
389  bs.MaxLength = bs.DataLength;
390  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
391  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
392  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
393  } else
394  return AVERROR_INVALIDDATA;
395 
396 
397  if(!q->session) {
398  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
399  if (ret < 0)
400  return ret;
401  }
402 
403  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
404  if (ret < 0)
405  return ret;
406 
407  param->mfx.CodecId = ret;
408  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
409  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
410  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
411  // so do not append this buffer to the existing buffer array
412  param->ExtParam = header_ext_params;
413  param->NumExtParam = 1;
414  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
415  if (MFX_ERR_MORE_DATA == ret) {
416  return AVERROR(EAGAIN);
417  }
418  if (ret < 0)
419  return ff_qsv_print_error(avctx, ret,
420  "Error decoding stream header");
421 
422  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
423 
424  if (video_signal_info.ColourDescriptionPresent) {
425  avctx->color_primaries = video_signal_info.ColourPrimaries;
426  avctx->color_trc = video_signal_info.TransferCharacteristics;
427  avctx->colorspace = video_signal_info.MatrixCoefficients;
428  }
429 
430  param->ExtParam = q->ext_buffers;
431  param->NumExtParam = q->nb_ext_buffers;
432 
433 #if QSV_VERSION_ATLEAST(1, 34)
434  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1)
435  param->mfx.FilmGrain = (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) ? 0 : param->mfx.FilmGrain;
436 #endif
437 
438  return 0;
439 }
440 
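/*
 * Back a QSVFrame with an AVFrame buffer (from the internal pool or via
 * ff_get_buffer()), map it to an mfxFrameSurface1 and attach the per-frame
 * ext buffers (decoded frame info and, for AV1, film grain parameters).
 */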
441 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
442 {
443  int ret;
444 
445  if (q->pool)
446  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
447  else
448  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
449 
450  if (ret < 0)
451  return ret;
452 
453  if (frame->frame->format == AV_PIX_FMT_QSV) {
454  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
455  } else {
456  ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
457  if (ret < 0) {
458  av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
459  return ret;
460  }
461  }
462 
463  frame->surface.Info = q->frame_info;
464 
465  if (q->frames_ctx.mids) {
466  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
467  if (ret < 0)
468  return ret;
469 
470  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
471  }
472 
473  frame->surface.Data.ExtParam = frame->ext_param;
474  frame->surface.Data.NumExtParam = 0;
475  frame->num_ext_params = 0;
476  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
477  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
478  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->dec_info);
479 #if QSV_VERSION_ATLEAST(1, 34)
480  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1) {
481  frame->av1_film_grain_param.Header.BufferId = MFX_EXTBUFF_AV1_FILM_GRAIN_PARAM;
482  frame->av1_film_grain_param.Header.BufferSz = sizeof(frame->av1_film_grain_param);
483  frame->av1_film_grain_param.FilmGrainFlags = 0;
484  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->av1_film_grain_param);
485  }
486 #endif
487 
488  frame->used = 1;
489 
490  return 0;
491 }
492 
493 static void qsv_clear_unused_frames(QSVContext *q)
494 {
495  QSVFrame *cur = q->work_frames;
496  while (cur) {
497  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
498  cur->used = 0;
499  av_frame_unref(cur->frame);
500  }
501  cur = cur->next;
502  }
503 }
504 
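/*
 * Return a free surface for the decoder: reuse an unused entry from the
 * work_frames list if possible, otherwise append a newly allocated one.
 */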
505 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
506 {
507  QSVFrame *frame, **last;
508  int ret;
509 
510  qsv_clear_unused_frames(q);
511 
512  frame = q->work_frames;
513  last = &q->work_frames;
514  while (frame) {
515  if (!frame->used) {
516  ret = alloc_frame(avctx, q, frame);
517  if (ret < 0)
518  return ret;
519  *surf = &frame->surface;
520  return 0;
521  }
522 
523  last = &frame->next;
524  frame = frame->next;
525  }
526 
527  frame = av_mallocz(sizeof(*frame));
528  if (!frame)
529  return AVERROR(ENOMEM);
530  frame->frame = av_frame_alloc();
531  if (!frame->frame) {
532  av_freep(&frame);
533  return AVERROR(ENOMEM);
534  }
535  *last = frame;
536 
537  ret = alloc_frame(avctx, q, frame);
538  if (ret < 0)
539  return ret;
540 
541  *surf = &frame->surface;
542 
543  return 0;
544 }
545 
546 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
547 {
548  QSVFrame *cur = q->work_frames;
549  while (cur) {
550  if (surf == &cur->surface)
551  return cur;
552  cur = cur->next;
553  }
554  return NULL;
555 }
556 
557 #if QSV_VERSION_ATLEAST(1, 34)
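/*
 * Translate the mfxExtAV1FilmGrainParam reported by the runtime into
 * AV_FILM_GRAIN_PARAMS_AV1 frame side data. The +8/+6/-128 adjustments undo
 * the offsets used in the MFX representation of the AV1 syntax elements.
 */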
558 static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam *ext_param, AVFrame *frame)
559 {
560  AVFilmGrainParams *fgp;
561  AVFilmGrainAOMParams *aom;
562  int i;
563 
564  if (!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_APPLY))
565  return 0;
566 
567  fgp = av_film_grain_params_create_side_data(frame);
568 
569  if (!fgp)
570  return AVERROR(ENOMEM);
571 
572  fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
573  fgp->seed = ext_param->GrainSeed;
574  aom = &fgp->codec.aom;
575 
576  aom->chroma_scaling_from_luma = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CHROMA_SCALING_FROM_LUMA);
577  aom->scaling_shift = ext_param->GrainScalingMinus8 + 8;
578  aom->ar_coeff_lag = ext_param->ArCoeffLag;
579  aom->ar_coeff_shift = ext_param->ArCoeffShiftMinus6 + 6;
580  aom->grain_scale_shift = ext_param->GrainScaleShift;
581  aom->overlap_flag = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_OVERLAP);
582  aom->limit_output_range = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CLIP_TO_RESTRICTED_RANGE);
583 
584  aom->num_y_points = ext_param->NumYPoints;
585 
586  for (i = 0; i < aom->num_y_points; i++) {
587  aom->y_points[i][0] = ext_param->PointY[i].Value;
588  aom->y_points[i][1] = ext_param->PointY[i].Scaling;
589  }
590 
591  aom->num_uv_points[0] = ext_param->NumCbPoints;
592 
593  for (i = 0; i < aom->num_uv_points[0]; i++) {
594  aom->uv_points[0][i][0] = ext_param->PointCb[i].Value;
595  aom->uv_points[0][i][1] = ext_param->PointCb[i].Scaling;
596  }
597 
598  aom->num_uv_points[1] = ext_param->NumCrPoints;
599 
600  for (i = 0; i < aom->num_uv_points[1]; i++) {
601  aom->uv_points[1][i][0] = ext_param->PointCr[i].Value;
602  aom->uv_points[1][i][1] = ext_param->PointCr[i].Scaling;
603  }
604 
605  for (i = 0; i < 24; i++)
606  aom->ar_coeffs_y[i] = ext_param->ArCoeffsYPlus128[i] - 128;
607 
608  for (i = 0; i < 25; i++) {
609  aom->ar_coeffs_uv[0][i] = ext_param->ArCoeffsCbPlus128[i] - 128;
610  aom->ar_coeffs_uv[1][i] = ext_param->ArCoeffsCrPlus128[i] - 128;
611  }
612 
613  aom->uv_mult[0] = ext_param->CbMult;
614  aom->uv_mult[1] = ext_param->CrMult;
615  aom->uv_mult_luma[0] = ext_param->CbLumaMult;
616  aom->uv_mult_luma[1] = ext_param->CrLumaMult;
617  aom->uv_offset[0] = ext_param->CbOffset;
618  aom->uv_offset[1] = ext_param->CrOffset;
619 
620  return 0;
621 }
622 #endif
623 
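/*
 * Core decode step: feed the packet to MFXVideoDECODE_DecodeFrameAsync(),
 * retrying while the device is busy, and queue the returned sync point in
 * async_fifo. Once async_depth frames are pending (or when draining with an
 * empty packet), the oldest frame is synced and returned to the caller.
 * The return value is the number of bytes consumed from the bitstream.
 */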
624 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
625  AVFrame *frame, int *got_frame,
626  const AVPacket *avpkt)
627 {
628  mfxFrameSurface1 *insurf;
629  mfxFrameSurface1 *outsurf;
630  mfxSyncPoint *sync;
631  mfxBitstream bs = { { { 0 } } };
632  int ret;
633 
634  if (avpkt->size) {
635  bs.Data = avpkt->data;
636  bs.DataLength = avpkt->size;
637  bs.MaxLength = bs.DataLength;
638  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
639  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
640  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
641  }
642 
643  sync = av_mallocz(sizeof(*sync));
644  if (!sync) {
645  av_freep(&sync);
646  return AVERROR(ENOMEM);
647  }
648 
649  do {
650  ret = get_surface(avctx, q, &insurf);
651  if (ret < 0) {
652  av_freep(&sync);
653  return ret;
654  }
655 
656  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
657  insurf, &outsurf, sync);
658  if (ret == MFX_WRN_DEVICE_BUSY)
659  av_usleep(500);
660 
661  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
662 
663  if (ret == MFX_ERR_INCOMPATIBLE_VIDEO_PARAM) {
664  q->reinit_flag = 1;
665  av_log(avctx, AV_LOG_DEBUG, "Video parameter change\n");
666  av_freep(&sync);
667  return 0;
668  }
669 
670  if (ret != MFX_ERR_NONE &&
671  ret != MFX_ERR_MORE_DATA &&
672  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
673  ret != MFX_ERR_MORE_SURFACE) {
674  av_freep(&sync);
675  return ff_qsv_print_error(avctx, ret,
676  "Error during QSV decoding.");
677  }
678 
679  /* make sure we do not enter an infinite loop if the SDK
680  * did not consume any data and did not return anything */
681  if (!*sync && !bs.DataOffset) {
682  bs.DataOffset = avpkt->size;
683  ++q->zero_consume_run;
684  if (q->zero_consume_run > 1)
685  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
686  } else {
687  q->zero_consume_run = 0;
688  }
689 
690  if (*sync) {
691  QSVAsyncFrame aframe;
692  QSVFrame *out_frame = find_frame(q, outsurf);
693 
694  if (!out_frame) {
695  av_log(avctx, AV_LOG_ERROR,
696  "The returned surface does not correspond to any frame\n");
697  av_freep(&sync);
698  return AVERROR_BUG;
699  }
700 
701  out_frame->queued += 1;
702 
703  aframe = (QSVAsyncFrame){ sync, out_frame };
704  av_fifo_write(q->async_fifo, &aframe, 1);
705  } else {
706  av_freep(&sync);
707  }
708 
709  if ((av_fifo_can_read(q->async_fifo) >= q->async_depth) ||
710  (!avpkt->size && av_fifo_can_read(q->async_fifo))) {
711  QSVAsyncFrame aframe;
712  AVFrame *src_frame;
713 
714  av_fifo_read(q->async_fifo, &aframe, 1);
715  aframe.frame->queued -= 1;
716 
717  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
718  do {
719  ret = MFXVideoCORE_SyncOperation(q->session, *aframe.sync, 1000);
720  } while (ret == MFX_WRN_IN_EXECUTION);
721  }
722 
723  av_freep(&aframe.sync);
724 
725  src_frame = aframe.frame->frame;
726 
727  ret = av_frame_ref(frame, src_frame);
728  if (ret < 0)
729  return ret;
730 
731  outsurf = &aframe.frame->surface;
732 
733  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
734 #if QSV_VERSION_ATLEAST(1, 34)
735  if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
736  QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) &&
737  avctx->codec_id == AV_CODEC_ID_AV1) {
738  ret = qsv_export_film_grain(avctx, &aframe.frame->av1_film_grain_param, frame);
739 
740  if (ret < 0)
741  return ret;
742  }
743 #endif
744 
745  frame->repeat_pict =
746  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
747  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
748  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
749  frame->top_field_first =
750  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
751  frame->interlaced_frame =
752  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
753  frame->pict_type = ff_qsv_map_pictype(aframe.frame->dec_info.FrameType);
754  // Treating only IDR frames as key frames is correct for H.264 only; for HEVC, IRAP frames are key frames.
755  if (avctx->codec_id == AV_CODEC_ID_H264)
756  frame->key_frame = !!(aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
757 
758  /* update the surface properties */
759  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
760  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
761 
762  *got_frame = 1;
763  }
764 
765  return bs.DataOffset;
766 }
767 
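/*
 * Tear down a QSVContext: close the MFX decoder, drop any pending sync points,
 * free the work-frame list and release the internal session, buffer pool and
 * frames-context references.
 */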
768 static void qsv_decode_close_qsvcontext(QSVContext *q)
769 {
770  QSVFrame *cur = q->work_frames;
771 
772  if (q->session)
773  MFXVideoDECODE_Close(q->session);
774 
775  if (q->async_fifo) {
776  QSVAsyncFrame aframe;
777  while (av_fifo_read(q->async_fifo, &aframe, 1) >= 0)
778  av_freep(&aframe.sync);
779  av_fifo_freep2(&q->async_fifo);
780  }
781 
782  while (cur) {
783  q->work_frames = cur->next;
784  av_frame_free(&cur->frame);
785  av_freep(&cur);
786  cur = q->work_frames;
787  }
788 
788 
789  ff_qsv_close_internal_session(&q->internal_qs);
790 
791  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
792  av_buffer_unref(&q->frames_ctx.mids_buf);
793  av_buffer_pool_uninit(&q->pool);
794 }
795 
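/*
 * Decode one packet, (re)initializing the decoder as needed: the header is
 * parsed on the first packet and after a parameter change (reinit_flag), any
 * buffered frames are flushed with a zero-sized packet, and the session and
 * context are then (re)created before the packet is handed to qsv_decode().
 */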
796 static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
797  AVFrame *frame, int *got_frame, const AVPacket *pkt)
798 {
799  int ret;
800  mfxVideoParam param = { 0 };
801  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
802 
803  if (!pkt->size)
804  return qsv_decode(avctx, q, frame, got_frame, pkt);
805 
806  /* TODO: flush delayed frames on reinit */
807 
808  // sw_pix_fmt and coded_width/height must be set before calling ff_get_format(),
809  // so assume sw_pix_fmt is NV12 and coded_width/height is 1280x720;
810  // the assumption may be wrong, but it is corrected once the header has been decoded.
811  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
812  pix_fmt = q->orig_pix_fmt;
813  if (!avctx->coded_width)
814  avctx->coded_width = 1280;
815  if (!avctx->coded_height)
816  avctx->coded_height = 720;
817 
818  /* decode zero-size pkt to flush the buffered pkt before reinit */
819  if (q->reinit_flag) {
820  AVPacket zero_pkt = {0};
821  ret = qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
822  if (ret < 0 || *got_frame)
823  return ret;
824  }
825 
826  if (q->reinit_flag || !q->session || !q->initialized) {
827  mfxFrameAllocRequest request;
828  memset(&request, 0, sizeof(request));
829 
830  q->reinit_flag = 0;
831  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
832  if (ret < 0) {
833  if (ret == AVERROR(EAGAIN))
834  av_log(avctx, AV_LOG_INFO, "More data is required to decode header\n");
835  else
836  av_log(avctx, AV_LOG_ERROR, "Error decoding header\n");
837  goto reinit_fail;
838  }
839  param.IOPattern = q->iopattern;
840 
841  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
842 
843  avctx->coded_width = param.mfx.FrameInfo.Width;
844  avctx->coded_height = param.mfx.FrameInfo.Height;
845 
846  ret = MFXVideoDECODE_QueryIOSurf(q->session, &param, &request);
847  if (ret < 0)
848  return ff_qsv_print_error(avctx, ret, "Error querying IO surface");
849 
850  q->suggest_pool_size = request.NumFrameSuggested;
851 
852  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
853  if (ret < 0)
854  goto reinit_fail;
855  q->initialized = 0;
856  }
857 
858  if (!q->initialized) {
859  ret = qsv_decode_init_context(avctx, q, &param);
860  if (ret < 0)
861  goto reinit_fail;
862  q->initialized = 1;
863  }
864 
865  return qsv_decode(avctx, q, frame, got_frame, pkt);
866 
867 reinit_fail:
868  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
869  return ret;
870 }
871 
872 enum LoadPlugin {
873  LOAD_PLUGIN_NONE,
874  LOAD_PLUGIN_HEVC_SW,
875  LOAD_PLUGIN_HEVC_HW,
876 };
877 
878 typedef struct QSVDecContext {
879  AVClass *class;
880  QSVContext qsv;
881 
882  int load_plugin;
883 
884  AVFifo *packet_fifo;
885 
886  AVPacket buffer_pkt;
887 } QSVDecContext;
888 
889 static void qsv_clear_buffers(QSVDecContext *s)
890 {
891  AVPacket pkt;
892  while (av_fifo_read(s->packet_fifo, &pkt, 1) >= 0)
893  av_packet_unref(&pkt);
894 
895  av_packet_unref(&s->buffer_pkt);
896 }
897 
898 static av_cold int qsv_decode_close(AVCodecContext *avctx)
899 {
900  QSVDecContext *s = avctx->priv_data;
901 
902  qsv_decode_close_qsvcontext(&s->qsv);
903 
904  qsv_clear_buffers(s);
905 
906  av_fifo_freep2(&s->packet_fifo);
907 
908  return 0;
909 }
910 
911 static av_cold int qsv_decode_init(AVCodecContext *avctx)
912 {
913  QSVDecContext *s = avctx->priv_data;
914  int ret;
915  const char *uid = NULL;
916 
917  if (avctx->codec_id == AV_CODEC_ID_VP8) {
918  uid = "f622394d8d87452f878c51f2fc9b4131";
919  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
920  uid = "a922394d8d87452f878c51f2fc9b4131";
921  }
922  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
923  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
924  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
925 
926  if (s->qsv.load_plugins[0]) {
927  av_log(avctx, AV_LOG_WARNING,
928  "load_plugins is not empty, but load_plugin is not set to 'none'."
929  "The load_plugin value will be ignored.\n");
930  } else {
931  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
932  uid = uid_hevcdec_sw;
933  else
934  uid = uid_hevcdec_hw;
935  }
936  }
937  if (uid) {
938  av_freep(&s->qsv.load_plugins);
939  s->qsv.load_plugins = av_strdup(uid);
940  if (!s->qsv.load_plugins)
941  return AVERROR(ENOMEM);
942  }
943 
944  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
945  s->packet_fifo = av_fifo_alloc2(1, sizeof(AVPacket),
946  AV_FIFO_FLAG_AUTO_GROW);
947  if (!s->packet_fifo) {
948  ret = AVERROR(ENOMEM);
949  goto fail;
950  }
951 
952  if (!avctx->pkt_timebase.num)
953  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
954 
955  return 0;
956 fail:
957  qsv_decode_close(avctx);
958  return ret;
959 }
960 
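/*
 * libavcodec entry point: incoming packets are referenced into packet_fifo and
 * consumed from buffer_pkt, so partially consumed packets and mid-stream
 * reinitialization can be handled across calls.
 */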
961 static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
962  int *got_frame, AVPacket *avpkt)
963 {
964  QSVDecContext *s = avctx->priv_data;
965  int ret;
966 
967  /* buffer the input packet */
968  if (avpkt->size) {
969  AVPacket input_ref;
970 
971  ret = av_packet_ref(&input_ref, avpkt);
972  if (ret < 0)
973  return ret;
974  av_fifo_write(s->packet_fifo, &input_ref, 1);
975  }
976 
977  /* process buffered data */
978  while (!*got_frame) {
979  /* prepare the input data */
980  if (s->buffer_pkt.size <= 0) {
981  /* no more data */
982  if (!av_fifo_can_read(s->packet_fifo))
983  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
985  /* a reinit is in progress: do not read from the fifo, keep the current buffer_pkt */
985  if (!s->qsv.reinit_flag) {
986  av_packet_unref(&s->buffer_pkt);
987  av_fifo_read(s->packet_fifo, &s->buffer_pkt, 1);
988  }
989  }
990 
991  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
992  if (ret < 0){
993  /* Drop buffer_pkt if the packet failed to decode; otherwise
994  the decoder would keep retrying the same broken packet. */
995  av_packet_unref(&s->buffer_pkt);
996  return ret;
997  }
998  if (s->qsv.reinit_flag)
999  continue;
1000 
1001  s->buffer_pkt.size -= ret;
1002  s->buffer_pkt.data += ret;
1003  }
1004 
1005  return avpkt->size;
1006 }
1007 
1008 static void qsv_decode_flush(AVCodecContext *avctx)
1009 {
1010  QSVDecContext *s = avctx->priv_data;
1011 
1012  qsv_clear_buffers(s);
1013 
1014  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
1015  s->qsv.initialized = 0;
1016 }
1017 
1018 #define OFFSET(x) offsetof(QSVDecContext, x)
1019 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1020 
1021 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
1022 static const AVClass x##_qsv_class = { \
1023  .class_name = #x "_qsv", \
1024  .item_name = av_default_item_name, \
1025  .option = opt, \
1026  .version = LIBAVUTIL_VERSION_INT, \
1027 }; \
1028 const FFCodec ff_##x##_qsv_decoder = { \
1029  .p.name = #x "_qsv", \
1030  CODEC_LONG_NAME(#X " video (Intel Quick Sync Video acceleration)"), \
1031  .priv_data_size = sizeof(QSVDecContext), \
1032  .p.type = AVMEDIA_TYPE_VIDEO, \
1033  .p.id = AV_CODEC_ID_##X, \
1034  .init = qsv_decode_init, \
1035  FF_CODEC_DECODE_CB(qsv_decode_frame), \
1036  .flush = qsv_decode_flush, \
1037  .close = qsv_decode_close, \
1038  .bsfs = bsf_name, \
1039  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
1040  .p.priv_class = &x##_qsv_class, \
1041  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
1042  AV_PIX_FMT_P010, \
1043  AV_PIX_FMT_YUYV422, \
1044  AV_PIX_FMT_Y210, \
1045  AV_PIX_FMT_VUYX, \
1046  AV_PIX_FMT_QSV, \
1047  AV_PIX_FMT_NONE }, \
1048  .hw_configs = qsv_hw_configs, \
1049  .p.wrapper_name = "qsv", \
1050  .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
1051 }; \
1052 
1053 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
1054 
1055 #if CONFIG_HEVC_QSV_DECODER
1056 static const AVOption hevc_options[] = {
1057  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1058 
1059  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, "load_plugin" },
1060  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, "load_plugin" },
1061  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, "load_plugin" },
1062  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, "load_plugin" },
1063 
1064  { "load_plugins", "A :-separate list of hexadecimal plugin UIDs to load in an internal session",
1065  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
1066 
1067  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
1068  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
1069  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
1070  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
1071  { NULL },
1072 };
1073 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
1074 #endif
1075 
1076 static const AVOption options[] = {
1077  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1078 
1079  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
1080  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
1081  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
1082  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
1083  { NULL },
1084 };
1085 
1086 #if CONFIG_H264_QSV_DECODER
1087 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
1088 #endif
1089 
1090 #if CONFIG_MPEG2_QSV_DECODER
1091 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
1092 #endif
1093 
1094 #if CONFIG_VC1_QSV_DECODER
1095 DEFINE_QSV_DECODER(vc1, VC1, NULL)
1096 #endif
1097 
1098 #if CONFIG_MJPEG_QSV_DECODER
1099 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
1100 #endif
1101 
1102 #if CONFIG_VP8_QSV_DECODER
1103 DEFINE_QSV_DECODER(vp8, VP8, NULL)
1104 #endif
1105 
1106 #if CONFIG_VP9_QSV_DECODER
1107 DEFINE_QSV_DECODER(vp9, VP9, NULL)
1108 #endif
1109 
1110 #if CONFIG_AV1_QSV_DECODER
1111 DEFINE_QSV_DECODER(av1, AV1, NULL)
1112 #endif
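/*
 * Usage sketch (illustrative only, not part of this file): the wrapped decoders
 * defined above are selected by name through the regular libavcodec API, e.g.:
 *
 *     const AVCodec *codec = avcodec_find_decoder_by_name("h264_qsv");
 *     AVCodecContext *dec  = avcodec_alloc_context3(codec);
 *     av_opt_set_int(dec->priv_data, "async_depth", 4, 0); // option defined above
 *     // optional hardware device for video-memory output:
 *     av_hwdevice_ctx_create(&dec->hw_device_ctx, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
 *     avcodec_open2(dec, codec, NULL);
 *
 * Decoding then follows the usual avcodec_send_packet()/avcodec_receive_frame()
 * loop; output frames are AV_PIX_FMT_QSV when a hw frames/device context is
 * used, otherwise one of the system-memory formats listed in p.pix_fmts.
 */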