FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <sys/types.h>
29 
30 #include <mfx/mfxvideo.h>
31 
32 #include "libavutil/common.h"
33 #include "libavutil/fifo.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/hwcontext_qsv.h"
37 #include "libavutil/mem.h"
38 #include "libavutil/log.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixfmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/film_grain_params.h"
44 
45 #include "avcodec.h"
46 #include "codec_internal.h"
47 #include "internal.h"
48 #include "decode.h"
49 #include "hwconfig.h"
50 #include "qsv.h"
51 #include "qsv_internal.h"
52 
53 static const AVRational mfx_tb = { 1, 90000 };
54 
55 #define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
56  MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
57  av_rescale_q(pts, pts_tb, mfx_tb) : pts)
58 
59 #define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
60  AV_NOPTS_VALUE : pts_tb.num ? \
61  av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
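/*
 * MediaSDK timestamps run on a fixed 90 kHz clock (mfx_tb above), while packets
 * carry timestamps in avctx->pkt_timebase. The two macros above rescale between
 * the clocks and map AV_NOPTS_VALUE <-> MFX_TIMESTAMP_UNKNOWN. For example, with
 * pkt_timebase = 1/1000 (milliseconds) a pts of 40 becomes 40 * 90000 / 1000 = 3600
 * on the MFX clock; if pkt_timebase is unset (num == 0) the value passes through unchanged.
 */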
62 
63 typedef struct QSVAsyncFrame {
64  mfxSyncPoint *sync;
65  QSVFrame *frame;
66 } QSVAsyncFrame;
67 
68 typedef struct QSVContext {
69  // the session used for decoding
70  mfxSession session;
71  mfxVersion ver;
72 
73  // the session we allocated internally, in case the caller did not provide
74  // one
75  QSVSession internal_qs;
76 
77  QSVFramesContext frames_ctx;
78 
79  /**
80  * a linked list of frames currently being used by QSV
81  */
82  QSVFrame *work_frames;
83 
84  AVFifo *async_fifo;
85  int zero_consume_run;
86  int reinit_flag;
87 
88  enum AVPixelFormat orig_pix_fmt;
89  uint32_t fourcc;
90  mfxFrameInfo frame_info;
91  AVBufferPool *pool;
92  int suggest_pool_size;
93  int initialized;
94 
95  // options set by the caller
96  int async_depth;
97  int iopattern;
98  int gpu_copy;
99 
100  char *load_plugins;
101 
102  mfxExtBuffer **ext_buffers;
103  int nb_ext_buffers;
104 } QSVContext;
105 
106 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
107  &(const AVCodecHWConfigInternal) {
108  .public = {
109  .pix_fmt = AV_PIX_FMT_QSV,
110  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
111  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
112  .device_type = AV_HWDEVICE_TYPE_QSV,
113  },
114  .hwaccel = NULL,
115  },
116  NULL
117 };
118 
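/*
 * qsv_get_continuous_buffer() below fills an AVFrame from a single contiguous
 * pool element rather than per-plane buffers: the luma stride is aligned to 128
 * bytes and the chroma plane (NV12/P010) starts at a 64-line-aligned offset,
 * matching the pool element size computed in qsv_decode_init_context(). This
 * tightly packed layout is what lets the SDK's GPU-accelerated copy (gpu_copy)
 * work on system-memory frames.
 */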
119 static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
120  AVBufferPool *pool)
121 {
122  int ret = 0;
123 
124  ff_decode_frame_props(avctx, frame);
125 
126  frame->width = avctx->width;
127  frame->height = avctx->height;
128 
129  switch (avctx->pix_fmt) {
130  case AV_PIX_FMT_NV12:
131  frame->linesize[0] = FFALIGN(avctx->width, 128);
132  break;
133  case AV_PIX_FMT_P010:
134  case AV_PIX_FMT_YUYV422:
135  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
136  break;
137  case AV_PIX_FMT_Y210:
138  frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
139  break;
140  default:
141  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
142  return AVERROR(EINVAL);
143  }
144 
145  frame->buf[0] = av_buffer_pool_get(pool);
146  if (!frame->buf[0])
147  return AVERROR(ENOMEM);
148 
149  frame->data[0] = frame->buf[0]->data;
150  if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
151  avctx->pix_fmt == AV_PIX_FMT_P010) {
152  frame->linesize[1] = frame->linesize[0];
153  frame->data[1] = frame->data[0] +
154  frame->linesize[0] * FFALIGN(avctx->height, 64);
155  }
156 
157  ret = ff_attach_decode_data(frame);
158  if (ret < 0)
159  return ret;
160 
161  return 0;
162 }
163 
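/*
 * qsv_init_session() picks the MFX session to decode with, in order of precedence:
 * a session supplied by the caller through AVQSVContext, one derived from
 * avctx->hw_frames_ctx, one derived from avctx->hw_device_ctx, and finally a
 * self-contained internal session. Whichever is chosen, the decoder is closed
 * afterwards so that MFXVideoDECODE_Init() later starts from a clean state.
 */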
164 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
165  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
166 {
167  int ret;
168 
169  if (q->gpu_copy == MFX_GPUCOPY_ON &&
170  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
171  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
172  "only works in system memory mode.\n");
173  q->gpu_copy = MFX_GPUCOPY_OFF;
174  }
175  if (session) {
176  q->session = session;
177  } else if (hw_frames_ref) {
178  if (q->internal_qs.session) {
179  MFXClose(q->internal_qs.session);
180  q->internal_qs.session = NULL;
181  }
182  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
183 
184  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
185  if (!q->frames_ctx.hw_frames_ctx)
186  return AVERROR(ENOMEM);
187 
188  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
189  &q->frames_ctx, q->load_plugins,
190  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
191  q->gpu_copy);
192  if (ret < 0) {
193  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
194  return ret;
195  }
196 
197  q->session = q->internal_qs.session;
198  } else if (hw_device_ref) {
199  if (q->internal_qs.session) {
200  MFXClose(q->internal_qs.session);
201  q->internal_qs.session = NULL;
202  }
203 
204  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
205  hw_device_ref, q->load_plugins, q->gpu_copy);
206  if (ret < 0)
207  return ret;
208 
209  q->session = q->internal_qs.session;
210  } else {
211  if (!q->internal_qs.session) {
212  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
213  q->load_plugins, q->gpu_copy);
214  if (ret < 0)
215  return ret;
216  }
217 
218  q->session = q->internal_qs.session;
219  }
220 
221  if (MFXQueryVersion(q->session, &q->ver) != MFX_ERR_NONE) {
222  av_log(avctx, AV_LOG_ERROR, "Error querying the session version. \n");
223  q->session = NULL;
224 
225  if (q->internal_qs.session) {
226  MFXClose(q->internal_qs.session);
227  q->internal_qs.session = NULL;
228  }
229 
230  return AVERROR_EXTERNAL;
231  }
232 
233  /* make sure the decoder is uninitialized */
234  MFXVideoDECODE_Close(q->session);
235 
236  return 0;
237 }
238 
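/*
 * qsv_decode_preinit() negotiates the output pixel format via ff_get_format()
 * (offering AV_PIX_FMT_QSV for zero-copy video memory and the parser-reported
 * software format as fallback), allocates the async fifo, derives a QSV frame
 * pool from hw_device_ctx when the caller supplied only a device, selects the
 * IOPattern accordingly, and initializes the MFX session.
 */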
239 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
240 {
241  mfxSession session = NULL;
242  int iopattern = 0;
243  int ret;
244  enum AVPixelFormat pix_fmts[3] = {
245  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
246  pix_fmt, /* system memory format obtained from bitstream parser */
247  AV_PIX_FMT_NONE };
248 
249  ret = ff_get_format(avctx, pix_fmts);
250  if (ret < 0) {
251  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
252  return ret;
253  }
254 
255  if (!q->async_fifo) {
256  q->async_fifo = av_fifo_alloc2(q->async_depth, sizeof(QSVAsyncFrame), 0);
257  if (!q->async_fifo)
258  return AVERROR(ENOMEM);
259  }
260 
261  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
262  AVQSVContext *user_ctx = avctx->hwaccel_context;
263  session = user_ctx->session;
264  iopattern = user_ctx->iopattern;
265  q->ext_buffers = user_ctx->ext_buffers;
266  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
267  }
268 
269  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
270  AVHWFramesContext *hwframes_ctx;
271  AVQSVFramesContext *frames_hwctx;
272 
273  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
274 
275  if (!avctx->hw_frames_ctx) {
276  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
277  return AVERROR(ENOMEM);
278  }
279 
280  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
281  frames_hwctx = hwframes_ctx->hwctx;
282  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
283  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
284  hwframes_ctx->format = AV_PIX_FMT_QSV;
285  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
286  hwframes_ctx->initial_pool_size = q->suggest_pool_size + 16 + avctx->extra_hw_frames;
287  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
288 
289  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
290 
291  if (ret < 0) {
292  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
293  av_buffer_unref(&avctx->hw_frames_ctx);
294  return ret;
295  }
296  }
297 
298  if (avctx->hw_frames_ctx) {
299  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
300  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
301 
302  if (!iopattern) {
303  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
304  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
305  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
306  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
307  }
308  }
309 
310  if (!iopattern)
311  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
312  q->iopattern = iopattern;
313 
314  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
315 
316  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
317  if (ret < 0) {
318  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
319  return ret;
320  }
321 
322  param->IOPattern = q->iopattern;
323  param->AsyncDepth = q->async_depth;
324  param->ExtParam = q->ext_buffers;
325  param->NumExtParam = q->nb_ext_buffers;
326 
327  return 0;
328  }
329 
330 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
331 {
332  int ret;
333 
334  avctx->width = param->mfx.FrameInfo.CropW;
335  avctx->height = param->mfx.FrameInfo.CropH;
336  avctx->coded_width = param->mfx.FrameInfo.Width;
337  avctx->coded_height = param->mfx.FrameInfo.Height;
338  avctx->level = param->mfx.CodecLevel;
339  avctx->profile = param->mfx.CodecProfile;
340  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
341  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
342 
343  ret = MFXVideoDECODE_Init(q->session, param);
344  if (ret < 0)
345  return ff_qsv_print_error(avctx, ret,
346  "Error initializing the MFX video decoder");
347 
348  q->frame_info = param->mfx.FrameInfo;
349 
350  if (!avctx->hw_frames_ctx)
351  q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
352  FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
353  return 0;
354 }
355 
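/*
 * qsv_decode_header() runs MFXVideoDECODE_DecodeHeader() on the packet to fill
 * mfxVideoParam before the decoder is initialized. MFX_ERR_MORE_DATA is mapped
 * to AVERROR(EAGAIN) so the caller keeps buffering packets until the header is
 * complete, and colour properties are exported from the attached
 * mfxExtVideoSignalInfo buffer.
 */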
356 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
357  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
358  mfxVideoParam *param)
359 {
360  int ret;
361  mfxExtVideoSignalInfo video_signal_info = { 0 };
362  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
363  mfxBitstream bs = { 0 };
364 
365  if (avpkt->size) {
366  bs.Data = avpkt->data;
367  bs.DataLength = avpkt->size;
368  bs.MaxLength = bs.DataLength;
369  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
370  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
371  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
372  } else
373  return AVERROR_INVALIDDATA;
374 
375 
376  if(!q->session) {
377  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
378  if (ret < 0)
379  return ret;
380  }
381 
382  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
383  if (ret < 0)
384  return ret;
385 
386  param->mfx.CodecId = ret;
387  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
388  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
389  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
390  // so do not append this buffer to the existing buffer array
391  param->ExtParam = header_ext_params;
392  param->NumExtParam = 1;
393  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
394  if (MFX_ERR_MORE_DATA == ret) {
395  return AVERROR(EAGAIN);
396  }
397  if (ret < 0)
398  return ff_qsv_print_error(avctx, ret,
399  "Error decoding stream header");
400 
401  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
402 
403  if (video_signal_info.ColourDescriptionPresent) {
404  avctx->color_primaries = video_signal_info.ColourPrimaries;
405  avctx->color_trc = video_signal_info.TransferCharacteristics;
406  avctx->colorspace = video_signal_info.MatrixCoefficients;
407  }
408 
409  param->ExtParam = q->ext_buffers;
410  param->NumExtParam = q->nb_ext_buffers;
411 
412 #if QSV_VERSION_ATLEAST(1, 34)
413  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1)
414  param->mfx.FilmGrain = (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) ? 0 : param->mfx.FilmGrain;
415 #endif
416 
417  return 0;
418 }
419 
419 
420 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
421 {
422  int ret;
423 
424  if (q->pool)
425  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
426  else
427  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
428 
429  if (ret < 0)
430  return ret;
431 
432  if (frame->frame->format == AV_PIX_FMT_QSV) {
433  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
434  } else {
435  ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
436  if (ret < 0) {
437  av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
438  return ret;
439  }
440  }
441 
442  frame->surface.Info = q->frame_info;
443 
444  if (q->frames_ctx.mids) {
445  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
446  if (ret < 0)
447  return ret;
448 
449  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
450  }
451 
452  frame->surface.Data.ExtParam = frame->ext_param;
453  frame->surface.Data.NumExtParam = 0;
454  frame->num_ext_params = 0;
455  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
456  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
457  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->dec_info);
458 #if QSV_VERSION_ATLEAST(1, 34)
459  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1) {
460  frame->av1_film_grain_param.Header.BufferId = MFX_EXTBUFF_AV1_FILM_GRAIN_PARAM;
461  frame->av1_film_grain_param.Header.BufferSz = sizeof(frame->av1_film_grain_param);
462  frame->av1_film_grain_param.FilmGrainFlags = 0;
463  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->av1_film_grain_param);
464  }
465 #endif
466 
467  frame->used = 1;
468 
469  return 0;
470 }
471 
472 static void qsv_clear_unused_frames(QSVContext *q)
473 {
474  QSVFrame *cur = q->work_frames;
475  while (cur) {
476  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
477  cur->used = 0;
478  av_frame_unref(cur->frame);
479  }
480  cur = cur->next;
481  }
482 }
483 
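/*
 * Surfaces handed to the SDK are tracked in the work_frames linked list.
 * get_surface() first reuses an entry that is no longer locked or queued
 * (qsv_clear_unused_frames() above resets such entries) and only appends a new
 * list node when every existing frame is still in use.
 */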
484 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
485 {
486  QSVFrame *frame, **last;
487  int ret;
488 
489  qsv_clear_unused_frames(q);
490 
491  frame = q->work_frames;
492  last = &q->work_frames;
493  while (frame) {
494  if (!frame->used) {
495  ret = alloc_frame(avctx, q, frame);
496  if (ret < 0)
497  return ret;
498  *surf = &frame->surface;
499  return 0;
500  }
501 
502  last = &frame->next;
503  frame = frame->next;
504  }
505 
506  frame = av_mallocz(sizeof(*frame));
507  if (!frame)
508  return AVERROR(ENOMEM);
509  frame->frame = av_frame_alloc();
510  if (!frame->frame) {
511  av_freep(&frame);
512  return AVERROR(ENOMEM);
513  }
514  *last = frame;
515 
516  ret = alloc_frame(avctx, q, frame);
517  if (ret < 0)
518  return ret;
519 
520  *surf = &frame->surface;
521 
522  return 0;
523 }
524 
525 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
526 {
527  QSVFrame *cur = q->work_frames;
528  while (cur) {
529  if (surf == &cur->surface)
530  return cur;
531  cur = cur->next;
532  }
533  return NULL;
534 }
535 
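/*
 * qsv_export_film_grain() translates the mfxExtAV1FilmGrainParam returned by the
 * runtime into AVFilmGrainParams/AVFilmGrainAOMParams frame side data, undoing
 * the +128/+8/+6 biases the SDK applies to the AV1 syntax elements. It is only
 * compiled against MediaSDK API 1.34 or newer, where AV1 film grain is exposed.
 */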
536 #if QSV_VERSION_ATLEAST(1, 34)
537 static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam *ext_param, AVFrame *frame)
538 {
539  AVFilmGrainParams *fgp;
540  AVFilmGrainAOMParams *aom;
541  int i;
542 
543  if (!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_APPLY))
544  return 0;
545 
546  fgp = av_film_grain_params_create_side_data(frame);
547 
548  if (!fgp)
549  return AVERROR(ENOMEM);
550 
551  fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
552  fgp->seed = ext_param->GrainSeed;
553  aom = &fgp->codec.aom;
554 
555  aom->chroma_scaling_from_luma = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CHROMA_SCALING_FROM_LUMA);
556  aom->scaling_shift = ext_param->GrainScalingMinus8 + 8;
557  aom->ar_coeff_lag = ext_param->ArCoeffLag;
558  aom->ar_coeff_shift = ext_param->ArCoeffShiftMinus6 + 6;
559  aom->grain_scale_shift = ext_param->GrainScaleShift;
560  aom->overlap_flag = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_OVERLAP);
561  aom->limit_output_range = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CLIP_TO_RESTRICTED_RANGE);
562 
563  aom->num_y_points = ext_param->NumYPoints;
564 
565  for (i = 0; i < aom->num_y_points; i++) {
566  aom->y_points[i][0] = ext_param->PointY[i].Value;
567  aom->y_points[i][1] = ext_param->PointY[i].Scaling;
568  }
569 
570  aom->num_uv_points[0] = ext_param->NumCbPoints;
571 
572  for (i = 0; i < aom->num_uv_points[0]; i++) {
573  aom->uv_points[0][i][0] = ext_param->PointCb[i].Value;
574  aom->uv_points[0][i][1] = ext_param->PointCb[i].Scaling;
575  }
576 
577  aom->num_uv_points[1] = ext_param->NumCrPoints;
578 
579  for (i = 0; i < aom->num_uv_points[1]; i++) {
580  aom->uv_points[1][i][0] = ext_param->PointCr[i].Value;
581  aom->uv_points[1][i][1] = ext_param->PointCr[i].Scaling;
582  }
583 
584  for (i = 0; i < 24; i++)
585  aom->ar_coeffs_y[i] = ext_param->ArCoeffsYPlus128[i] - 128;
586 
587  for (i = 0; i < 25; i++) {
588  aom->ar_coeffs_uv[0][i] = ext_param->ArCoeffsCbPlus128[i] - 128;
589  aom->ar_coeffs_uv[1][i] = ext_param->ArCoeffsCrPlus128[i] - 128;
590  }
591 
592  aom->uv_mult[0] = ext_param->CbMult;
593  aom->uv_mult[1] = ext_param->CrMult;
594  aom->uv_mult_luma[0] = ext_param->CbLumaMult;
595  aom->uv_mult_luma[1] = ext_param->CrLumaMult;
596  aom->uv_offset[0] = ext_param->CbOffset;
597  aom->uv_offset[1] = ext_param->CrOffset;
598 
599  return 0;
600 }
601 #endif
602 
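/*
 * qsv_decode() submits one bitstream chunk with MFXVideoDECODE_DecodeFrameAsync(),
 * retrying while the device is busy, and pushes the returned sync point into
 * async_fifo. Frames are only synced (MFXVideoCORE_SyncOperation) and output once
 * async_depth operations are in flight, or when draining with an empty packet,
 * which is what makes the pipeline asynchronous. The return value is the number
 * of bytes consumed (bs.DataOffset).
 */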
603 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
604  AVFrame *frame, int *got_frame,
605  const AVPacket *avpkt)
606 {
607  mfxFrameSurface1 *insurf;
608  mfxFrameSurface1 *outsurf;
609  mfxSyncPoint *sync;
610  mfxBitstream bs = { { { 0 } } };
611  int ret;
612 
613  if (avpkt->size) {
614  bs.Data = avpkt->data;
615  bs.DataLength = avpkt->size;
616  bs.MaxLength = bs.DataLength;
617  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
618  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
619  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
620  }
621 
622  sync = av_mallocz(sizeof(*sync));
623  if (!sync) {
624  av_freep(&sync);
625  return AVERROR(ENOMEM);
626  }
627 
628  do {
629  ret = get_surface(avctx, q, &insurf);
630  if (ret < 0) {
631  av_freep(&sync);
632  return ret;
633  }
634 
635  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
636  insurf, &outsurf, sync);
637  if (ret == MFX_WRN_DEVICE_BUSY)
638  av_usleep(500);
639 
640  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
641 
642  if (ret == MFX_ERR_INCOMPATIBLE_VIDEO_PARAM) {
643  q->reinit_flag = 1;
644  av_log(avctx, AV_LOG_DEBUG, "Video parameter change\n");
645  av_freep(&sync);
646  return 0;
647  }
648 
649  if (ret != MFX_ERR_NONE &&
650  ret != MFX_ERR_MORE_DATA &&
651  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
652  ret != MFX_ERR_MORE_SURFACE) {
653  av_freep(&sync);
654  return ff_qsv_print_error(avctx, ret,
655  "Error during QSV decoding.");
656  }
657 
658  /* make sure we do not enter an infinite loop if the SDK
659  * did not consume any data and did not return anything */
660  if (!*sync && !bs.DataOffset) {
661  bs.DataOffset = avpkt->size;
662  ++q->zero_consume_run;
663  if (q->zero_consume_run > 1)
664  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
665  } else {
666  q->zero_consume_run = 0;
667  }
668 
669  if (*sync) {
670  QSVAsyncFrame aframe;
671  QSVFrame *out_frame = find_frame(q, outsurf);
672 
673  if (!out_frame) {
674  av_log(avctx, AV_LOG_ERROR,
675  "The returned surface does not correspond to any frame\n");
676  av_freep(&sync);
677  return AVERROR_BUG;
678  }
679 
680  out_frame->queued += 1;
681 
682  aframe = (QSVAsyncFrame){ sync, out_frame };
683  av_fifo_write(q->async_fifo, &aframe, 1);
684  } else {
685  av_freep(&sync);
686  }
687 
688  if ((av_fifo_can_read(q->async_fifo) >= q->async_depth) ||
689  (!avpkt->size && av_fifo_can_read(q->async_fifo))) {
690  QSVAsyncFrame aframe;
691  AVFrame *src_frame;
692 
693  av_fifo_read(q->async_fifo, &aframe, 1);
694  aframe.frame->queued -= 1;
695 
696  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
697  do {
698  ret = MFXVideoCORE_SyncOperation(q->session, *aframe.sync, 1000);
699  } while (ret == MFX_WRN_IN_EXECUTION);
700  }
701 
702  av_freep(&aframe.sync);
703 
704  src_frame = aframe.frame->frame;
705 
706  ret = av_frame_ref(frame, src_frame);
707  if (ret < 0)
708  return ret;
709 
710  outsurf = &aframe.frame->surface;
711 
712  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
713 #if QSV_VERSION_ATLEAST(1, 34)
714  if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
715  QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) &&
716  avctx->codec_id == AV_CODEC_ID_AV1) {
717  ret = qsv_export_film_grain(avctx, &aframe.frame->av1_film_grain_param, frame);
718 
719  if (ret < 0)
720  return ret;
721  }
722 #endif
723 
724  frame->repeat_pict =
725  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
726  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
727  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
728  frame->top_field_first =
729  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
730  frame->interlaced_frame =
731  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
732  frame->pict_type = ff_qsv_map_pictype(aframe.frame->dec_info.FrameType);
733  // Treating only IDR frames as key frames is correct for H.264; for HEVC, all IRAP frames are key frames.
734  if (avctx->codec_id == AV_CODEC_ID_H264)
735  frame->key_frame = !!(aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
736 
737  /* update the surface properties */
738  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
739  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
740 
741  *got_frame = 1;
742  }
743 
744  return bs.DataOffset;
745 }
746 
747 static void qsv_decode_close_qsvcontext(QSVContext *q)
748 {
749  QSVFrame *cur = q->work_frames;
750 
751  if (q->session)
752  MFXVideoDECODE_Close(q->session);
753 
754  if (q->async_fifo) {
755  QSVAsyncFrame aframe;
756  while (av_fifo_read(q->async_fifo, &aframe, 1) >= 0)
757  av_freep(&aframe.sync);
758  av_fifo_freep2(&q->async_fifo);
759  }
760 
761  while (cur) {
762  q->work_frames = cur->next;
763  av_frame_free(&cur->frame);
764  av_freep(&cur);
765  cur = q->work_frames;
766  }
767 
768  ff_qsv_close_internal_session(&q->internal_qs);
769 
770  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
771  av_buffer_unref(&q->frames_ctx.mids_buf);
772  av_buffer_pool_uninit(&q->pool);
773 }
774 
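/*
 * qsv_process_data() drives (re)initialization: while no session exists, or after
 * the SDK reported changed video parameters (reinit_flag), it decodes the stream
 * header, queries the suggested surface count, re-runs qsv_decode_preinit() and
 * MFXVideoDECODE_Init(), and only then hands the packet to qsv_decode(). Empty
 * packets skip all of this and simply drain the decoder.
 */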
775 static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
776  AVFrame *frame, int *got_frame, const AVPacket *pkt)
777 {
778  int ret;
779  mfxVideoParam param = { 0 };
780  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
781 
782  if (!pkt->size)
783  return qsv_decode(avctx, q, frame, got_frame, pkt);
784 
785  /* TODO: flush delayed frames on reinit */
786 
787  // sw_pix_fmt and coded_width/height must be set before calling ff_get_format();
788  // assume sw_pix_fmt is NV12 and coded_width/height is 1280x720.
789  // The assumption may not be correct, but it is updated after the header is decoded if needed.
790  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
791  pix_fmt = q->orig_pix_fmt;
792  if (!avctx->coded_width)
793  avctx->coded_width = 1280;
794  if (!avctx->coded_height)
795  avctx->coded_height = 720;
796 
797  /* decode zero-size pkt to flush the buffered pkt before reinit */
798  if (q->reinit_flag) {
799  AVPacket zero_pkt = {0};
800  ret = qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
801  if (ret < 0 || *got_frame)
802  return ret;
803  }
804 
805  if (q->reinit_flag || !q->session || !q->initialized) {
806  mfxFrameAllocRequest request;
807  memset(&request, 0, sizeof(request));
808 
809  q->reinit_flag = 0;
810  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
811  if (ret < 0) {
812  if (ret == AVERROR(EAGAIN))
813  av_log(avctx, AV_LOG_INFO, "More data is required to decode header\n");
814  else
815  av_log(avctx, AV_LOG_ERROR, "Error decoding header\n");
816  goto reinit_fail;
817  }
818  param.IOPattern = q->iopattern;
819 
820  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
821 
822  avctx->coded_width = param.mfx.FrameInfo.Width;
823  avctx->coded_height = param.mfx.FrameInfo.Height;
824 
825  ret = MFXVideoDECODE_QueryIOSurf(q->session, &param, &request);
826  if (ret < 0)
827  return ff_qsv_print_error(avctx, ret, "Error querying IO surface");
828 
829  q->suggest_pool_size = request.NumFrameSuggested;
830 
831  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
832  if (ret < 0)
833  goto reinit_fail;
834  q->initialized = 0;
835  }
836 
837  if (!q->initialized) {
838  ret = qsv_decode_init_context(avctx, q, &param);
839  if (ret < 0)
840  goto reinit_fail;
841  q->initialized = 1;
842  }
843 
844  return qsv_decode(avctx, q, frame, got_frame, pkt);
845 
846 reinit_fail:
847  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
848  return ret;
849 }
850 
851 enum LoadPlugin {
852  LOAD_PLUGIN_NONE,
853  LOAD_PLUGIN_HEVC_SW,
854  LOAD_PLUGIN_HEVC_HW,
855 };
856 
857 typedef struct QSVDecContext {
858  AVClass *class;
859  QSVContext qsv;
860 
861  int load_plugin;
862 
863  AVFifo *packet_fifo;
864 
865  AVPacket buffer_pkt;
866 } QSVDecContext;
867 
868 static void qsv_clear_buffers(QSVDecContext *s)
869 {
870  AVPacket pkt;
871  while (av_fifo_read(s->packet_fifo, &pkt, 1) >= 0)
872  av_packet_unref(&pkt);
873 
874  av_packet_unref(&s->buffer_pkt);
875 }
876 
877 static av_cold int qsv_decode_close(AVCodecContext *avctx)
878 {
879  QSVDecContext *s = avctx->priv_data;
880 
881  qsv_decode_close_qsvcontext(&s->qsv);
882 
883  qsv_clear_buffers(s);
884 
885  av_fifo_freep2(&s->packet_fifo);
886 
887  return 0;
888 }
889 
890 static av_cold int qsv_decode_init(AVCodecContext *avctx)
891 {
892  QSVDecContext *s = avctx->priv_data;
893  int ret;
894  const char *uid = NULL;
895 
896  if (avctx->codec_id == AV_CODEC_ID_VP8) {
897  uid = "f622394d8d87452f878c51f2fc9b4131";
898  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
899  uid = "a922394d8d87452f878c51f2fc9b4131";
900  }
901  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
902  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
903  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
904 
905  if (s->qsv.load_plugins[0]) {
906  av_log(avctx, AV_LOG_WARNING,
907  "load_plugins is not empty, but load_plugin is not set to 'none'."
908  " The load_plugin value will be ignored.\n");
909  } else {
910  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
911  uid = uid_hevcdec_sw;
912  else
913  uid = uid_hevcdec_hw;
914  }
915  }
916  if (uid) {
917  av_freep(&s->qsv.load_plugins);
918  s->qsv.load_plugins = av_strdup(uid);
919  if (!s->qsv.load_plugins)
920  return AVERROR(ENOMEM);
921  }
922 
923  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
924  s->packet_fifo = av_fifo_alloc2(1, sizeof(AVPacket),
925  AV_FIFO_FLAG_AUTO_GROW);
926  if (!s->packet_fifo) {
927  ret = AVERROR(ENOMEM);
928  goto fail;
929  }
930 
931  if (!avctx->pkt_timebase.num)
932  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
933 
934  return 0;
935 fail:
936  qsv_decode_close(avctx);
937  return ret;
938 }
939 
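/*
 * qsv_decode_frame() keeps its own packet fifo: every input packet is referenced
 * into packet_fifo, while buffer_pkt holds the packet currently being consumed,
 * possibly across several calls, since qsv_process_data() may accept only part of
 * it. On a decode error the partially consumed packet is dropped so the same bytes
 * are not resubmitted forever.
 */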
940 static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
941  int *got_frame, AVPacket *avpkt)
942 {
943  QSVDecContext *s = avctx->priv_data;
944  int ret;
945 
946  /* buffer the input packet */
947  if (avpkt->size) {
948  AVPacket input_ref;
949 
950  ret = av_packet_ref(&input_ref, avpkt);
951  if (ret < 0)
952  return ret;
953  av_fifo_write(s->packet_fifo, &input_ref, 1);
954  }
955 
956  /* process buffered data */
957  while (!*got_frame) {
958  /* prepare the input data */
959  if (s->buffer_pkt.size <= 0) {
960  /* no more data */
961  if (!av_fifo_can_read(s->packet_fifo))
962  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
963  /* while reinit is in progress, do not read from the fifo; keep the current buffer_pkt */
964  if (!s->qsv.reinit_flag) {
965  av_packet_unref(&s->buffer_pkt);
966  av_fifo_read(s->packet_fifo, &s->buffer_pkt, 1);
967  }
968  }
969 
970  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
971  if (ret < 0){
972  /* Drop buffer_pkt if decoding it failed; otherwise the decoder
973  would keep retrying the same failing packet. */
974  av_packet_unref(&s->buffer_pkt);
975  return ret;
976  }
977  if (s->qsv.reinit_flag)
978  continue;
979 
980  s->buffer_pkt.size -= ret;
981  s->buffer_pkt.data += ret;
982  }
983 
984  return avpkt->size;
985 }
986 
987 static void qsv_decode_flush(AVCodecContext *avctx)
988 {
989  QSVDecContext *s = avctx->priv_data;
990 
991  qsv_clear_buffers(s);
992 
993  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
994  s->qsv.initialized = 0;
995 }
996 
997 #define OFFSET(x) offsetof(QSVDecContext, x)
998 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
999 
1000 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
1001 static const AVClass x##_qsv_class = { \
1002  .class_name = #x "_qsv", \
1003  .item_name = av_default_item_name, \
1004  .option = opt, \
1005  .version = LIBAVUTIL_VERSION_INT, \
1006 }; \
1007 const FFCodec ff_##x##_qsv_decoder = { \
1008  .p.name = #x "_qsv", \
1009  .p.long_name = NULL_IF_CONFIG_SMALL(#X " video (Intel Quick Sync Video acceleration)"), \
1010  .priv_data_size = sizeof(QSVDecContext), \
1011  .p.type = AVMEDIA_TYPE_VIDEO, \
1012  .p.id = AV_CODEC_ID_##X, \
1013  .init = qsv_decode_init, \
1014  FF_CODEC_DECODE_CB(qsv_decode_frame), \
1015  .flush = qsv_decode_flush, \
1016  .close = qsv_decode_close, \
1017  .bsfs = bsf_name, \
1018  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
1019  .p.priv_class = &x##_qsv_class, \
1020  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
1021  AV_PIX_FMT_P010, \
1022  AV_PIX_FMT_YUYV422, \
1023  AV_PIX_FMT_Y210, \
1024  AV_PIX_FMT_QSV, \
1025  AV_PIX_FMT_NONE }, \
1026  .hw_configs = qsv_hw_configs, \
1027  .p.wrapper_name = "qsv", \
1028 }; \
1029 
1030 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
1031 
1032 #if CONFIG_HEVC_QSV_DECODER
1033 static const AVOption hevc_options[] = {
1034  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1035 
1036  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, "load_plugin" },
1037  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, "load_plugin" },
1038  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, "load_plugin" },
1039  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, "load_plugin" },
1040 
1041  { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
1042  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
1043 
1044  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
1045  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
1046  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
1047  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
1048  { NULL },
1049 };
1050 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
1051 #endif
1052 
1053 static const AVOption options[] = {
1054  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1055 
1056  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
1057  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
1058  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
1059  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
1060  { NULL },
1061 };
1062 
1063 #if CONFIG_H264_QSV_DECODER
1064 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
1065 #endif
1066 
1067 #if CONFIG_MPEG2_QSV_DECODER
1068 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
1069 #endif
1070 
1071 #if CONFIG_VC1_QSV_DECODER
1072 DEFINE_QSV_DECODER(vc1, VC1, NULL)
1073 #endif
1074 
1075 #if CONFIG_MJPEG_QSV_DECODER
1076 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
1077 #endif
1078 
1079 #if CONFIG_VP8_QSV_DECODER
1080 DEFINE_QSV_DECODER(vp8, VP8, NULL)
1081 #endif
1082 
1083 #if CONFIG_VP9_QSV_DECODER
1084 DEFINE_QSV_DECODER(vp9, VP9, NULL)
1085 #endif
1086 
1087 #if CONFIG_AV1_QSV_DECODER
1088 DEFINE_QSV_DECODER(av1, AV1, NULL)
1089 #endif