FFmpeg
dxva2_vp9.c
/*
 * DXVA2 VP9 HW acceleration.
 *
 * copyright (c) 2015 Hendrik Leppkes
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#include "dxva2_internal.h"
#include "hwaccel_internal.h"
#include "vp9shared.h"

struct vp9_dxva2_picture_context {
    DXVA_PicParams_VP9    pp;
    DXVA_Slice_VPx_Short  slice;
    const uint8_t        *bitstream;
    unsigned              bitstream_size;
};

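/* Pack a 7-bit surface index and a 1-bit flag into the single-byte
 * DXVA_PicEntry_VPx: the index occupies the low 7 bits, the flag bit 7. */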
static void fill_picture_entry(DXVA_PicEntry_VPx *pic,
                               unsigned index, unsigned flag)
{
    av_assert0((index & 0x7f) == index && (flag & 0x01) == flag);
    pic->bPicEntry = index | (flag << 7);
}

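/* Translate the parsed VP9 frame header (VP9SharedContext) into the
 * DXVA_PicParams_VP9 structure consumed by the hardware decoder. */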
int ff_dxva2_vp9_fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx,
                                         DXVA_PicParams_VP9 *pp)
{
    const VP9SharedContext *h = avctx->priv_data;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    int i;

    if (!pixdesc)
        return -1;

    memset(pp, 0, sizeof(*pp));

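    /* The flag word is packed from the uncompressed frame header; the first
     * two bits invert FFmpeg's keyframe/invisible flags to obtain DXVA's
     * frame_type (0 = key frame) and show_frame. */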
    pp->profile = h->h.profile;
    pp->wFormatAndPictureInfoFlags = ((h->h.keyframe == 0)   <<  0) |
                                     ((h->h.invisible == 0)  <<  1) |
                                     (h->h.errorres          <<  2) |
                                     (pixdesc->log2_chroma_w <<  3) | /* subsampling_x */
                                     (pixdesc->log2_chroma_h <<  4) | /* subsampling_y */
                                     (0                      <<  5) | /* extra_plane */
                                     (h->h.refreshctx        <<  6) |
                                     (h->h.parallelmode      <<  7) |
                                     (h->h.intraonly         <<  8) |
                                     (h->h.framectxid        <<  9) |
                                     (h->h.resetctx          << 11) |
                                     ((h->h.keyframe ? 0 : h->h.highprecisionmvs) << 13) |
                                     (0                      << 14); /* ReservedFormatInfo2Bits */

    pp->width  = avctx->width;
    pp->height = avctx->height;
    pp->BitDepthMinus8Luma   = pixdesc->comp[0].depth - 8;
    pp->BitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;
    /* swap 0/1 to match the reference */
    pp->interp_filter = h->h.filtermode ^ (h->h.filtermode <= 1);
    pp->Reserved8Bits = 0;

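    /* Map all eight VP9 reference slots to DXVA surface indices; slots
     * without an allocated frame are marked invalid with 0xFF. */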
    for (i = 0; i < 8; i++) {
        if (h->refs[i].f) {
            fill_picture_entry(&pp->ref_frame_map[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[i].f, 0), 0);
            pp->ref_frame_coded_width[i]  = h->refs[i].f->width;
            pp->ref_frame_coded_height[i] = h->refs[i].f->height;
        } else
            pp->ref_frame_map[i].bPicEntry = 0xFF;
    }

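    /* The three references actually used by this frame, selected through
     * refidx[]; their sign biases go to indices 1..3, index 0 (the intra
     * slot) stays zero from the memset above. */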
    for (i = 0; i < 3; i++) {
        uint8_t refidx = h->h.refidx[i];
        if (h->refs[refidx].f)
            fill_picture_entry(&pp->frame_refs[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[refidx].f, 0), 0);
        else
            pp->frame_refs[i].bPicEntry = 0xFF;

        pp->ref_frame_sign_bias[i + 1] = h->h.signbias[i];
    }

    fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, h->frames[CUR_FRAME].tf.f, 1), 0);

    pp->filter_level    = h->h.filter.level;
    pp->sharpness_level = h->h.filter.sharpness;

    pp->wControlInfoFlags = (h->h.lf_delta.enabled   << 0) |
                            (h->h.lf_delta.updated   << 1) |
                            (h->h.use_last_frame_mvs << 2) |
                            (0                       << 3); /* ReservedControlInfo5Bits */

    for (i = 0; i < 4; i++)
        pp->ref_deltas[i]  = h->h.lf_delta.ref[i];

    for (i = 0; i < 2; i++)
        pp->mode_deltas[i] = h->h.lf_delta.mode[i];

    pp->base_qindex   = h->h.yac_qi;
    pp->y_dc_delta_q  = h->h.ydc_qdelta;
    pp->uv_dc_delta_q = h->h.uvdc_qdelta;
    pp->uv_ac_delta_q = h->h.uvac_qdelta;

    /* segmentation data */
    pp->stVP9Segments.wSegmentInfoFlags = (h->h.segmentation.enabled       << 0) |
                                          (h->h.segmentation.update_map    << 1) |
                                          (h->h.segmentation.temporal      << 2) |
                                          (h->h.segmentation.absolute_vals << 3) |
                                          (0                               << 4); /* ReservedSegmentFlags4Bits */

    for (i = 0; i < 7; i++)
        pp->stVP9Segments.tree_probs[i] = h->h.segmentation.prob[i];

    if (h->h.segmentation.temporal)
        for (i = 0; i < 3; i++)
            pp->stVP9Segments.pred_probs[i] = h->h.segmentation.pred_prob[i];
    else
        memset(pp->stVP9Segments.pred_probs, 255, sizeof(pp->stVP9Segments.pred_probs));

    for (i = 0; i < 8; i++) {
        pp->stVP9Segments.feature_mask[i] = (h->h.segmentation.feat[i].q_enabled    << 0) |
                                            (h->h.segmentation.feat[i].lf_enabled   << 1) |
                                            (h->h.segmentation.feat[i].ref_enabled  << 2) |
                                            (h->h.segmentation.feat[i].skip_enabled << 3);

        pp->stVP9Segments.feature_data[i][0] = h->h.segmentation.feat[i].q_val;
        pp->stVP9Segments.feature_data[i][1] = h->h.segmentation.feat[i].lf_val;
        pp->stVP9Segments.feature_data[i][2] = h->h.segmentation.feat[i].ref_val;
        pp->stVP9Segments.feature_data[i][3] = 0; /* no data for skip */
    }

    pp->log2_tile_cols = h->h.tiling.log2_tile_cols;
    pp->log2_tile_rows = h->h.tiling.log2_tile_rows;

    pp->uncompressed_header_size_byte_aligned = h->h.uncompressed_header_size;
    pp->first_partition_size                  = h->h.compressed_header_size;

    pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    return 0;
}

static void fill_slice_short(DXVA_Slice_VPx_Short *slice,
                             unsigned position, unsigned size)
{
    memset(slice, 0, sizeof(*slice));
    slice->BSNALunitDataLocation = position;
    slice->SliceBytesInBuffer    = size;
    slice->wBadSliceChopping     = 0;
}

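/* Copy the accumulated frame bitstream into the decoder's bitstream buffer,
 * pad it, fill the bitstream buffer descriptor and commit the slice control
 * buffer. Used as the commit callback of ff_dxva2_common_end_frame(). */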
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned  dxva_size;
    unsigned  padding;
    unsigned  type;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;

    if (ctx_pic->slice.SliceBytesInBuffer > dxva_size) {
        av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream");
        return -1;
    }

    memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->slice.SliceBytesInBuffer);

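    /* Zero-pad the bitstream towards the next 128-byte boundary, limited by
     * the remaining space in the decoder buffer; DXVA decoders generally
     * expect an aligned bitstream data size. */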
    padding = FFMIN(128 - ((ctx_pic->slice.SliceBytesInBuffer) & 127), dxva_size - ctx_pic->slice.SliceBytesInBuffer);
    if (padding > 0) {
        memset(dxva_data + ctx_pic->slice.SliceBytesInBuffer, 0, padding);
        ctx_pic->slice.SliceBytesInBuffer += padding;
    }

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = ctx_pic->slice.SliceBytesInBuffer;
        dsc11->NumMBsInBuffer = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = ctx_pic->slice.SliceBytesInBuffer;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  &ctx_pic->slice, sizeof(ctx_pic->slice), 0);
}


static int dxva2_vp9_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_VP9 */
    if (ff_dxva2_vp9_fill_picture_parameters(avctx, ctx, &ctx_pic->pp) < 0)
        return -1;

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

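/* VP9 frames are not sliced: the whole compressed frame arrives as one
 * buffer, which is recorded here and described by a single short slice
 * descriptor. */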
static int dxva2_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    unsigned position;

    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice_short(&ctx_pic->slice, position, size);

    return 0;
}

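/* Submit the picture parameters and the bitstream to the hardware through
 * ff_dxva2_common_end_frame(), with commit_bitstream_and_slice_buffer()
 * uploading the compressed data. */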
static int dxva2_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, h->frames[CUR_FRAME].tf.f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);
    return ret;
}

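/* The three hardware accelerator descriptors below register the same
 * callbacks; they differ only in name and output pixel format (DXVA2,
 * D3D11VA old API, D3D11 new API). */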
#if CONFIG_VP9_DXVA2_HWACCEL
const FFHWAccel ff_vp9_dxva2_hwaccel = {
    .p.name               = "vp9_dxva2",
    .p.type               = AVMEDIA_TYPE_VIDEO,
    .p.id                 = AV_CODEC_ID_VP9,
    .p.pix_fmt            = AV_PIX_FMT_DXVA2_VLD,
    .init                 = ff_dxva2_decode_init,
    .uninit               = ff_dxva2_decode_uninit,
    .start_frame          = dxva2_vp9_start_frame,
    .decode_slice         = dxva2_vp9_decode_slice,
    .end_frame            = dxva2_vp9_end_frame,
    .frame_params         = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size       = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_VP9_D3D11VA_HWACCEL
const FFHWAccel ff_vp9_d3d11va_hwaccel = {
    .p.name               = "vp9_d3d11va",
    .p.type               = AVMEDIA_TYPE_VIDEO,
    .p.id                 = AV_CODEC_ID_VP9,
    .p.pix_fmt            = AV_PIX_FMT_D3D11VA_VLD,
    .init                 = ff_dxva2_decode_init,
    .uninit               = ff_dxva2_decode_uninit,
    .start_frame          = dxva2_vp9_start_frame,
    .decode_slice         = dxva2_vp9_decode_slice,
    .end_frame            = dxva2_vp9_end_frame,
    .frame_params         = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size       = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_VP9_D3D11VA2_HWACCEL
const FFHWAccel ff_vp9_d3d11va2_hwaccel = {
    .p.name               = "vp9_d3d11va2",
    .p.type               = AVMEDIA_TYPE_VIDEO,
    .p.id                 = AV_CODEC_ID_VP9,
    .p.pix_fmt            = AV_PIX_FMT_D3D11,
    .init                 = ff_dxva2_decode_init,
    .uninit               = ff_dxva2_decode_uninit,
    .start_frame          = dxva2_vp9_start_frame,
    .decode_slice         = dxva2_vp9_decode_slice,
    .end_frame            = dxva2_vp9_end_frame,
    .frame_params         = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size       = sizeof(FFDXVASharedContext),
};
#endif