vdpau.c
1 /*
2  * Video Decode and Presentation API for UNIX (VDPAU) is used for
3  * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1.
4  *
5  * Copyright (c) 2008 NVIDIA
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <limits.h>
25 
26 #include "avcodec.h"
27 #include "internal.h"
28 #include "h264.h"
29 #include "vc1.h"
30 #include "vdpau.h"
31 #include "vdpau_compat.h"
32 #include "vdpau_internal.h"
33 
34 // XXX: at the time of adding this ifdefery, av_assert* wasn't used outside.
35 // When dropping it, make sure other av_assert* were not added since then.
36 #if FF_API_BUFS_VDPAU
37 #include "libavutil/avassert.h"
38 #endif
39 
40 #if FF_API_VDPAU
41 #undef NDEBUG
42 #include <assert.h>
43 #endif
44 
45 /**
46  * @addtogroup VDPAU_Decoding
47  *
48  * @{
49  */
50 
51 static int vdpau_error(VdpStatus status)
52 {
53  switch (status) {
54  case VDP_STATUS_OK:
55  return 0;
56  case VDP_STATUS_NO_IMPLEMENTATION:
57  return AVERROR(ENOSYS);
58  case VDP_STATUS_DISPLAY_PREEMPTED:
59  return AVERROR(EIO);
60  case VDP_STATUS_INVALID_HANDLE:
61  return AVERROR(EBADF);
62  case VDP_STATUS_INVALID_POINTER:
63  return AVERROR(EFAULT);
64  case VDP_STATUS_RESOURCES:
65  return AVERROR(ENOBUFS);
66  case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
67  return AVERROR(EXDEV);
68  case VDP_STATUS_ERROR:
69  return AVERROR(EIO);
70  default:
71  return AVERROR(EINVAL);
72  }
73 }
74 
75 AVVDPAUContext *av_alloc_vdpaucontext(void)
76 {
77  return av_vdpau_alloc_context();
78 }
79 
80 MAKE_ACCESSORS(AVVDPAUContext, vdpau_hwaccel, AVVDPAU_Render2, render2)
81 
82 int av_vdpau_get_surface_parameters(AVCodecContext *avctx,
83  VdpChromaType *type,
84  uint32_t *width, uint32_t *height)
85 {
86  VdpChromaType t;
87  uint32_t w = avctx->coded_width;
88  uint32_t h = avctx->coded_height;
89 
90  /* See <vdpau/vdpau.h> for per-type alignment constraints. */
91  switch (avctx->sw_pix_fmt) {
92  case AV_PIX_FMT_YUV420P:
93  case AV_PIX_FMT_YUVJ420P:
94  t = VDP_CHROMA_TYPE_420;
95  w = (w + 1) & ~1;
96  h = (h + 3) & ~3;
97  break;
98  case AV_PIX_FMT_YUV422P:
99  case AV_PIX_FMT_YUVJ422P:
100  t = VDP_CHROMA_TYPE_422;
101  w = (w + 1) & ~1;
102  h = (h + 1) & ~1;
103  break;
104  case AV_PIX_FMT_YUV444P:
105  case AV_PIX_FMT_YUVJ444P:
106  t = VDP_CHROMA_TYPE_444;
107  h = (h + 1) & ~1;
108  break;
109  default:
110  return AVERROR(ENOSYS);
111  }
112 
113  if (type)
114  *type = t;
115  if (width)
116  *width = w;
117  if (height)
118  *height = h;
119  return 0;
120 }
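/* Usage sketch (illustrative only; `device` and `surf_create` are assumed to
 * come from the application's own VDPAU setup, `surf_create` being the
 * VdpVideoSurfaceCreate entry point returned by VdpGetProcAddress):
 *
 *     VdpChromaType type;
 *     uint32_t width, height;
 *     VdpVideoSurface surface;
 *     if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height) == 0)
 *         surf_create(device, type, width, height, &surface);
 */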
121 
122 int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile,
123  int level)
124 {
125  VDPAUHWContext *hwctx = avctx->hwaccel_context;
126  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
127  VdpVideoSurfaceQueryCapabilities *surface_query_caps;
128  VdpDecoderQueryCapabilities *decoder_query_caps;
129  VdpDecoderCreate *create;
130  void *func;
131  VdpStatus status;
132  VdpBool supported;
133  uint32_t max_level, max_mb, max_width, max_height;
134  VdpChromaType type;
135  uint32_t width;
136  uint32_t height;
137 
138  vdctx->width = UINT32_MAX;
139  vdctx->height = UINT32_MAX;
140 
141  if (!hwctx) {
142  vdctx->device = VDP_INVALID_HANDLE;
143  av_log(avctx, AV_LOG_WARNING, "hwaccel_context has not been setup by the user application, cannot initialize\n");
144  return 0;
145  }
146 
147  if (hwctx->context.decoder != VDP_INVALID_HANDLE) {
148  vdctx->decoder = hwctx->context.decoder;
149  vdctx->render = hwctx->context.render;
150  vdctx->device = VDP_INVALID_HANDLE;
151  return 0; /* Decoder created by user */
152  }
153  hwctx->reset = 0;
154 
155  vdctx->device = hwctx->device;
156  vdctx->get_proc_address = hwctx->get_proc_address;
157 
158  if (hwctx->flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
159  level = 0;
160  else if (level < 0)
161  return AVERROR(ENOTSUP);
162 
163  if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height))
164  return AVERROR(ENOSYS);
165 
166  if (!(hwctx->flags & AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH) &&
167  type != VDP_CHROMA_TYPE_420)
168  return AVERROR(ENOSYS);
169 
170  status = vdctx->get_proc_address(vdctx->device,
171  VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,
172  &func);
173  if (status != VDP_STATUS_OK)
174  return vdpau_error(status);
175  else
176  surface_query_caps = func;
177 
178  status = surface_query_caps(vdctx->device, type, &supported,
179  &max_width, &max_height);
180  if (status != VDP_STATUS_OK)
181  return vdpau_error(status);
182  if (supported != VDP_TRUE ||
183  max_width < width || max_height < height)
184  return AVERROR(ENOTSUP);
185 
186  status = vdctx->get_proc_address(vdctx->device,
187  VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,
188  &func);
189  if (status != VDP_STATUS_OK)
190  return vdpau_error(status);
191  else
192  decoder_query_caps = func;
193 
194  status = decoder_query_caps(vdctx->device, profile, &supported, &max_level,
195  &max_mb, &max_width, &max_height);
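 /* Some drivers do not report support for the constrained Baseline profile
  * even though a Main-capable decoder can handle such streams, so retry the
  * capability query with the Main profile before giving up. */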
196 #ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
197  if ((status != VDP_STATUS_OK || supported != VDP_TRUE) && profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
198  profile = VDP_DECODER_PROFILE_H264_MAIN;
199  status = decoder_query_caps(vdctx->device, profile, &supported,
200  &max_level, &max_mb,
201  &max_width, &max_height);
202  }
203 #endif
204  if (status != VDP_STATUS_OK)
205  return vdpau_error(status);
206 
207  if (supported != VDP_TRUE || max_level < level ||
208  max_width < width || max_height < height)
209  return AVERROR(ENOTSUP);
210 
211  status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_CREATE,
212  &func);
213  if (status != VDP_STATUS_OK)
214  return vdpau_error(status);
215  else
216  create = func;
217 
218  status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_RENDER,
219  &func);
220  if (status != VDP_STATUS_OK)
221  return vdpau_error(status);
222  else
223  vdctx->render = func;
224 
225  status = create(vdctx->device, profile, width, height, avctx->refs,
226  &vdctx->decoder);
227  if (status == VDP_STATUS_OK) {
228  vdctx->width = avctx->coded_width;
229  vdctx->height = avctx->coded_height;
230  }
231 
232  return vdpau_error(status);
233 }
234 
235 int ff_vdpau_common_uninit(AVCodecContext *avctx)
236 {
237  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
238  VdpDecoderDestroy *destroy;
239  void *func;
240  VdpStatus status;
241 
242  if (vdctx->device == VDP_INVALID_HANDLE)
243  return 0; /* Decoder created and destroyed by user */
244  if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)
245  return 0;
246 
247  status = vdctx->get_proc_address(vdctx->device,
248  VDP_FUNC_ID_DECODER_DESTROY, &func);
249  if (status != VDP_STATUS_OK)
250  return vdpau_error(status);
251  else
252  destroy = func;
253 
254  status = destroy(vdctx->decoder);
255  return vdpau_error(status);
256 }
257 
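/* Tear down and recreate the decoder when the coded dimensions changed or the
 * application requested a reset through av_vdpau_bind_context(); decoders
 * created by the user (device == VDP_INVALID_HANDLE) are left untouched. */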
258 static int ff_vdpau_common_reinit(AVCodecContext *avctx)
259 {
260  VDPAUHWContext *hwctx = avctx->hwaccel_context;
261  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
262 
263  if (vdctx->device == VDP_INVALID_HANDLE)
264  return 0; /* Decoder created by user */
265  if (avctx->coded_width == vdctx->width &&
266  avctx->coded_height == vdctx->height && !hwctx->reset)
267  return 0;
268 
269  avctx->hwaccel->uninit(avctx);
270  return avctx->hwaccel->init(avctx);
271 }
272 
273 int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx,
274  av_unused const uint8_t *buffer,
275  av_unused uint32_t size)
276 {
277  pic_ctx->bitstream_buffers_allocated = 0;
278  pic_ctx->bitstream_buffers_used = 0;
279  pic_ctx->bitstream_buffers = NULL;
280  return 0;
281 }
282 
283 int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
284  struct vdpau_picture_context *pic_ctx)
285 {
286  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
287  AVVDPAUContext *hwctx = avctx->hwaccel_context;
288  VdpVideoSurface surf = ff_vdpau_get_surface_id(frame);
289  VdpStatus status;
290  int val;
291 
292  val = ff_vdpau_common_reinit(avctx);
293  if (val < 0)
294  return val;
295 
296 #if FF_API_BUFS_VDPAU
297 FF_DISABLE_DEPRECATION_WARNINGS
298  av_assert0(sizeof(hwctx->info) <= sizeof(pic_ctx->info));
299  memcpy(&hwctx->info, &pic_ctx->info, sizeof(hwctx->info));
300  hwctx->bitstream_buffers = pic_ctx->bitstream_buffers;
301  hwctx->bitstream_buffers_used = pic_ctx->bitstream_buffers_used;
302  hwctx->bitstream_buffers_allocated = pic_ctx->bitstream_buffers_allocated;
303 FF_ENABLE_DEPRECATION_WARNINGS
304 #endif
305 
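 /* When the application set the render2 callback (and no plain render
  * callback), pass it the codec context and frame so it can pick the
  * destination surface itself; otherwise the accumulated bitstream buffers
  * are submitted through the decoder's render function. */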
306  if (!hwctx->render && hwctx->render2) {
307  status = hwctx->render2(avctx, frame, (void *)&pic_ctx->info,
308  pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
309  } else
310  status = vdctx->render(vdctx->decoder, surf, (void *)&pic_ctx->info,
311  pic_ctx->bitstream_buffers_used,
312  pic_ctx->bitstream_buffers);
313 
314  av_freep(&pic_ctx->bitstream_buffers);
315 
316 #if FF_API_BUFS_VDPAU
317 FF_DISABLE_DEPRECATION_WARNINGS
318  hwctx->bitstream_buffers = NULL;
319  hwctx->bitstream_buffers_used = 0;
320  hwctx->bitstream_buffers_allocated = 0;
321 FF_ENABLE_DEPRECATION_WARNINGS
322 #endif
323 
324  return vdpau_error(status);
325 }
326 
327 #if CONFIG_MPEG1_VDPAU_HWACCEL || \
328  CONFIG_MPEG2_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL || \
329  CONFIG_VC1_VDPAU_HWACCEL || CONFIG_WMV3_VDPAU_HWACCEL
330 int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
331 {
332  MpegEncContext *s = avctx->priv_data;
333  Picture *pic = s->current_picture_ptr;
334  struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
335  int val;
336 
337  val = ff_vdpau_common_end_frame(avctx, pic->f, pic_ctx);
338  if (val < 0)
339  return val;
340 
341  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
342  return 0;
343 }
344 #endif
345 
346 int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx,
347  const uint8_t *buf, uint32_t size)
348 {
349  VdpBitstreamBuffer *buffers = pic_ctx->bitstream_buffers;
350 
351  buffers = av_fast_realloc(buffers, &pic_ctx->bitstream_buffers_allocated,
352  (pic_ctx->bitstream_buffers_used + 1) * sizeof(*buffers));
353  if (!buffers)
354  return AVERROR(ENOMEM);
355 
356  pic_ctx->bitstream_buffers = buffers;
357  buffers += pic_ctx->bitstream_buffers_used++;
358 
359  buffers->struct_version = VDP_BITSTREAM_BUFFER_VERSION;
360  buffers->bitstream = buf;
361  buffers->bitstream_bytes = size;
362  return 0;
363 }
364 
365 /* Obsolete non-hwaccel VDPAU support below... */
366 
367 #if FF_API_VDPAU
368 void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
369 {
370  struct vdpau_render_state *render = (struct vdpau_render_state*)data;
371  assert(render);
372 
373  render->bitstream_buffers= av_fast_realloc(
374  render->bitstream_buffers,
375  &render->bitstream_buffers_allocated,
376  sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
377  );
378 
379  render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
380  render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
381  render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
382  render->bitstream_buffers_used++;
383 }
384 
385 #if CONFIG_H264_VDPAU_DECODER
386 void ff_vdpau_h264_set_reference_frames(H264Context *h)
387 {
388  struct vdpau_render_state *render, *render_ref;
389  VdpReferenceFrameH264 *rf, *rf2;
390  H264Picture *pic;
391  int i, list, pic_frame_idx;
392 
393  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
394  assert(render);
395 
396  rf = &render->info.h264.referenceFrames[0];
397 #define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
398 
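 /* Walk the short-term and long-term reference lists and fill the fixed-size
  * referenceFrames array; the two fields of a frame that is already listed
  * (same surface, long-term flag and frame index) are merged into one entry. */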
399  for (list = 0; list < 2; ++list) {
400  H264Picture **lp = list ? h->long_ref : h->short_ref;
401  int ls = list ? 16 : h->short_ref_count;
402 
403  for (i = 0; i < ls; ++i) {
404  pic = lp[i];
405  if (!pic || !pic->reference)
406  continue;
407  pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
408 
409  render_ref = (struct vdpau_render_state *)pic->f->data[0];
410  assert(render_ref);
411 
412  rf2 = &render->info.h264.referenceFrames[0];
413  while (rf2 != rf) {
414  if (
415  (rf2->surface == render_ref->surface)
416  && (rf2->is_long_term == pic->long_ref)
417  && (rf2->frame_idx == pic_frame_idx)
418  )
419  break;
420  ++rf2;
421  }
422  if (rf2 != rf) {
423  rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
424  rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
425  continue;
426  }
427 
428  if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
429  continue;
430 
431  rf->surface = render_ref->surface;
432  rf->is_long_term = pic->long_ref;
433  rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
434  rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
435  rf->field_order_cnt[0] = pic->field_poc[0];
436  rf->field_order_cnt[1] = pic->field_poc[1];
437  rf->frame_idx = pic_frame_idx;
438 
439  ++rf;
440  }
441  }
442 
443  for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
444  rf->surface = VDP_INVALID_HANDLE;
445  rf->is_long_term = 0;
446  rf->top_is_reference = 0;
447  rf->bottom_is_reference = 0;
448  rf->field_order_cnt[0] = 0;
449  rf->field_order_cnt[1] = 0;
450  rf->frame_idx = 0;
451  }
452 }
453 
454 void ff_vdpau_h264_picture_start(H264Context *h)
455 {
456  struct vdpau_render_state *render;
457  int i;
458 
459  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
460  assert(render);
461 
462  for (i = 0; i < 2; ++i) {
463  int foc = h->cur_pic_ptr->field_poc[i];
464  if (foc == INT_MAX)
465  foc = 0;
466  render->info.h264.field_order_cnt[i] = foc;
467  }
468 
469  render->info.h264.frame_num = h->poc.frame_num;
470 }
471 
472 void ff_vdpau_h264_picture_complete(H264Context *h)
473 {
474  struct vdpau_render_state *render;
475 
476  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
477  assert(render);
478 
479  render->info.h264.slice_count = h->current_slice;
480  if (render->info.h264.slice_count < 1)
481  return;
482 
483  render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
484  render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
485  render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
486  render->info.h264.num_ref_frames = h->ps.sps->ref_frame_count;
487  render->info.h264.mb_adaptive_frame_field_flag = h->ps.sps->mb_aff && !render->info.h264.field_pic_flag;
488  render->info.h264.constrained_intra_pred_flag = h->ps.pps->constrained_intra_pred;
489  render->info.h264.weighted_pred_flag = h->ps.pps->weighted_pred;
490  render->info.h264.weighted_bipred_idc = h->ps.pps->weighted_bipred_idc;
491  render->info.h264.frame_mbs_only_flag = h->ps.sps->frame_mbs_only_flag;
492  render->info.h264.transform_8x8_mode_flag = h->ps.pps->transform_8x8_mode;
493  render->info.h264.chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[0];
494  render->info.h264.second_chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[1];
495  render->info.h264.pic_init_qp_minus26 = h->ps.pps->init_qp - 26;
496  render->info.h264.num_ref_idx_l0_active_minus1 = h->ps.pps->ref_count[0] - 1;
497  render->info.h264.num_ref_idx_l1_active_minus1 = h->ps.pps->ref_count[1] - 1;
498  render->info.h264.log2_max_frame_num_minus4 = h->ps.sps->log2_max_frame_num - 4;
499  render->info.h264.pic_order_cnt_type = h->ps.sps->poc_type;
500  render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->ps.sps->poc_type ? 0 : h->ps.sps->log2_max_poc_lsb - 4;
501  render->info.h264.delta_pic_order_always_zero_flag = h->ps.sps->delta_pic_order_always_zero_flag;
502  render->info.h264.direct_8x8_inference_flag = h->ps.sps->direct_8x8_inference_flag;
503  render->info.h264.entropy_coding_mode_flag = h->ps.pps->cabac;
504  render->info.h264.pic_order_present_flag = h->ps.pps->pic_order_present;
505  render->info.h264.deblocking_filter_control_present_flag = h->ps.pps->deblocking_filter_parameters_present;
506  render->info.h264.redundant_pic_cnt_present_flag = h->ps.pps->redundant_pic_cnt_present;
507  memcpy(render->info.h264.scaling_lists_4x4, h->ps.pps->scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
508  memcpy(render->info.h264.scaling_lists_8x8[0], h->ps.pps->scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
509  memcpy(render->info.h264.scaling_lists_8x8[1], h->ps.pps->scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
510 
511  ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
512  render->bitstream_buffers_used = 0;
513 }
514 #endif /* CONFIG_H264_VDPAU_DECODER */
515 
516 #if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
517 void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
518  int buf_size, int slice_count)
519 {
520  struct vdpau_render_state *render, *last, *next;
521  int i;
522 
523  if (!s->current_picture_ptr) return;
524 
525  render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
526  assert(render);
527 
528  /* fill VdpPictureInfoMPEG1Or2 struct */
529  render->info.mpeg.picture_structure = s->picture_structure;
530  render->info.mpeg.picture_coding_type = s->pict_type;
531  render->info.mpeg.intra_dc_precision = s->intra_dc_precision;
532  render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct;
533  render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
534  render->info.mpeg.intra_vlc_format = s->intra_vlc_format;
535  render->info.mpeg.alternate_scan = s->alternate_scan;
536  render->info.mpeg.q_scale_type = s->q_scale_type;
537  render->info.mpeg.top_field_first = s->top_field_first;
538  render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
539  render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
540  render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
541  render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
542  render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
543  render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1];
544  for (i = 0; i < 64; ++i) {
545  render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i];
546  render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
547  }
548 
549  render->info.mpeg.forward_reference = VDP_INVALID_HANDLE;
550  render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
551 
552  switch(s->pict_type){
553  case AV_PICTURE_TYPE_B:
554  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
555  assert(next);
556  render->info.mpeg.backward_reference = next->surface;
557  // no return here, going to set forward prediction
558  case AV_PICTURE_TYPE_P:
559  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
560  if (!last) // FIXME: Does this test make sense?
561  last = render; // predict second field from the first
562  render->info.mpeg.forward_reference = last->surface;
563  }
564 
565  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
566 
567  render->info.mpeg.slice_count = slice_count;
568 
569  if (slice_count)
570  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
571  render->bitstream_buffers_used = 0;
572 }
573 #endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
574 
575 #if CONFIG_VC1_VDPAU_DECODER
576 void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
577  int buf_size)
578 {
579  VC1Context *v = s->avctx->priv_data;
580  struct vdpau_render_state *render, *last, *next;
581 
582  render = (struct vdpau_render_state *)s->current_picture.f->data[0];
583  assert(render);
584 
585  /* fill VdpPictureInfoVC1 struct */
586  render->info.vc1.frame_coding_mode = v->fcm ? v->fcm + 1 : 0;
587  render->info.vc1.postprocflag = v->postprocflag;
588  render->info.vc1.pulldown = v->broadcast;
589  render->info.vc1.interlace = v->interlace;
590  render->info.vc1.tfcntrflag = v->tfcntrflag;
591  render->info.vc1.finterpflag = v->finterpflag;
592  render->info.vc1.psf = v->psf;
593  render->info.vc1.dquant = v->dquant;
594  render->info.vc1.panscan_flag = v->panscanflag;
595  render->info.vc1.refdist_flag = v->refdist_flag;
596  render->info.vc1.quantizer = v->quantizer_mode;
597  render->info.vc1.extended_mv = v->extended_mv;
598  render->info.vc1.extended_dmv = v->extended_dmv;
599  render->info.vc1.overlap = v->overlap;
600  render->info.vc1.vstransform = v->vstransform;
601  render->info.vc1.loopfilter = v->s.loop_filter;
602  render->info.vc1.fastuvmc = v->fastuvmc;
603  render->info.vc1.range_mapy_flag = v->range_mapy_flag;
604  render->info.vc1.range_mapy = v->range_mapy;
605  render->info.vc1.range_mapuv_flag = v->range_mapuv_flag;
606  render->info.vc1.range_mapuv = v->range_mapuv;
607  /* Specific to simple/main profile only */
608  render->info.vc1.multires = v->multires;
609  render->info.vc1.syncmarker = v->resync_marker;
610  render->info.vc1.rangered = v->rangered | (v->rangeredfrm << 1);
611  render->info.vc1.maxbframes = v->s.max_b_frames;
612 
613  render->info.vc1.deblockEnable = v->postprocflag & 1;
614  render->info.vc1.pquant = v->pq;
615 
616  render->info.vc1.forward_reference = VDP_INVALID_HANDLE;
617  render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
618 
619  if (v->bi_type)
620  render->info.vc1.picture_type = 4;
621  else
622  render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
623 
624  switch(s->pict_type){
625  case AV_PICTURE_TYPE_B:
626  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
627  assert(next);
628  render->info.vc1.backward_reference = next->surface;
629  // no break here, going to set forward prediction
630  case AV_PICTURE_TYPE_P:
631  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
632  if (!last) // FIXME: Does this test make sense?
633  last = render; // predict second field from the first
634  render->info.vc1.forward_reference = last->surface;
635  }
636 
637  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
638 
639  render->info.vc1.slice_count = 1;
640 
641  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
642  render->bitstream_buffers_used = 0;
643 }
644 #endif /* CONFIG_VC1_VDPAU_DECODER */
645 
646 #if CONFIG_MPEG4_VDPAU_DECODER
647 void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf,
648  int buf_size)
649 {
650  MpegEncContext *s = &ctx->m;
651  struct vdpau_render_state *render, *last, *next;
652  int i;
653 
654  if (!s->current_picture_ptr) return;
655 
656  render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
657  assert(render);
658 
659  /* fill VdpPictureInfoMPEG4Part2 struct */
660  render->info.mpeg4.trd[0] = s->pp_time;
661  render->info.mpeg4.trb[0] = s->pb_time;
662  render->info.mpeg4.trd[1] = s->pp_field_time >> 1;
663  render->info.mpeg4.trb[1] = s->pb_field_time >> 1;
664  render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
665  render->info.mpeg4.vop_coding_type = 0;
666  render->info.mpeg4.vop_fcode_forward = s->f_code;
667  render->info.mpeg4.vop_fcode_backward = s->b_code;
668  render->info.mpeg4.resync_marker_disable = !ctx->resync_marker;
669  render->info.mpeg4.interlaced = !s->progressive_sequence;
670  render->info.mpeg4.quant_type = s->mpeg_quant;
671  render->info.mpeg4.quarter_sample = s->quarter_sample;
672  render->info.mpeg4.short_video_header = s->avctx->codec->id == AV_CODEC_ID_H263;
673  render->info.mpeg4.rounding_control = s->no_rounding;
674  render->info.mpeg4.alternate_vertical_scan_flag = s->alternate_scan;
675  render->info.mpeg4.top_field_first = s->top_field_first;
676  for (i = 0; i < 64; ++i) {
677  render->info.mpeg4.intra_quantizer_matrix[i] = s->intra_matrix[i];
678  render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
679  }
680  render->info.mpeg4.forward_reference = VDP_INVALID_HANDLE;
681  render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;
682 
683  switch (s->pict_type) {
684  case AV_PICTURE_TYPE_B:
685  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
686  assert(next);
687  render->info.mpeg4.backward_reference = next->surface;
688  render->info.mpeg4.vop_coding_type = 2;
689  // no break here, going to set forward prediction
690  case AV_PICTURE_TYPE_P:
691  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
692  assert(last);
693  render->info.mpeg4.forward_reference = last->surface;
694  }
695 
696  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
697 
698  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
699  render->bitstream_buffers_used = 0;
700 }
701 #endif /* CONFIG_MPEG4_VDPAU_DECODER */
702 #endif /* FF_API_VDPAU */
703 
704 #if FF_API_VDPAU_PROFILE
705 int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
706 {
707 #define PROFILE(prof) \
708 do { \
709  *profile = VDP_DECODER_PROFILE_##prof; \
710  return 0; \
711 } while (0)
712 
713  switch (avctx->codec_id) {
714  case AV_CODEC_ID_MPEG1VIDEO: PROFILE(MPEG1);
715  case AV_CODEC_ID_MPEG2VIDEO:
716  switch (avctx->profile) {
717  case FF_PROFILE_MPEG2_MAIN: PROFILE(MPEG2_MAIN);
718  case FF_PROFILE_MPEG2_SIMPLE: PROFILE(MPEG2_SIMPLE);
719  default: return AVERROR(EINVAL);
720  }
721  case AV_CODEC_ID_H263: PROFILE(MPEG4_PART2_ASP);
722  case AV_CODEC_ID_MPEG4:
723  switch (avctx->profile) {
724  case FF_PROFILE_MPEG4_SIMPLE: PROFILE(MPEG4_PART2_SP);
725  case FF_PROFILE_MPEG4_ADVANCED_SIMPLE: PROFILE(MPEG4_PART2_ASP);
726  default: return AVERROR(EINVAL);
727  }
728  case AV_CODEC_ID_H264:
729  switch (avctx->profile & ~FF_PROFILE_H264_INTRA) {
730  case FF_PROFILE_H264_BASELINE: PROFILE(H264_BASELINE);
731  case FF_PROFILE_H264_CONSTRAINED_BASELINE:
732  case FF_PROFILE_H264_MAIN: PROFILE(H264_MAIN);
733  case FF_PROFILE_H264_HIGH: PROFILE(H264_HIGH);
734 #ifdef VDP_DECODER_PROFILE_H264_EXTENDED
735  case FF_PROFILE_H264_EXTENDED: PROFILE(H264_EXTENDED);
736 #endif
737  default: return AVERROR(EINVAL);
738  }
739  case AV_CODEC_ID_WMV3:
740  case AV_CODEC_ID_VC1:
741  switch (avctx->profile) {
742  case FF_PROFILE_VC1_SIMPLE: PROFILE(VC1_SIMPLE);
743  case FF_PROFILE_VC1_MAIN: PROFILE(VC1_MAIN);
744  case FF_PROFILE_VC1_ADVANCED: PROFILE(VC1_ADVANCED);
745  default: return AVERROR(EINVAL);
746  }
747  }
748  return AVERROR(EINVAL);
749 #undef PROFILE
750 }
751 #endif /* FF_API_VDPAU_PROFILE */
752 
753 AVVDPAUContext *av_vdpau_alloc_context(void)
754 {
755  return av_mallocz(sizeof(AVVDPAUContext));
756 }
757 
758 int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
759  VdpGetProcAddress *get_proc, unsigned flags)
760 {
761  VDPAUHWContext *hwctx;
762 
763  if (flags & ~(AV_HWACCEL_FLAG_IGNORE_LEVEL|AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH))
764  return AVERROR(EINVAL);
765 
766  if (av_reallocp(&avctx->hwaccel_context, sizeof(*hwctx)))
767  return AVERROR(ENOMEM);
768 
769  hwctx = avctx->hwaccel_context;
770 
771  memset(hwctx, 0, sizeof(*hwctx));
772  hwctx->context.decoder = VDP_INVALID_HANDLE;
773  hwctx->device = device;
774  hwctx->get_proc_address = get_proc;
775  hwctx->flags = flags;
776  hwctx->reset = 1;
777  return 0;
778 }
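/* Usage sketch (illustrative; `device` and `get_proc_address` are assumed to
 * have been created by the application, e.g. via vdp_device_create_x11()):
 *
 *     static enum AVPixelFormat get_format(AVCodecContext *avctx,
 *                                          const enum AVPixelFormat *fmt)
 *     {
 *         for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
 *             if (fmt[i] == AV_PIX_FMT_VDPAU &&
 *                 av_vdpau_bind_context(avctx, device, get_proc_address, 0) == 0)
 *                 return AV_PIX_FMT_VDPAU;
 *         }
 *         return avcodec_default_get_format(avctx, fmt);
 *     }
 */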
779 
780 /* @}*/