FFmpeg
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
26 #include "libavutil/hwcontext_videotoolbox.h"
27 #include "vt_internal.h"
28 #include "libavutil/avutil.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/pixdesc.h"
31 #include "bytestream.h"
32 #include "decode.h"
33 #include "internal.h"
34 #include "h264dec.h"
35 #include "hevcdec.h"
36 #include "hwaccel_internal.h"
37 #include "mpegvideo.h"
38 #include "proresdec.h"
39 #include <Availability.h>
40 #include <AvailabilityMacros.h>
41 #include <TargetConditionals.h>
42 
43 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
44 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
45 #endif
46 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
47 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
48 #endif
49 
50 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
51 enum { kCMVideoCodecType_HEVC = 'hvc1' };
52 #endif
53 
54 #if !HAVE_KCMVIDEOCODECTYPE_VP9
55 enum { kCMVideoCodecType_VP9 = 'vp09' };
56 #endif
57 
58 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
59 
60 typedef struct VTHWFrame {
61  CVPixelBufferRef pixbuf;
62  AVBufferRef *hw_frames_ctx;
63 } VTHWFrame;
64 
65 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
66 {
67  VTHWFrame *ref = (VTHWFrame *)data;
68  av_buffer_unref(&ref->hw_frames_ctx);
69  CVPixelBufferRelease(ref->pixbuf);
70 
71  av_free(data);
72 }
73 
74 int ff_videotoolbox_buffer_copy(VTContext *vtctx,
75  const uint8_t *buffer,
76  uint32_t size)
77 {
78  void *tmp;
79 
80  tmp = av_fast_realloc(vtctx->bitstream,
81  &vtctx->allocated_size,
82  size);
83 
84  if (!tmp)
85  return AVERROR(ENOMEM);
86 
87  vtctx->bitstream = tmp;
88  memcpy(vtctx->bitstream, buffer, size);
89  vtctx->bitstream_size = size;
90 
91  return 0;
92 }
93 
94 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
95 {
96  int ret;
97  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
98 
99  if (!ref->pixbuf) {
100  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
101  av_frame_unref(frame);
102  return AVERROR_EXTERNAL;
103  }
104 
105  frame->crop_right = 0;
106  frame->crop_left = 0;
107  frame->crop_top = 0;
108  frame->crop_bottom = 0;
109 
110  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
111  return ret;
112 
113  frame->data[3] = (uint8_t*)ref->pixbuf;
114 
115  if (ref->hw_frames_ctx) {
116  av_buffer_unref(&frame->hw_frames_ctx);
117  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
118  if (!frame->hw_frames_ctx)
119  return AVERROR(ENOMEM);
120  }
121 
122  return 0;
123 }
124 
125 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
126 {
127  size_t size = sizeof(VTHWFrame);
128  uint8_t *data = NULL;
129  AVBufferRef *buf = NULL;
130  int ret = ff_attach_decode_data(frame);
131  FrameDecodeData *fdd;
132  if (ret < 0)
133  return ret;
134 
135  data = av_mallocz(size);
136  if (!data)
137  return AVERROR(ENOMEM);
138  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
139  if (!buf) {
140  av_freep(&data);
141  return AVERROR(ENOMEM);
142  }
143  frame->buf[0] = buf;
144 
145  fdd = (FrameDecodeData*)frame->private_ref->data;
146  fdd->post_process = videotoolbox_postproc_frame;
147 
148  frame->width = avctx->width;
149  frame->height = avctx->height;
150  frame->format = avctx->pix_fmt;
151 
152  return 0;
153 }
154 
155 #define AV_W8(p, v) *(p) = (v)
156 
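/*
 * Note on escape_ps() below: it copies a parameter set while re-inserting the
 * emulation-prevention byte 0x03 whenever two zero bytes are followed by a
 * byte <= 0x03, as required when embedding SPS/PPS/VPS into avcC/hvcC.
 * Called with dst == NULL it only computes and returns the escaped size.
 */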
157 static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
158 {
159  int i;
160  int size = src_size;
161  uint8_t* p = dst;
162 
163  for (i = 0; i < src_size; i++) {
164  if (i + 2 < src_size &&
165  src[i] == 0x00 &&
166  src[i + 1] == 0x00 &&
167  src[i + 2] <= 0x03) {
168  if (dst) {
169  *p++ = src[i++];
170  *p++ = src[i];
171  *p++ = 0x03;
172  } else {
173  i++;
174  }
175  size++;
176  } else if (dst)
177  *p++ = src[i];
178  }
179 
180  if (dst)
181  av_assert0((p - dst) == size);
182 
183  return size;
184 }
185 
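/*
 * The next function builds an avcC (AVCDecoderConfigurationRecord) from the
 * active SPS/PPS so the CMVideoFormatDescription carries MP4-style extradata;
 * the first three SPS bytes (profile/compat/level) are also cached in
 * vtctx->sps so later parameter-set changes can trigger a session restart.
 */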
186 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
187 {
188  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
189  H264Context *h = avctx->priv_data;
190  CFDataRef data = NULL;
191  uint8_t *p;
192  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
193  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
194  int vt_extradata_size;
195  uint8_t *vt_extradata;
196 
197  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
198  vt_extradata = av_malloc(vt_extradata_size);
199 
200  if (!vt_extradata)
201  return NULL;
202 
203  p = vt_extradata;
204 
205  AV_W8(p + 0, 1); /* version */
206  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
207  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
208  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
209  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
210  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
211  AV_WB16(p + 6, sps_size);
212  p += 8;
213  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
214  AV_W8(p + 0, 1); /* number of pps */
215  AV_WB16(p + 1, pps_size);
216  p += 3;
217  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
218 
219  av_assert0(p - vt_extradata == vt_extradata_size);
220 
221  // save sps header (profile/level) used to create decoder session,
222  // so we can detect changes and recreate it.
223  if (vtctx)
224  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
225 
226  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
227  av_free(vt_extradata);
228  return data;
229 }
230 
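/*
 * The next function builds an hvcC (HEVCDecoderConfigurationRecord): a
 * 23-byte header derived from the general profile/tier/level and VUI fields,
 * followed by three NAL arrays holding all escaped VPS, SPS and PPS.
 */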
231 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
232 {
233  HEVCContext *h = avctx->priv_data;
234  int i, num_vps = 0, num_sps = 0, num_pps = 0;
235  const HEVCVPS *vps = h->ps.vps;
236  const HEVCSPS *sps = h->ps.sps;
237  const HEVCPPS *pps = h->ps.pps;
238  PTLCommon ptlc = vps->ptl.general_ptl;
239  VUI vui = sps->vui;
240  uint8_t parallelismType;
241  CFDataRef data = NULL;
242  uint8_t *p;
243  int vt_extradata_size = 23 + 3 + 3 + 3;
244  uint8_t *vt_extradata;
245 
246 #define COUNT_SIZE_PS(T, t) \
247  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
248  if (h->ps.t##ps_list[i]) { \
249  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
250  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
251  num_##t##ps++; \
252  } \
253  }
254 
255  COUNT_SIZE_PS(V, v)
256  COUNT_SIZE_PS(S, s)
257  COUNT_SIZE_PS(P, p)
258 
259  vt_extradata = av_malloc(vt_extradata_size);
260  if (!vt_extradata)
261  return NULL;
262  p = vt_extradata;
263 
264  /* unsigned int(8) configurationVersion = 1; */
265  AV_W8(p + 0, 1);
266 
267  /*
268  * unsigned int(2) general_profile_space;
269  * unsigned int(1) general_tier_flag;
270  * unsigned int(5) general_profile_idc;
271  */
272  AV_W8(p + 1, ptlc.profile_space << 6 |
273  ptlc.tier_flag << 5 |
274  ptlc.profile_idc);
275 
276  /* unsigned int(32) general_profile_compatibility_flags; */
277  for (i = 0; i < 4; i++) {
278  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
279  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
280  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
281  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
282  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
283  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
284  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
285  ptlc.profile_compatibility_flag[i * 8 + 7]);
286  }
287 
288  /* unsigned int(48) general_constraint_indicator_flags; */
289  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
290  ptlc.interlaced_source_flag << 6 |
291  ptlc.non_packed_constraint_flag << 5 |
292  ptlc.frame_only_constraint_flag << 4);
293  AV_W8(p + 7, 0);
294  AV_WN32(p + 8, 0);
295 
296  /* unsigned int(8) general_level_idc; */
297  AV_W8(p + 12, ptlc.level_idc);
298 
299  /*
300  * bit(4) reserved = ‘1111’b;
301  * unsigned int(12) min_spatial_segmentation_idc;
302  */
303  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
304  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
305 
306  /*
307  * bit(6) reserved = ‘111111’b;
308  * unsigned int(2) parallelismType;
309  */
310  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
311  parallelismType = 0;
312  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
313  parallelismType = 0;
314  else if (pps->entropy_coding_sync_enabled_flag)
315  parallelismType = 3;
316  else if (pps->tiles_enabled_flag)
317  parallelismType = 2;
318  else
319  parallelismType = 1;
320  AV_W8(p + 15, 0xfc | parallelismType);
321 
322  /*
323  * bit(6) reserved = ‘111111’b;
324  * unsigned int(2) chromaFormat;
325  */
326  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
327 
328  /*
329  * bit(5) reserved = ‘11111’b;
330  * unsigned int(3) bitDepthLumaMinus8;
331  */
332  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
333 
334  /*
335  * bit(5) reserved = ‘11111’b;
336  * unsigned int(3) bitDepthChromaMinus8;
337  */
338  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
339 
340  /* bit(16) avgFrameRate; */
341  AV_WB16(p + 19, 0);
342 
343  /*
344  * bit(2) constantFrameRate;
345  * bit(3) numTemporalLayers;
346  * bit(1) temporalIdNested;
347  * unsigned int(2) lengthSizeMinusOne;
348  */
349  AV_W8(p + 21, 0 << 6 |
350  sps->max_sub_layers << 3 |
351  sps->temporal_id_nesting_flag << 2 |
352  3);
353 
354  /* unsigned int(8) numOfArrays; */
355  AV_W8(p + 22, 3);
356 
357  p += 23;
358 
359 #define APPEND_PS(T, t) \
360  /* \
361  * bit(1) array_completeness; \
362  * unsigned int(1) reserved = 0; \
363  * unsigned int(6) NAL_unit_type; \
364  */ \
365  AV_W8(p, 1 << 7 | \
366  HEVC_NAL_##T##PS & 0x3f); \
367  /* unsigned int(16) numNalus; */ \
368  AV_WB16(p + 1, num_##t##ps); \
369  p += 3; \
370  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
371  if (h->ps.t##ps_list[i]) { \
372  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
373  int size = escape_ps(p + 2, lps->data, lps->data_size); \
374  /* unsigned int(16) nalUnitLength; */ \
375  AV_WB16(p, size); \
376  /* bit(8*nalUnitLength) nalUnit; */ \
377  p += 2 + size; \
378  } \
379  }
380 
381  APPEND_PS(V, v)
382  APPEND_PS(S, s)
383  APPEND_PS(P, p)
384 
385  av_assert0(p - vt_extradata == vt_extradata_size);
386 
387  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
388  av_free(vt_extradata);
389  return data;
390 }
391 
392 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
393  const uint8_t *buffer,
394  uint32_t size)
395 {
396  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
397  H264Context *h = avctx->priv_data;
398 
399  if (h->is_avc == 1) {
400  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
401  }
402 
403  return 0;
404 }
405 
406 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
407  int type,
408  const uint8_t *buffer,
409  uint32_t size)
410 {
411  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
412  H264Context *h = avctx->priv_data;
413 
414  // save sps header (profile/level) used to create decoder session
415  if (!vtctx->sps[0])
416  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
417 
418  if (type == H264_NAL_SPS) {
419  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
420  vtctx->reconfig_needed = true;
421  memcpy(vtctx->sps, buffer + 1, 3);
422  }
423  }
424 
425  // pass-through SPS/PPS changes to the decoder
426  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
427 }
428 
429 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
430  const uint8_t *buffer,
431  uint32_t size)
432 {
433  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
434  void *tmp;
435 
436  tmp = av_fast_realloc(vtctx->bitstream,
437  &vtctx->allocated_size,
438  vtctx->bitstream_size+size+4);
439  if (!tmp)
440  return AVERROR(ENOMEM);
441 
442  vtctx->bitstream = tmp;
443 
444  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
445  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
446 
447  vtctx->bitstream_size += size + 4;
448 
449  return 0;
450 }
451 
452 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
453  const uint8_t *buffer,
454  uint32_t size)
455 {
456  H264Context *h = avctx->priv_data;
457 
458  if (h->is_avc == 1)
459  return 0;
460 
461  return videotoolbox_common_decode_slice(avctx, buffer, size);
462 }
463 
464 #if CONFIG_VIDEOTOOLBOX
465 // Return the AVVideotoolboxContext that matters currently. Where it comes from
466 // depends on the API used.
467 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
468 {
469  // Somewhat tricky because the user can call av_videotoolbox_default_free()
470  // at any time, even when the codec is closed.
471  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
472  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
473  if (vtctx->vt_ctx)
474  return vtctx->vt_ctx;
475  }
476  return avctx->hwaccel_context;
477 }
478 
479 static void videotoolbox_stop(AVCodecContext *avctx)
480 {
481  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
482  if (!videotoolbox)
483  return;
484 
485  if (videotoolbox->cm_fmt_desc) {
486  CFRelease(videotoolbox->cm_fmt_desc);
487  videotoolbox->cm_fmt_desc = NULL;
488  }
489 
490  if (videotoolbox->session) {
491  VTDecompressionSessionInvalidate(videotoolbox->session);
492  CFRelease(videotoolbox->session);
493  videotoolbox->session = NULL;
494  }
495 }
496 
497 int ff_videotoolbox_uninit(AVCodecContext *avctx)
498 {
499  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
500  if (!vtctx)
501  return 0;
502 
503  av_freep(&vtctx->bitstream);
504  if (vtctx->frame)
505  CVPixelBufferRelease(vtctx->frame);
506 
507  if (vtctx->vt_ctx)
508  videotoolbox_stop(avctx);
509 
510  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
511  av_freep(&vtctx->vt_ctx);
512 
513  return 0;
514 }
515 
516 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
517 {
518  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
519  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
520  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
521  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
522  int width = CVPixelBufferGetWidth(pixbuf);
523  int height = CVPixelBufferGetHeight(pixbuf);
524  AVHWFramesContext *cached_frames;
525  VTHWFrame *ref;
526  int ret;
527 
528  if (!frame->buf[0] || frame->data[3]) {
529  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
530  av_frame_unref(frame);
531  return AVERROR_EXTERNAL;
532  }
533 
534  ref = (VTHWFrame *)frame->buf[0]->data;
535 
536  if (ref->pixbuf)
537  CVPixelBufferRelease(ref->pixbuf);
538  ref->pixbuf = vtctx->frame;
539  vtctx->frame = NULL;
540 
541  // Old API code path.
542  if (!vtctx->cached_hw_frames_ctx)
543  return 0;
544 
545  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
546 
547  if (cached_frames->sw_format != sw_format ||
548  cached_frames->width != width ||
549  cached_frames->height != height) {
550  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
551  AVHWFramesContext *hw_frames;
552  AVVTFramesContext *hw_ctx;
553  if (!hw_frames_ctx)
554  return AVERROR(ENOMEM);
555 
556  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
557  hw_frames->format = cached_frames->format;
558  hw_frames->sw_format = sw_format;
559  hw_frames->width = width;
560  hw_frames->height = height;
561  hw_ctx = hw_frames->hwctx;
562  hw_ctx->color_range = avctx->color_range;
563 
564  ret = av_hwframe_ctx_init(hw_frames_ctx);
565  if (ret < 0) {
566  av_buffer_unref(&hw_frames_ctx);
567  return ret;
568  }
569 
570  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
571  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
572  }
573 
574  av_buffer_unref(&ref->hw_frames_ctx);
575  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
576  if (!ref->hw_frames_ctx)
577  return AVERROR(ENOMEM);
578 
579  return 0;
580 }
581 
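/*
 * MP4 descriptor sizes below are stored as four 7-bit groups, most
 * significant first, with the high bit set on every byte but the last.
 */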
582 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
583 {
584  int i;
585  uint8_t b;
586 
587  for (i = 3; i >= 0; i--) {
588  b = (length >> (i * 7)) & 0x7F;
589  if (i != 0)
590  b |= 0x80;
591 
592  bytestream2_put_byteu(pb, b);
593  }
594 }
595 
596 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
597 {
598  CFDataRef data;
599  uint8_t *rw_extradata;
600  PutByteContext pb;
601  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
602  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
603  int config_size = 13 + 5 + avctx->extradata_size;
604  int s;
605 
606  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
607  return NULL;
608 
609  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
610  bytestream2_put_byteu(&pb, 0); // version
611  bytestream2_put_ne24(&pb, 0); // flags
612 
613  // elementary stream descriptor
614  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
615  videotoolbox_write_mp4_descr_length(&pb, full_size);
616  bytestream2_put_ne16(&pb, 0); // esid
617  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
618 
619  // decoder configuration descriptor
620  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
621  videotoolbox_write_mp4_descr_length(&pb, config_size);
622  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
623  bytestream2_put_byteu(&pb, 0x11); // stream type
624  bytestream2_put_ne24(&pb, 0); // buffer size
625  bytestream2_put_ne32(&pb, 0); // max bitrate
626  bytestream2_put_ne32(&pb, 0); // avg bitrate
627 
628  // decoder specific descriptor
629  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
630  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
631 
632  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
633 
634  // SLConfigDescriptor
635  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
636  bytestream2_put_byteu(&pb, 0x01); // length
637  bytestream2_put_byteu(&pb, 0x02); //
638 
639  s = bytestream2_size_p(&pb);
640 
641  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
642 
643  av_freep(&rw_extradata);
644  return data;
645 }
646 
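/*
 * The next helper wraps vtctx->bitstream in a CMBlockBuffer without copying
 * (the memory stays owned by the decoder context) and a single-sample
 * CMSampleBuffer suitable for VTDecompressionSessionDecodeFrame().
 */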
647 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
648  void *buffer,
649  int size)
650 {
651  OSStatus status;
652  CMBlockBufferRef block_buf;
653  CMSampleBufferRef sample_buf;
654 
655  block_buf = NULL;
656  sample_buf = NULL;
657 
658  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
659  buffer, // memoryBlock
660  size, // blockLength
661  kCFAllocatorNull, // blockAllocator
662  NULL, // customBlockSource
663  0, // offsetToData
664  size, // dataLength
665  0, // flags
666  &block_buf);
667 
668  if (!status) {
669  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
670  block_buf, // dataBuffer
671  TRUE, // dataReady
672  0, // makeDataReadyCallback
673  0, // makeDataReadyRefcon
674  fmt_desc, // formatDescription
675  1, // numSamples
676  0, // numSampleTimingEntries
677  NULL, // sampleTimingArray
678  0, // numSampleSizeEntries
679  NULL, // sampleSizeArray
680  &sample_buf);
681  }
682 
683  if (block_buf)
684  CFRelease(block_buf);
685 
686  return sample_buf;
687 }
688 
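/*
 * Decompression output callback: it drops any previously held frame and
 * retains the new CVPixelBuffer in vtctx->frame. Only one frame is kept at a
 * time because decoding is driven synchronously (see the call to
 * VTDecompressionSessionWaitForAsynchronousFrames below).
 */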
689 static void videotoolbox_decoder_callback(void *opaque,
690  void *sourceFrameRefCon,
691  OSStatus status,
692  VTDecodeInfoFlags flags,
693  CVImageBufferRef image_buffer,
694  CMTime pts,
695  CMTime duration)
696 {
697  VTContext *vtctx = opaque;
698 
699  if (vtctx->frame) {
700  CVPixelBufferRelease(vtctx->frame);
701  vtctx->frame = NULL;
702  }
703 
704  if (!image_buffer) {
705  av_log(vtctx->logctx, AV_LOG_DEBUG,
706  "vt decoder cb: output image buffer is null: %i\n", status);
707  return;
708  }
709 
710  vtctx->frame = CVPixelBufferRetain(image_buffer);
711 }
712 
713 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
714 {
715  OSStatus status;
716  CMSampleBufferRef sample_buf;
717  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
718  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
719 
720  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
721  vtctx->bitstream,
722  vtctx->bitstream_size);
723 
724  if (!sample_buf)
725  return -1;
726 
727  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
728  sample_buf,
729  0, // decodeFlags
730  NULL, // sourceFrameRefCon
731  0); // infoFlagsOut
732  if (status == noErr)
733  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
734 
735  CFRelease(sample_buf);
736 
737  return status;
738 }
739 
740 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
741  CFDictionaryRef decoder_spec,
742  int width,
743  int height)
744 {
745  CMFormatDescriptionRef cm_fmt_desc;
746  OSStatus status;
747 
748  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
749  codec_type,
750  width,
751  height,
752  decoder_spec, // Dictionary of extension
753  &cm_fmt_desc);
754 
755  if (status)
756  return NULL;
757 
758  return cm_fmt_desc;
759 }
760 
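/*
 * Destination image buffer attributes for the session: requested width,
 * height and pixel format plus IOSurface backing and OpenGL(ES) texture
 * compatibility.
 */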
761 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
762  int height,
763  OSType pix_fmt)
764 {
765  CFMutableDictionaryRef buffer_attributes;
766  CFMutableDictionaryRef io_surface_properties;
767  CFNumberRef cv_pix_fmt;
768  CFNumberRef w;
769  CFNumberRef h;
770 
771  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
772  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
773  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
774 
775  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
776  4,
777  &kCFTypeDictionaryKeyCallBacks,
778  &kCFTypeDictionaryValueCallBacks);
779  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
780  0,
781  &kCFTypeDictionaryKeyCallBacks,
782  &kCFTypeDictionaryValueCallBacks);
783 
784  if (pix_fmt)
785  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
786  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
787  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
788  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
789 #if TARGET_OS_IPHONE
790  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
791 #else
792  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
793 #endif
794 
795  CFRelease(io_surface_properties);
796  CFRelease(cv_pix_fmt);
797  CFRelease(w);
798  CFRelease(h);
799 
800  return buffer_attributes;
801 }
802 
803 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
804  AVCodecContext *avctx)
805 {
806  CFMutableDictionaryRef avc_info;
807  CFDataRef data = NULL;
808 
809  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
810  0,
811  &kCFTypeDictionaryKeyCallBacks,
812  &kCFTypeDictionaryValueCallBacks);
813 
814  CFDictionarySetValue(config_info,
815  codec_type == kCMVideoCodecType_HEVC ?
816  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
817  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
818  kCFBooleanTrue);
819 
820  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
821  1,
822  &kCFTypeDictionaryKeyCallBacks,
823  &kCFTypeDictionaryValueCallBacks);
824 
825  switch (codec_type) {
826  case kCMVideoCodecType_MPEG4Video :
827  if (avctx->extradata_size)
828  data = videotoolbox_esds_extradata_create(avctx);
829  if (data)
830  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
831  break;
832  case kCMVideoCodecType_H264 :
833  data = ff_videotoolbox_avcc_extradata_create(avctx);
834  if (data)
835  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
836  break;
837  case kCMVideoCodecType_HEVC :
838  data = ff_videotoolbox_hvcc_extradata_create(avctx);
839  if (data)
840  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
841  break;
842 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
843  case kCMVideoCodecType_VP9 :
844  data = ff_videotoolbox_vpcc_extradata_create(avctx);
845  if (data)
846  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
847  break;
848 #endif
849  default:
850  break;
851  }
852 
853  CFDictionarySetValue(config_info,
854  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
855  avc_info);
856 
857  if (data)
858  CFRelease(data);
859 
860  CFRelease(avc_info);
861  return config_info;
862 }
863 
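/*
 * videotoolbox_start() below maps the codec ID (and ProRes fourcc) to a
 * CMVideoCodecType, builds the decoder specification and destination buffer
 * attributes, creates the VTDecompressionSession and translates
 * session-creation errors to AVERROR codes.
 */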
864 static int videotoolbox_start(AVCodecContext *avctx)
865 {
866  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
867  OSStatus status;
868  VTDecompressionOutputCallbackRecord decoder_cb;
869  CFDictionaryRef decoder_spec;
870  CFDictionaryRef buf_attr;
871 
872  if (!videotoolbox) {
873  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
874  return -1;
875  }
876 
877  switch( avctx->codec_id ) {
878  case AV_CODEC_ID_H263 :
879  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
880  break;
881  case AV_CODEC_ID_H264 :
882  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
883  break;
884  case AV_CODEC_ID_HEVC :
885  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
886  break;
887  case AV_CODEC_ID_MPEG1VIDEO :
888  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
889  break;
890  case AV_CODEC_ID_MPEG2VIDEO :
891  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
892  break;
893  case AV_CODEC_ID_MPEG4 :
894  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
895  break;
896  case AV_CODEC_ID_PRORES :
897  switch (avctx->codec_tag) {
898  default:
899  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
900  // fall-through
901  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
902  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
903  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
904  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
905  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
906  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
907  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
908  break;
909  }
910  break;
911  case AV_CODEC_ID_VP9 :
912  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
913  break;
914  default :
915  break;
916  }
917 
918 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
919  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
920  if (__builtin_available(macOS 10.9, *)) {
921  VTRegisterProfessionalVideoWorkflowVideoDecoders();
922  }
923  }
924 #endif
925 
926 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
927  if (__builtin_available(macOS 11.0, *)) {
928  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
929  }
930 #endif
931 
932  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
933 
934  if (!decoder_spec) {
935  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
936  return -1;
937  }
938 
939  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
940  decoder_spec,
941  avctx->width,
942  avctx->height);
943  if (!videotoolbox->cm_fmt_desc) {
944  if (decoder_spec)
945  CFRelease(decoder_spec);
946 
947  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
948  return -1;
949  }
950 
951  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
952  avctx->height,
953  videotoolbox->cv_pix_fmt_type);
954 
955  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
956  decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data;
957 
958  status = VTDecompressionSessionCreate(NULL, // allocator
959  videotoolbox->cm_fmt_desc, // videoFormatDescription
960  decoder_spec, // videoDecoderSpecification
961  buf_attr, // destinationImageBufferAttributes
962  &decoder_cb, // outputCallback
963  &videotoolbox->session); // decompressionSessionOut
964 
965  if (decoder_spec)
966  CFRelease(decoder_spec);
967  if (buf_attr)
968  CFRelease(buf_attr);
969 
970  switch (status) {
971  case kVTVideoDecoderNotAvailableNowErr:
972  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
973  return AVERROR(ENOSYS);
974  case kVTVideoDecoderUnsupportedDataFormatErr:
975  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
976  return AVERROR(ENOSYS);
977  case kVTCouldNotFindVideoDecoderErr:
978  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
979  return AVERROR(ENOSYS);
980  case kVTVideoDecoderMalfunctionErr:
981  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
982  return AVERROR(EINVAL);
983  case kVTVideoDecoderBadDataErr:
984  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
985  return AVERROR_INVALIDDATA;
986  case 0:
987  return 0;
988  default:
989  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
990  return AVERROR_UNKNOWN;
991  }
992 }
993 
994 static const char *videotoolbox_error_string(OSStatus status)
995 {
996  switch (status) {
997  case kVTVideoDecoderBadDataErr:
998  return "bad data";
999  case kVTVideoDecoderMalfunctionErr:
1000  return "decoder malfunction";
1001  case kVTInvalidSessionErr:
1002  return "invalid session";
1003  }
1004  return "unknown";
1005 }
1006 
1007 int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
1008 {
1009  OSStatus status;
1010  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1011  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1012 
1013  if (vtctx->reconfig_needed == true) {
1014  vtctx->reconfig_needed = false;
1015  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1016  videotoolbox_stop(avctx);
1017  if (videotoolbox_start(avctx) != 0) {
1018  return AVERROR_EXTERNAL;
1019  }
1020  }
1021 
1022  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1023  return AVERROR_INVALIDDATA;
1024 
1025  status = videotoolbox_session_decode_frame(avctx);
1026  if (status != noErr) {
1027  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1028  vtctx->reconfig_needed = true;
1029  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1030  return AVERROR_UNKNOWN;
1031  }
1032 
1033  if (!vtctx->frame) {
1034  vtctx->reconfig_needed = true;
1035  return AVERROR_UNKNOWN;
1036  }
1037 
1038  return videotoolbox_buffer_create(avctx, frame);
1039 }
1040 
1041 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1042 {
1043  H264Context *h = avctx->priv_data;
1044  AVFrame *frame = h->cur_pic_ptr->f;
1045  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1046  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1047  vtctx->bitstream_size = 0;
1048  return ret;
1049 }
1050 
1051 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1052  const uint8_t *buffer,
1053  uint32_t size)
1054 {
1055  return 0;
1056 }
1057 
1058 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1059  const uint8_t *buffer,
1060  uint32_t size)
1061 {
1062  return videotoolbox_common_decode_slice(avctx, buffer, size);
1063 }
1064 
1065 
1066 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1067  int type,
1068  const uint8_t *buffer,
1069  uint32_t size)
1070 {
1071  return videotoolbox_common_decode_slice(avctx, buffer, size);
1072 }
1073 
1074 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1075 {
1076  HEVCContext *h = avctx->priv_data;
1077  AVFrame *frame = h->ref->frame;
1078  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1079  int ret;
1080 
1081  h->output_frame->crop_right = 0;
1082  h->output_frame->crop_left = 0;
1083  h->output_frame->crop_top = 0;
1084  h->output_frame->crop_bottom = 0;
1085 
1086  ret = ff_videotoolbox_common_end_frame(avctx, frame);
1087  vtctx->bitstream_size = 0;
1088  return ret;
1089 }
1090 
1091 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1092  const uint8_t *buffer,
1093  uint32_t size)
1094 {
1095  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1096 
1097  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1098 }
1099 
1100 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1101  const uint8_t *buffer,
1102  uint32_t size)
1103 {
1104  return 0;
1105 }
1106 
1107 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1108 {
1109  MpegEncContext *s = avctx->priv_data;
1110  AVFrame *frame = s->current_picture_ptr->f;
1111 
1112  return ff_videotoolbox_common_end_frame(avctx, frame);
1113 }
1114 
1115 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1116  const uint8_t *buffer,
1117  uint32_t size)
1118 {
1119  return 0;
1120 }
1121 
1122 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1123  const uint8_t *buffer,
1124  uint32_t size)
1125 {
1126  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1127 
1128  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1129 }
1130 
1131 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1132 {
1133  ProresContext *ctx = avctx->priv_data;
1134  AVFrame *frame = ctx->frame;
1135 
1136  return ff_videotoolbox_common_end_frame(avctx, frame);
1137 }
1138 
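/*
 * The next helper picks the CVPixelBuffer software format that best matches
 * the decoder's nominal sw_pix_fmt (alpha, bit depth, chroma subsampling),
 * falling back to NV12 when nothing closer is available at build time.
 */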
1139 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1140  int depth;
1141  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
1142  if (!descriptor)
1143  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1144 
1145 
1146  if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
1147  return AV_PIX_FMT_AYUV64;
1148 
1149  depth = descriptor->comp[0].depth;
1150 
1151 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
1152  if (depth > 10)
1153  return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
1154 #endif
1155 
1156 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
1157  if (descriptor->log2_chroma_w == 0) {
1158 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
1159  if (depth <= 8)
1160  return AV_PIX_FMT_NV24;
1161 #endif
1162  return AV_PIX_FMT_P410;
1163  }
1164 #endif
1165 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
1166  if (descriptor->log2_chroma_h == 0) {
1167 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
1168  if (depth <= 8)
1169  return AV_PIX_FMT_NV16;
1170 #endif
1171  return AV_PIX_FMT_P210;
1172  }
1173 #endif
1174 #if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
1175  if (depth > 8) {
1176  return AV_PIX_FMT_P010;
1177  }
1178 #endif
1179 
1180  return AV_PIX_FMT_NV12;
1181 }
1182 
1183 static AVVideotoolboxContext *videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1184  bool full_range)
1185 {
1186  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1187 
1188  if (ret) {
1189  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1190  if (cv_pix_fmt_type == 0) {
1191  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1192  }
1193  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1194  }
1195 
1196  return ret;
1197 }
1198 
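/*
 * Common init: with a user-supplied hwaccel_context (legacy API) the session
 * is started directly; otherwise a hw_frames_ctx or hw_device_ctx is
 * required, a frames context is allocated and cached, and its sw_format is
 * mapped to a CoreVideo pixel format before the session is started.
 */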
1199 int ff_videotoolbox_common_init(AVCodecContext *avctx)
1200 {
1201  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1202  AVHWFramesContext *hw_frames;
1203  AVVTFramesContext *hw_ctx;
1204  int err;
1205  bool full_range;
1206 
1207  vtctx->logctx = avctx;
1208 
1209  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx &&
1210  avctx->hwaccel_context)
1211  return videotoolbox_start(avctx);
1212 
1213  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1214  av_log(avctx, AV_LOG_ERROR,
1215  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1216  return AVERROR(EINVAL);
1217  }
1218 
1219  vtctx->vt_ctx = videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1220  if (!vtctx->vt_ctx) {
1221  err = AVERROR(ENOMEM);
1222  goto fail;
1223  }
1224 
1225  if (avctx->hw_frames_ctx) {
1226  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1227  } else {
1228  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1229  if (!avctx->hw_frames_ctx) {
1230  err = AVERROR(ENOMEM);
1231  goto fail;
1232  }
1233 
1234  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1235  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1236  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1237  hw_frames->width = avctx->width;
1238  hw_frames->height = avctx->height;
1239  hw_ctx = hw_frames->hwctx;
1240  hw_ctx->color_range = avctx->color_range;
1241 
1242  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1243  if (err < 0) {
1244  av_buffer_unref(&avctx->hw_frames_ctx);
1245  goto fail;
1246  }
1247  }
1248 
1249  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1250  if (!vtctx->cached_hw_frames_ctx) {
1251  err = AVERROR(ENOMEM);
1252  goto fail;
1253  }
1254 
1255  full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1256  vtctx->vt_ctx->cv_pix_fmt_type =
1257  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1258  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1259  const AVPixFmtDescriptor *attempted_format =
1260  av_pix_fmt_desc_get(hw_frames->sw_format);
1261  av_log(avctx, AV_LOG_ERROR,
1262  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1263  "a VideoToolbox format!\n",
1264  attempted_format ? attempted_format->name : "<unknown>",
1265  av_color_range_name(avctx->color_range));
1266  err = AVERROR(EINVAL);
1267  goto fail;
1268  }
1269 
1270  err = videotoolbox_start(avctx);
1271  if (err < 0)
1272  goto fail;
1273 
1274  return 0;
1275 
1276 fail:
1277  ff_videotoolbox_uninit(avctx);
1278  return err;
1279 }
1280 
1281 int ff_videotoolbox_frame_params(AVCodecContext *avctx,
1282  AVBufferRef *hw_frames_ctx)
1283 {
1284  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1285 
1286  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1287  frames_ctx->width = avctx->coded_width;
1288  frames_ctx->height = avctx->coded_height;
1289  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1290 
1291  return 0;
1292 }
1293 
1294 const FFHWAccel ff_h263_videotoolbox_hwaccel = {
1295  .p.name = "h263_videotoolbox",
1296  .p.type = AVMEDIA_TYPE_VIDEO,
1297  .p.id = AV_CODEC_ID_H263,
1298  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1299  .alloc_frame = ff_videotoolbox_alloc_frame,
1300  .start_frame = videotoolbox_mpeg_start_frame,
1301  .decode_slice = videotoolbox_mpeg_decode_slice,
1302  .end_frame = videotoolbox_mpeg_end_frame,
1303  .frame_params = ff_videotoolbox_frame_params,
1304  .init = ff_videotoolbox_common_init,
1305  .uninit = ff_videotoolbox_uninit,
1306  .priv_data_size = sizeof(VTContext),
1307 };
1308 
1309 const FFHWAccel ff_hevc_videotoolbox_hwaccel = {
1310  .p.name = "hevc_videotoolbox",
1311  .p.type = AVMEDIA_TYPE_VIDEO,
1312  .p.id = AV_CODEC_ID_HEVC,
1313  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1314  .alloc_frame = ff_videotoolbox_alloc_frame,
1315  .start_frame = videotoolbox_hevc_start_frame,
1316  .decode_slice = videotoolbox_hevc_decode_slice,
1317  .decode_params = videotoolbox_hevc_decode_params,
1318  .end_frame = videotoolbox_hevc_end_frame,
1319  .frame_params = ff_videotoolbox_frame_params,
1320  .init = ff_videotoolbox_common_init,
1321  .uninit = ff_videotoolbox_uninit,
1322  .priv_data_size = sizeof(VTContext),
1323 };
1324 
1325 const FFHWAccel ff_h264_videotoolbox_hwaccel = {
1326  .p.name = "h264_videotoolbox",
1327  .p.type = AVMEDIA_TYPE_VIDEO,
1328  .p.id = AV_CODEC_ID_H264,
1329  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1330  .alloc_frame = ff_videotoolbox_alloc_frame,
1331  .start_frame = ff_videotoolbox_h264_start_frame,
1332  .decode_slice = ff_videotoolbox_h264_decode_slice,
1333  .decode_params = videotoolbox_h264_decode_params,
1334  .end_frame = videotoolbox_h264_end_frame,
1335  .frame_params = ff_videotoolbox_frame_params,
1336  .init = ff_videotoolbox_common_init,
1337  .uninit = ff_videotoolbox_uninit,
1338  .priv_data_size = sizeof(VTContext),
1339 };
1340 
1341 const FFHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1342  .p.name = "mpeg1_videotoolbox",
1343  .p.type = AVMEDIA_TYPE_VIDEO,
1344  .p.id = AV_CODEC_ID_MPEG1VIDEO,
1345  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1346  .alloc_frame = ff_videotoolbox_alloc_frame,
1347  .start_frame = videotoolbox_mpeg_start_frame,
1348  .decode_slice = videotoolbox_mpeg_decode_slice,
1349  .end_frame = videotoolbox_mpeg_end_frame,
1350  .frame_params = ff_videotoolbox_frame_params,
1351  .init = ff_videotoolbox_common_init,
1352  .uninit = ff_videotoolbox_uninit,
1353  .priv_data_size = sizeof(VTContext),
1354 };
1355 
1356 const FFHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1357  .p.name = "mpeg2_videotoolbox",
1358  .p.type = AVMEDIA_TYPE_VIDEO,
1359  .p.id = AV_CODEC_ID_MPEG2VIDEO,
1360  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1361  .alloc_frame = ff_videotoolbox_alloc_frame,
1362  .start_frame = videotoolbox_mpeg_start_frame,
1363  .decode_slice = videotoolbox_mpeg_decode_slice,
1364  .end_frame = videotoolbox_mpeg_end_frame,
1365  .frame_params = ff_videotoolbox_frame_params,
1366  .init = ff_videotoolbox_common_init,
1367  .uninit = ff_videotoolbox_uninit,
1368  .priv_data_size = sizeof(VTContext),
1369 };
1370 
1371 const FFHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1372  .p.name = "mpeg4_videotoolbox",
1373  .p.type = AVMEDIA_TYPE_VIDEO,
1374  .p.id = AV_CODEC_ID_MPEG4,
1375  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1376  .alloc_frame = ff_videotoolbox_alloc_frame,
1377  .start_frame = videotoolbox_mpeg_start_frame,
1378  .decode_slice = videotoolbox_mpeg_decode_slice,
1379  .end_frame = videotoolbox_mpeg_end_frame,
1380  .frame_params = ff_videotoolbox_frame_params,
1381  .init = ff_videotoolbox_common_init,
1382  .uninit = ff_videotoolbox_uninit,
1383  .priv_data_size = sizeof(VTContext),
1384 };
1385 
1386 const FFHWAccel ff_prores_videotoolbox_hwaccel = {
1387  .p.name = "prores_videotoolbox",
1388  .p.type = AVMEDIA_TYPE_VIDEO,
1389  .p.id = AV_CODEC_ID_PRORES,
1390  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1391  .alloc_frame = ff_videotoolbox_alloc_frame,
1392  .start_frame = videotoolbox_prores_start_frame,
1393  .decode_slice = videotoolbox_prores_decode_slice,
1394  .end_frame = videotoolbox_prores_end_frame,
1395  .frame_params = ff_videotoolbox_frame_params,
1396  .init = ff_videotoolbox_common_init,
1397  .uninit = ff_videotoolbox_uninit,
1398  .priv_data_size = sizeof(VTContext),
1399 };
1400 
1401 #endif /* CONFIG_VIDEOTOOLBOX */