FFmpeg
videotoolbox.c
Go to the documentation of this file.
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
#include "config.h"
#include "config_components.h"
#include "videotoolbox.h"
#include "libavutil/hwcontext_videotoolbox.h"
#include "vt_internal.h"
#include "libavutil/avutil.h"
#include "libavutil/hwcontext.h"
#include "libavutil/pixdesc.h"
#include "bytestream.h"
#include "decode.h"
#include "internal.h"
#include "h264dec.h"
#include "hevcdec.h"
#include "mpegvideo.h"
#include "proresdec.h"
#include <Availability.h>
#include <AvailabilityMacros.h>
#include <TargetConditionals.h>
41 
/* Fallback definitions for SDKs that predate these VideoToolbox symbols;
 * the CFSTR keys match the values Apple ships in newer headers. */
#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif
#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
#endif

/* FourCC codec-type fallbacks for SDKs lacking the HEVC/VP9 constants. */
#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#if !HAVE_KCMVIDEOCODECTYPE_VP9
enum { kCMVideoCodecType_VP9 = 'vp09' };
#endif

/* Extra scratch bytes appended to the rebuilt esds extradata buffer. */
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12
58 
59 typedef struct VTHWFrame {
60  CVPixelBufferRef pixbuf;
62 } VTHWFrame;
63 
64 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
65 {
67  av_buffer_unref(&ref->hw_frames_ctx);
68  CVPixelBufferRelease(ref->pixbuf);
69 
70  av_free(data);
71 }
72 
74  const uint8_t *buffer,
75  uint32_t size)
76 {
77  void *tmp;
78 
79  tmp = av_fast_realloc(vtctx->bitstream,
80  &vtctx->allocated_size,
81  size);
82 
83  if (!tmp)
84  return AVERROR(ENOMEM);
85 
86  vtctx->bitstream = tmp;
87  memcpy(vtctx->bitstream, buffer, size);
88  vtctx->bitstream_size = size;
89 
90  return 0;
91 }
92 
93 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
94 {
95  int ret;
96  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
97 
98  if (!ref->pixbuf) {
99  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
101  return AVERROR_EXTERNAL;
102  }
103 
104  frame->crop_right = 0;
105  frame->crop_left = 0;
106  frame->crop_top = 0;
107  frame->crop_bottom = 0;
108 
109  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
110  return ret;
111 
112  frame->data[3] = (uint8_t*)ref->pixbuf;
113 
114  if (ref->hw_frames_ctx) {
115  av_buffer_unref(&frame->hw_frames_ctx);
116  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
117  if (!frame->hw_frames_ctx)
118  return AVERROR(ENOMEM);
119  }
120 
121  return 0;
122 }
123 
125 {
126  size_t size = sizeof(VTHWFrame);
127  uint8_t *data = NULL;
128  AVBufferRef *buf = NULL;
130  FrameDecodeData *fdd;
131  if (ret < 0)
132  return ret;
133 
134  data = av_mallocz(size);
135  if (!data)
136  return AVERROR(ENOMEM);
138  if (!buf) {
139  av_freep(&data);
140  return AVERROR(ENOMEM);
141  }
142  frame->buf[0] = buf;
143 
144  fdd = (FrameDecodeData*)frame->private_ref->data;
146 
147  frame->width = avctx->width;
148  frame->height = avctx->height;
149  frame->format = avctx->pix_fmt;
150 
151  return 0;
152 }
153 
154 #define AV_W8(p, v) *(p) = (v)
155 
/* Insert an emulation-prevention byte (0x03) after every 0x00 0x00 pair that
 * is followed by a byte <= 0x03, as required inside avcC parameter sets.
 * Call with dst == NULL to only compute the escaped size.
 * Returns the escaped size in bytes. */
static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
{
    uint8_t *out = dst;
    int total = src_size;
    int pos = 0;

    while (pos < src_size) {
        if (pos + 2 < src_size &&
            src[pos]     == 0x00 &&
            src[pos + 1] == 0x00 &&
            src[pos + 2] <= 0x03) {
            if (dst) {
                *out++ = src[pos++];
                *out++ = src[pos++];
                *out++ = 0x03;
            } else {
                pos += 2;
            }
            total++;
        }
        if (dst)
            *out++ = src[pos];
        pos++;
    }

    if (dst)
        av_assert0((out - dst) == total);

    return total;
}
185 
187 {
188  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
189  H264Context *h = avctx->priv_data;
190  CFDataRef data = NULL;
191  uint8_t *p;
192  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
193  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
194  int vt_extradata_size;
195  uint8_t *vt_extradata;
196 
197  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
198  vt_extradata = av_malloc(vt_extradata_size);
199 
200  if (!vt_extradata)
201  return NULL;
202 
203  p = vt_extradata;
204 
205  AV_W8(p + 0, 1); /* version */
206  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
207  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
208  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
209  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
210  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
211  AV_WB16(p + 6, sps_size);
212  p += 8;
213  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
214  AV_W8(p + 0, 1); /* number of pps */
215  AV_WB16(p + 1, pps_size);
216  p += 3;
217  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
218 
219  av_assert0(p - vt_extradata == vt_extradata_size);
220 
221  // save sps header (profile/level) used to create decoder session,
222  // so we can detect changes and recreate it.
223  if (vtctx)
224  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
225 
226  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
227  av_free(vt_extradata);
228  return data;
229 }
230 
232 {
233  HEVCContext *h = avctx->priv_data;
234  int i, num_vps = 0, num_sps = 0, num_pps = 0;
235  const HEVCVPS *vps = h->ps.vps;
236  const HEVCSPS *sps = h->ps.sps;
237  const HEVCPPS *pps = h->ps.pps;
238  PTLCommon ptlc = vps->ptl.general_ptl;
239  VUI vui = sps->vui;
240  uint8_t parallelismType;
241  CFDataRef data = NULL;
242  uint8_t *p;
243  int vt_extradata_size = 23 + 3 + 3 + 3;
244  uint8_t *vt_extradata;
245 
246 #define COUNT_SIZE_PS(T, t) \
247  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
248  if (h->ps.t##ps_list[i]) { \
249  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
250  vt_extradata_size += 2 + lps->data_size; \
251  num_##t##ps++; \
252  } \
253  }
254 
255  COUNT_SIZE_PS(V, v)
256  COUNT_SIZE_PS(S, s)
257  COUNT_SIZE_PS(P, p)
258 
259  vt_extradata = av_malloc(vt_extradata_size);
260  if (!vt_extradata)
261  return NULL;
262  p = vt_extradata;
263 
264  /* unsigned int(8) configurationVersion = 1; */
265  AV_W8(p + 0, 1);
266 
267  /*
268  * unsigned int(2) general_profile_space;
269  * unsigned int(1) general_tier_flag;
270  * unsigned int(5) general_profile_idc;
271  */
272  AV_W8(p + 1, ptlc.profile_space << 6 |
273  ptlc.tier_flag << 5 |
274  ptlc.profile_idc);
275 
276  /* unsigned int(32) general_profile_compatibility_flags; */
277  memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
278 
279  /* unsigned int(48) general_constraint_indicator_flags; */
280  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
281  ptlc.interlaced_source_flag << 6 |
282  ptlc.non_packed_constraint_flag << 5 |
283  ptlc.frame_only_constraint_flag << 4);
284  AV_W8(p + 7, 0);
285  AV_WN32(p + 8, 0);
286 
287  /* unsigned int(8) general_level_idc; */
288  AV_W8(p + 12, ptlc.level_idc);
289 
290  /*
291  * bit(4) reserved = ‘1111’b;
292  * unsigned int(12) min_spatial_segmentation_idc;
293  */
294  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
295  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
296 
297  /*
298  * bit(6) reserved = ‘111111’b;
299  * unsigned int(2) parallelismType;
300  */
302  parallelismType = 0;
303  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
304  parallelismType = 0;
305  else if (pps->entropy_coding_sync_enabled_flag)
306  parallelismType = 3;
307  else if (pps->tiles_enabled_flag)
308  parallelismType = 2;
309  else
310  parallelismType = 1;
311  AV_W8(p + 15, 0xfc | parallelismType);
312 
313  /*
314  * bit(6) reserved = ‘111111’b;
315  * unsigned int(2) chromaFormat;
316  */
317  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
318 
319  /*
320  * bit(5) reserved = ‘11111’b;
321  * unsigned int(3) bitDepthLumaMinus8;
322  */
323  AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
324 
325  /*
326  * bit(5) reserved = ‘11111’b;
327  * unsigned int(3) bitDepthChromaMinus8;
328  */
329  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
330 
331  /* bit(16) avgFrameRate; */
332  AV_WB16(p + 19, 0);
333 
334  /*
335  * bit(2) constantFrameRate;
336  * bit(3) numTemporalLayers;
337  * bit(1) temporalIdNested;
338  * unsigned int(2) lengthSizeMinusOne;
339  */
340  AV_W8(p + 21, 0 << 6 |
341  sps->max_sub_layers << 3 |
342  sps->temporal_id_nesting_flag << 2 |
343  3);
344 
345  /* unsigned int(8) numOfArrays; */
346  AV_W8(p + 22, 3);
347 
348  p += 23;
349 
350 #define APPEND_PS(T, t) \
351  /* \
352  * bit(1) array_completeness; \
353  * unsigned int(1) reserved = 0; \
354  * unsigned int(6) NAL_unit_type; \
355  */ \
356  AV_W8(p, 1 << 7 | \
357  HEVC_NAL_##T##PS & 0x3f); \
358  /* unsigned int(16) numNalus; */ \
359  AV_WB16(p + 1, num_##t##ps); \
360  p += 3; \
361  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
362  if (h->ps.t##ps_list[i]) { \
363  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
364  /* unsigned int(16) nalUnitLength; */ \
365  AV_WB16(p, lps->data_size); \
366  /* bit(8*nalUnitLength) nalUnit; */ \
367  memcpy(p + 2, lps->data, lps->data_size); \
368  p += 2 + lps->data_size; \
369  } \
370  }
371 
372  APPEND_PS(V, v)
373  APPEND_PS(S, s)
374  APPEND_PS(P, p)
375 
376  av_assert0(p - vt_extradata == vt_extradata_size);
377 
378  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
379  av_free(vt_extradata);
380  return data;
381 }
382 
384  const uint8_t *buffer,
385  uint32_t size)
386 {
387  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
388  H264Context *h = avctx->priv_data;
389 
390  if (h->is_avc == 1) {
391  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
392  }
393 
394  return 0;
395 }
396 
398  int type,
399  const uint8_t *buffer,
400  uint32_t size)
401 {
402  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
403  H264Context *h = avctx->priv_data;
404 
405  // save sps header (profile/level) used to create decoder session
406  if (!vtctx->sps[0])
407  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
408 
409  if (type == H264_NAL_SPS) {
410  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
411  vtctx->reconfig_needed = true;
412  memcpy(vtctx->sps, buffer + 1, 3);
413  }
414  }
415 
416  // pass-through SPS/PPS changes to the decoder
418 }
419 
421  const uint8_t *buffer,
422  uint32_t size)
423 {
424  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
425  void *tmp;
426 
427  tmp = av_fast_realloc(vtctx->bitstream,
428  &vtctx->allocated_size,
429  vtctx->bitstream_size+size+4);
430  if (!tmp)
431  return AVERROR(ENOMEM);
432 
433  vtctx->bitstream = tmp;
434 
435  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
436  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
437 
438  vtctx->bitstream_size += size + 4;
439 
440  return 0;
441 }
442 
444  const uint8_t *buffer,
445  uint32_t size)
446 {
447  H264Context *h = avctx->priv_data;
448 
449  if (h->is_avc == 1)
450  return 0;
451 
453 }
454 
455 #if CONFIG_VIDEOTOOLBOX
456 // Return the AVVideotoolboxContext that matters currently. Where it comes from
457 // depends on the API used.
458 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
459 {
460  // Somewhat tricky because the user can call av_videotoolbox_default_free()
461  // at any time, even when the codec is closed.
462  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
463  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
464  if (vtctx->vt_ctx)
465  return vtctx->vt_ctx;
466  }
467  return avctx->hwaccel_context;
468 }
469 
470 static void videotoolbox_stop(AVCodecContext *avctx)
471 {
472  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
473  if (!videotoolbox)
474  return;
475 
476  if (videotoolbox->cm_fmt_desc) {
477  CFRelease(videotoolbox->cm_fmt_desc);
478  videotoolbox->cm_fmt_desc = NULL;
479  }
480 
481  if (videotoolbox->session) {
482  VTDecompressionSessionInvalidate(videotoolbox->session);
483  CFRelease(videotoolbox->session);
484  videotoolbox->session = NULL;
485  }
486 }
487 
489 {
490  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
491  if (!vtctx)
492  return 0;
493 
494  av_freep(&vtctx->bitstream);
495  if (vtctx->frame)
496  CVPixelBufferRelease(vtctx->frame);
497 
498  if (vtctx->vt_ctx)
499  videotoolbox_stop(avctx);
500 
502  av_freep(&vtctx->vt_ctx);
503 
504  return 0;
505 }
506 
507 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
508 {
509  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
510  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
511  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
512  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
513  int width = CVPixelBufferGetWidth(pixbuf);
514  int height = CVPixelBufferGetHeight(pixbuf);
515  AVHWFramesContext *cached_frames;
516  VTHWFrame *ref;
517  int ret;
518 
519  if (!frame->buf[0] || frame->data[3]) {
520  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
522  return AVERROR_EXTERNAL;
523  }
524 
525  ref = (VTHWFrame *)frame->buf[0]->data;
526 
527  if (ref->pixbuf)
528  CVPixelBufferRelease(ref->pixbuf);
529  ref->pixbuf = vtctx->frame;
530  vtctx->frame = NULL;
531 
532  // Old API code path.
533  if (!vtctx->cached_hw_frames_ctx)
534  return 0;
535 
536  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
537 
538  if (cached_frames->sw_format != sw_format ||
539  cached_frames->width != width ||
540  cached_frames->height != height) {
541  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
542  AVHWFramesContext *hw_frames;
543  if (!hw_frames_ctx)
544  return AVERROR(ENOMEM);
545 
546  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
547  hw_frames->format = cached_frames->format;
548  hw_frames->sw_format = sw_format;
549  hw_frames->width = width;
550  hw_frames->height = height;
551 
552  ret = av_hwframe_ctx_init(hw_frames_ctx);
553  if (ret < 0) {
554  av_buffer_unref(&hw_frames_ctx);
555  return ret;
556  }
557 
559  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
560  }
561 
562  av_buffer_unref(&ref->hw_frames_ctx);
563  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
564  if (!ref->hw_frames_ctx)
565  return AVERROR(ENOMEM);
566 
567  return 0;
568 }
569 
570 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
571 {
572  int i;
573  uint8_t b;
574 
575  for (i = 3; i >= 0; i--) {
576  b = (length >> (i * 7)) & 0x7F;
577  if (i != 0)
578  b |= 0x80;
579 
580  bytestream2_put_byteu(pb, b);
581  }
582 }
583 
584 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
585 {
586  CFDataRef data;
587  uint8_t *rw_extradata;
588  PutByteContext pb;
589  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
590  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
591  int config_size = 13 + 5 + avctx->extradata_size;
592  int s;
593 
594  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
595  return NULL;
596 
597  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
598  bytestream2_put_byteu(&pb, 0); // version
599  bytestream2_put_ne24(&pb, 0); // flags
600 
601  // elementary stream descriptor
602  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
603  videotoolbox_write_mp4_descr_length(&pb, full_size);
604  bytestream2_put_ne16(&pb, 0); // esid
605  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
606 
607  // decoder configuration descriptor
608  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
609  videotoolbox_write_mp4_descr_length(&pb, config_size);
610  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
611  bytestream2_put_byteu(&pb, 0x11); // stream type
612  bytestream2_put_ne24(&pb, 0); // buffer size
613  bytestream2_put_ne32(&pb, 0); // max bitrate
614  bytestream2_put_ne32(&pb, 0); // avg bitrate
615 
616  // decoder specific descriptor
617  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
618  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
619 
620  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
621 
622  // SLConfigDescriptor
623  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
624  bytestream2_put_byteu(&pb, 0x01); // length
625  bytestream2_put_byteu(&pb, 0x02); //
626 
627  s = bytestream2_size_p(&pb);
628 
629  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
630 
631  av_freep(&rw_extradata);
632  return data;
633 }
634 
635 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
636  void *buffer,
637  int size)
638 {
639  OSStatus status;
640  CMBlockBufferRef block_buf;
641  CMSampleBufferRef sample_buf;
642 
643  block_buf = NULL;
644  sample_buf = NULL;
645 
646  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
647  buffer, // memoryBlock
648  size, // blockLength
649  kCFAllocatorNull, // blockAllocator
650  NULL, // customBlockSource
651  0, // offsetToData
652  size, // dataLength
653  0, // flags
654  &block_buf);
655 
656  if (!status) {
657  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
658  block_buf, // dataBuffer
659  TRUE, // dataReady
660  0, // makeDataReadyCallback
661  0, // makeDataReadyRefcon
662  fmt_desc, // formatDescription
663  1, // numSamples
664  0, // numSampleTimingEntries
665  NULL, // sampleTimingArray
666  0, // numSampleSizeEntries
667  NULL, // sampleSizeArray
668  &sample_buf);
669  }
670 
671  if (block_buf)
672  CFRelease(block_buf);
673 
674  return sample_buf;
675 }
676 
677 static void videotoolbox_decoder_callback(void *opaque,
678  void *sourceFrameRefCon,
679  OSStatus status,
680  VTDecodeInfoFlags flags,
681  CVImageBufferRef image_buffer,
682  CMTime pts,
683  CMTime duration)
684 {
685  AVCodecContext *avctx = opaque;
686  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
687 
688  if (vtctx->frame) {
689  CVPixelBufferRelease(vtctx->frame);
690  vtctx->frame = NULL;
691  }
692 
693  if (!image_buffer) {
694  av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
695  return;
696  }
697 
698  vtctx->frame = CVPixelBufferRetain(image_buffer);
699 }
700 
701 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
702 {
703  OSStatus status;
704  CMSampleBufferRef sample_buf;
705  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
706  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
707 
708  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
709  vtctx->bitstream,
710  vtctx->bitstream_size);
711 
712  if (!sample_buf)
713  return -1;
714 
715  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
716  sample_buf,
717  0, // decodeFlags
718  NULL, // sourceFrameRefCon
719  0); // infoFlagsOut
720  if (status == noErr)
721  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
722 
723  CFRelease(sample_buf);
724 
725  return status;
726 }
727 
728 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
729  CFDictionaryRef decoder_spec,
730  int width,
731  int height)
732 {
733  CMFormatDescriptionRef cm_fmt_desc;
734  OSStatus status;
735 
736  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
737  codec_type,
738  width,
739  height,
740  decoder_spec, // Dictionary of extension
741  &cm_fmt_desc);
742 
743  if (status)
744  return NULL;
745 
746  return cm_fmt_desc;
747 }
748 
749 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
750  int height,
751  OSType pix_fmt)
752 {
753  CFMutableDictionaryRef buffer_attributes;
754  CFMutableDictionaryRef io_surface_properties;
755  CFNumberRef cv_pix_fmt;
756  CFNumberRef w;
757  CFNumberRef h;
758 
759  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
760  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
761  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
762 
763  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
764  4,
765  &kCFTypeDictionaryKeyCallBacks,
766  &kCFTypeDictionaryValueCallBacks);
767  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
768  0,
769  &kCFTypeDictionaryKeyCallBacks,
770  &kCFTypeDictionaryValueCallBacks);
771 
772  if (pix_fmt)
773  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
774  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
775  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
776  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
777 #if TARGET_OS_IPHONE
778  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
779 #else
780  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
781 #endif
782 
783  CFRelease(io_surface_properties);
784  CFRelease(cv_pix_fmt);
785  CFRelease(w);
786  CFRelease(h);
787 
788  return buffer_attributes;
789 }
790 
791 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
792  AVCodecContext *avctx)
793 {
794  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
795  0,
796  &kCFTypeDictionaryKeyCallBacks,
797  &kCFTypeDictionaryValueCallBacks);
798 
799  CFDictionarySetValue(config_info,
803  kCFBooleanTrue);
804 
805  CFMutableDictionaryRef avc_info;
806  CFDataRef data = NULL;
807 
808  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
809  1,
810  &kCFTypeDictionaryKeyCallBacks,
811  &kCFTypeDictionaryValueCallBacks);
812 
813  switch (codec_type) {
814  case kCMVideoCodecType_MPEG4Video :
815  if (avctx->extradata_size)
816  data = videotoolbox_esds_extradata_create(avctx);
817  if (data)
818  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
819  break;
820  case kCMVideoCodecType_H264 :
822  if (data)
823  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
824  break;
827  if (data)
828  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
829  break;
830 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
831  case kCMVideoCodecType_VP9 :
833  if (data)
834  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
835  break;
836 #endif
837  default:
838  break;
839  }
840 
841  CFDictionarySetValue(config_info,
842  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
843  avc_info);
844 
845  if (data)
846  CFRelease(data);
847 
848  CFRelease(avc_info);
849  return config_info;
850 }
851 
852 static int videotoolbox_start(AVCodecContext *avctx)
853 {
854  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
855  OSStatus status;
856  VTDecompressionOutputCallbackRecord decoder_cb;
857  CFDictionaryRef decoder_spec;
858  CFDictionaryRef buf_attr;
859 
860  if (!videotoolbox) {
861  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
862  return -1;
863  }
864 
865  switch( avctx->codec_id ) {
866  case AV_CODEC_ID_H263 :
867  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
868  break;
869  case AV_CODEC_ID_H264 :
870  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
871  break;
872  case AV_CODEC_ID_HEVC :
873  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
874  break;
876  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
877  break;
879  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
880  break;
881  case AV_CODEC_ID_MPEG4 :
882  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
883  break;
884  case AV_CODEC_ID_PRORES :
885  switch (avctx->codec_tag) {
886  default:
887  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
888  // fall-through
889  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
890  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
891  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
892  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
893  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
894  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
895  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
896  break;
897  }
898  break;
899  case AV_CODEC_ID_VP9 :
900  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
901  break;
902  default :
903  break;
904  }
905 
906 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
907  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
908  if (__builtin_available(macOS 10.9, *)) {
909  VTRegisterProfessionalVideoWorkflowVideoDecoders();
910  }
911  }
912 #endif
913 
914 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
915  if (__builtin_available(macOS 11.0, *)) {
916  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
917  }
918 #endif
919 
920  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
921 
922  if (!decoder_spec) {
923  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
924  return -1;
925  }
926 
927  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
928  decoder_spec,
929  avctx->width,
930  avctx->height);
931  if (!videotoolbox->cm_fmt_desc) {
932  if (decoder_spec)
933  CFRelease(decoder_spec);
934 
935  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
936  return -1;
937  }
938 
939  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
940  avctx->height,
941  videotoolbox->cv_pix_fmt_type);
942 
943  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
944  decoder_cb.decompressionOutputRefCon = avctx;
945 
946  status = VTDecompressionSessionCreate(NULL, // allocator
947  videotoolbox->cm_fmt_desc, // videoFormatDescription
948  decoder_spec, // videoDecoderSpecification
949  buf_attr, // destinationImageBufferAttributes
950  &decoder_cb, // outputCallback
951  &videotoolbox->session); // decompressionSessionOut
952 
953  if (decoder_spec)
954  CFRelease(decoder_spec);
955  if (buf_attr)
956  CFRelease(buf_attr);
957 
958  switch (status) {
959  case kVTVideoDecoderNotAvailableNowErr:
960  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
961  return AVERROR(ENOSYS);
962  case kVTVideoDecoderUnsupportedDataFormatErr:
963  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
964  return AVERROR(ENOSYS);
965  case kVTCouldNotFindVideoDecoderErr:
966  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
967  return AVERROR(ENOSYS);
968  case kVTVideoDecoderMalfunctionErr:
969  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
970  return AVERROR(EINVAL);
971  case kVTVideoDecoderBadDataErr:
972  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
973  return AVERROR_INVALIDDATA;
974  case 0:
975  return 0;
976  default:
977  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
978  return AVERROR_UNKNOWN;
979  }
980 }
981 
982 static const char *videotoolbox_error_string(OSStatus status)
983 {
984  switch (status) {
985  case kVTVideoDecoderBadDataErr:
986  return "bad data";
987  case kVTVideoDecoderMalfunctionErr:
988  return "decoder malfunction";
989  case kVTInvalidSessionErr:
990  return "invalid session";
991  }
992  return "unknown";
993 }
994 
996 {
997  OSStatus status;
998  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
999  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1000 
1001  if (vtctx->reconfig_needed == true) {
1002  vtctx->reconfig_needed = false;
1003  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1004  videotoolbox_stop(avctx);
1005  if (videotoolbox_start(avctx) != 0) {
1006  return AVERROR_EXTERNAL;
1007  }
1008  }
1009 
1010  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1011  return AVERROR_INVALIDDATA;
1012 
1013  status = videotoolbox_session_decode_frame(avctx);
1014  if (status != noErr) {
1015  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1016  vtctx->reconfig_needed = true;
1017  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1018  return AVERROR_UNKNOWN;
1019  }
1020 
1021  if (!vtctx->frame) {
1022  vtctx->reconfig_needed = true;
1023  return AVERROR_UNKNOWN;
1024  }
1025 
1026  return videotoolbox_buffer_create(avctx, frame);
1027 }
1028 
1029 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1030 {
1031  H264Context *h = avctx->priv_data;
1032  AVFrame *frame = h->cur_pic_ptr->f;
1033  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1035  vtctx->bitstream_size = 0;
1036  return ret;
1037 }
1038 
1039 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1040  const uint8_t *buffer,
1041  uint32_t size)
1042 {
1043  return 0;
1044 }
1045 
1046 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1047  const uint8_t *buffer,
1048  uint32_t size)
1049 {
1051 }
1052 
1053 
1054 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1055  int type,
1056  const uint8_t *buffer,
1057  uint32_t size)
1058 {
1060 }
1061 
1062 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1063 {
1064  HEVCContext *h = avctx->priv_data;
1065  AVFrame *frame = h->ref->frame;
1066  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1067 
1068  h->output_frame->crop_right = 0;
1069  h->output_frame->crop_left = 0;
1070  h->output_frame->crop_top = 0;
1071  h->output_frame->crop_bottom = 0;
1072 
1074  vtctx->bitstream_size = 0;
1075  return ret;
1076 }
1077 
1078 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1079  const uint8_t *buffer,
1080  uint32_t size)
1081 {
1082  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1083 
1084  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1085 }
1086 
1087 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1088  const uint8_t *buffer,
1089  uint32_t size)
1090 {
1091  return 0;
1092 }
1093 
1094 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1095 {
1096  MpegEncContext *s = avctx->priv_data;
1097  AVFrame *frame = s->current_picture_ptr->f;
1098 
1099  return ff_videotoolbox_common_end_frame(avctx, frame);
1100 }
1101 
1102 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1103  const uint8_t *buffer,
1104  uint32_t size)
1105 {
1106  return 0;
1107 }
1108 
1109 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1110  const uint8_t *buffer,
1111  uint32_t size)
1112 {
1113  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1114 
1115  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1116 }
1117 
1118 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1119 {
1120  ProresContext *ctx = avctx->priv_data;
1121  AVFrame *frame = ctx->frame;
1122 
1123  return ff_videotoolbox_common_end_frame(avctx, frame);
1124 }
1125 
/**
 * Pick the best CoreVideo-compatible software pixel format for the stream's
 * nominal decoded format (avctx->sw_pix_fmt), matching its alpha, bit depth
 * and chroma subsampling as closely as the running SDK allows (gated by the
 * HAVE_KCVPIXELFORMATTYPE_* configure macros).
 *
 * Check order matters: alpha first, then >10-bit, then 4:4:4, 4:2:2, and
 * finally 10-bit 4:2:0, falling back to NV12.
 */
static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
    const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!descriptor)
        return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()

    int depth = descriptor->comp[0].depth;

    // Alpha can only be preserved via the packed 16-bit AYUV layout.
    if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
        return AV_PIX_FMT_AYUV64;

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
    // Deeper than 10 bits: 16-bit bi-planar, full chroma (P416) or 4:2:2 (P216).
    if (depth > 10)
        return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
#endif

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
    // 4:4:4 content (no horizontal chroma subsampling).
    if (descriptor->log2_chroma_w == 0) {
#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
            return AV_PIX_FMT_NV24;
#endif
        return AV_PIX_FMT_P410;
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
    // 4:2:2 content (no vertical chroma subsampling).
    if (descriptor->log2_chroma_h == 0) {
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
            return AV_PIX_FMT_NV16;
#endif
        return AV_PIX_FMT_P210;
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
    // 10-bit (or deeper, when 16-bit formats are unavailable) 4:2:0.
    if (depth > 8) {
        return AV_PIX_FMT_P010;
    }
#endif

    // 8-bit 4:2:0 default.
    return AV_PIX_FMT_NV12;
}
1167 
1169 {
1170  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1171  AVHWFramesContext *hw_frames;
1172  int err;
1173 
1174  // Old API - do nothing.
1175  if (avctx->hwaccel_context)
1176  return 0;
1177 
1178  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1179  av_log(avctx, AV_LOG_ERROR,
1180  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1181  return AVERROR(EINVAL);
1182  }
1183 
1185  if (!vtctx->vt_ctx) {
1186  err = AVERROR(ENOMEM);
1187  goto fail;
1188  }
1189 
1190  if (avctx->hw_frames_ctx) {
1191  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1192  } else {
1194  if (!avctx->hw_frames_ctx) {
1195  err = AVERROR(ENOMEM);
1196  goto fail;
1197  }
1198 
1199  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1200  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1201  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1202  hw_frames->width = avctx->width;
1203  hw_frames->height = avctx->height;
1204 
1205  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1206  if (err < 0) {
1207  av_buffer_unref(&avctx->hw_frames_ctx);
1208  goto fail;
1209  }
1210  }
1211 
1213  if (!vtctx->cached_hw_frames_ctx) {
1214  err = AVERROR(ENOMEM);
1215  goto fail;
1216  }
1217 
1218  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1219  vtctx->vt_ctx->cv_pix_fmt_type =
1221  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1222  const AVPixFmtDescriptor *attempted_format =
1223  av_pix_fmt_desc_get(hw_frames->sw_format);
1224  av_log(avctx, AV_LOG_ERROR,
1225  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1226  "a VideoToolbox format!\n",
1227  attempted_format ? attempted_format->name : "<unknown>",
1229  err = AVERROR(EINVAL);
1230  goto fail;
1231  }
1232 
1233  err = videotoolbox_start(avctx);
1234  if (err < 0)
1235  goto fail;
1236 
1237  return 0;
1238 
1239 fail:
1240  ff_videotoolbox_uninit(avctx);
1241  return err;
1242 }
1243 
1245  AVBufferRef *hw_frames_ctx)
1246 {
1247  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1248 
1249  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1250  frames_ctx->width = avctx->coded_width;
1251  frames_ctx->height = avctx->coded_height;
1252  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1253 
1254  return 0;
1255 }
1256 
1258  .name = "h263_videotoolbox",
1259  .type = AVMEDIA_TYPE_VIDEO,
1260  .id = AV_CODEC_ID_H263,
1261  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1262  .alloc_frame = ff_videotoolbox_alloc_frame,
1263  .start_frame = videotoolbox_mpeg_start_frame,
1264  .decode_slice = videotoolbox_mpeg_decode_slice,
1265  .end_frame = videotoolbox_mpeg_end_frame,
1266  .frame_params = ff_videotoolbox_frame_params,
1268  .uninit = ff_videotoolbox_uninit,
1269  .priv_data_size = sizeof(VTContext),
1270 };
1271 
1273  .name = "hevc_videotoolbox",
1274  .type = AVMEDIA_TYPE_VIDEO,
1275  .id = AV_CODEC_ID_HEVC,
1276  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1277  .alloc_frame = ff_videotoolbox_alloc_frame,
1278  .start_frame = videotoolbox_hevc_start_frame,
1279  .decode_slice = videotoolbox_hevc_decode_slice,
1280  .decode_params = videotoolbox_hevc_decode_params,
1281  .end_frame = videotoolbox_hevc_end_frame,
1282  .frame_params = ff_videotoolbox_frame_params,
1284  .uninit = ff_videotoolbox_uninit,
1285  .priv_data_size = sizeof(VTContext),
1286 };
1287 
1289  .name = "h264_videotoolbox",
1290  .type = AVMEDIA_TYPE_VIDEO,
1291  .id = AV_CODEC_ID_H264,
1292  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1293  .alloc_frame = ff_videotoolbox_alloc_frame,
1294  .start_frame = ff_videotoolbox_h264_start_frame,
1295  .decode_slice = ff_videotoolbox_h264_decode_slice,
1296  .decode_params = videotoolbox_h264_decode_params,
1297  .end_frame = videotoolbox_h264_end_frame,
1298  .frame_params = ff_videotoolbox_frame_params,
1300  .uninit = ff_videotoolbox_uninit,
1301  .priv_data_size = sizeof(VTContext),
1302 };
1303 
1305  .name = "mpeg1_videotoolbox",
1306  .type = AVMEDIA_TYPE_VIDEO,
1307  .id = AV_CODEC_ID_MPEG1VIDEO,
1308  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1309  .alloc_frame = ff_videotoolbox_alloc_frame,
1310  .start_frame = videotoolbox_mpeg_start_frame,
1311  .decode_slice = videotoolbox_mpeg_decode_slice,
1312  .end_frame = videotoolbox_mpeg_end_frame,
1313  .frame_params = ff_videotoolbox_frame_params,
1315  .uninit = ff_videotoolbox_uninit,
1316  .priv_data_size = sizeof(VTContext),
1317 };
1318 
1320  .name = "mpeg2_videotoolbox",
1321  .type = AVMEDIA_TYPE_VIDEO,
1322  .id = AV_CODEC_ID_MPEG2VIDEO,
1323  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1324  .alloc_frame = ff_videotoolbox_alloc_frame,
1325  .start_frame = videotoolbox_mpeg_start_frame,
1326  .decode_slice = videotoolbox_mpeg_decode_slice,
1327  .end_frame = videotoolbox_mpeg_end_frame,
1328  .frame_params = ff_videotoolbox_frame_params,
1330  .uninit = ff_videotoolbox_uninit,
1331  .priv_data_size = sizeof(VTContext),
1332 };
1333 
1335  .name = "mpeg4_videotoolbox",
1336  .type = AVMEDIA_TYPE_VIDEO,
1337  .id = AV_CODEC_ID_MPEG4,
1338  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1339  .alloc_frame = ff_videotoolbox_alloc_frame,
1340  .start_frame = videotoolbox_mpeg_start_frame,
1341  .decode_slice = videotoolbox_mpeg_decode_slice,
1342  .end_frame = videotoolbox_mpeg_end_frame,
1343  .frame_params = ff_videotoolbox_frame_params,
1345  .uninit = ff_videotoolbox_uninit,
1346  .priv_data_size = sizeof(VTContext),
1347 };
1348 
1350  .name = "prores_videotoolbox",
1351  .type = AVMEDIA_TYPE_VIDEO,
1352  .id = AV_CODEC_ID_PRORES,
1353  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1354  .alloc_frame = ff_videotoolbox_alloc_frame,
1355  .start_frame = videotoolbox_prores_start_frame,
1356  .decode_slice = videotoolbox_prores_decode_slice,
1357  .end_frame = videotoolbox_prores_end_frame,
1358  .frame_params = ff_videotoolbox_frame_params,
1360  .uninit = ff_videotoolbox_uninit,
1361  .priv_data_size = sizeof(VTContext),
1362 };
1363 
1364 static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1365  bool full_range)
1366 {
1367  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1368 
1369  if (ret) {
1370  ret->output_callback = videotoolbox_decoder_callback;
1371 
1372  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1373  if (cv_pix_fmt_type == 0) {
1374  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1375  }
1376  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1377  }
1378 
1379  return ret;
1380 }
1381 
1383 {
1384  return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1385 }
1386 
1388 {
1389  return av_videotoolbox_default_init2(avctx, NULL);
1390 }
1391 
1393 {
1394  enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
1395  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1396  avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
1397  if (!avctx->hwaccel_context)
1398  return AVERROR(ENOMEM);
1399  return videotoolbox_start(avctx);
1400 }
1401 
1403 {
1404 
1405  videotoolbox_stop(avctx);
1406  av_freep(&avctx->hwaccel_context);
1407 }
1408 #endif /* CONFIG_VIDEOTOOLBOX */
videotoolbox_buffer_release
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
Definition: videotoolbox.c:64
AVVideotoolboxContext::cm_codec_type
int cm_codec_type
CoreMedia codec type that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:76
kCMVideoCodecType_VP9
@ kCMVideoCodecType_VP9
Definition: videotoolbox.c:54
AVCodecContext::hwaccel_context
void * hwaccel_context
Hardware accelerator context.
Definition: avcodec.h:1390
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
APPEND_PS
#define APPEND_PS(T, t)
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
av_videotoolbox_alloc_context
AVVideotoolboxContext * av_videotoolbox_alloc_context(void)
Allocate and initialize a Videotoolbox context.
ff_videotoolbox_common_end_frame
int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
ff_hevc_videotoolbox_hwaccel
const AVHWAccel ff_hevc_videotoolbox_hwaccel
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2662
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_videotoolbox_uninit
int ff_videotoolbox_uninit(AVCodecContext *avctx)
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
ff_h264_videotoolbox_hwaccel
const AVHWAccel ff_h264_videotoolbox_hwaccel
av_videotoolbox_default_free
void av_videotoolbox_default_free(AVCodecContext *avctx)
This function must be called to free the Videotoolbox context initialized with av_videotoolbox_defaul...
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
FrameDecodeData
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
ff_prores_videotoolbox_hwaccel
const AVHWAccel ff_prores_videotoolbox_hwaccel
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:334
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:599
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:248
internal.h
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVPixFmtDescriptor::name
const char * name
Definition: pixdesc.h:70
b
#define b
Definition: input.c:34
av_vt_pixbuf_set_attachments
int av_vt_pixbuf_set_attachments(void *log_ctx, CVPixelBufferRef pixbuf, const AVFrame *src)
Definition: hwcontext_videotoolbox.c:583
data
const char data[16]
Definition: mxf.c:143
ProresContext
Definition: proresdec.h:38
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
bytestream2_size_p
static av_always_inline int bytestream2_size_p(PutByteContext *p)
Definition: bytestream.h:207
AV_W8
#define AV_W8(p, v)
Definition: videotoolbox.c:154
PTLCommon::profile_space
uint8_t profile_space
Definition: hevc_ps.h:93
COUNT_SIZE_PS
#define COUNT_SIZE_PS(T, t)
mpegvideo.h
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:229
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVVideotoolboxContext
This struct holds all the information that needs to be passed between the caller and libavcodec for i...
Definition: videotoolbox.h:46
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
PTLCommon::profile_compatibility_flag
uint8_t profile_compatibility_flag[32]
Definition: hevc_ps.h:96
escape_ps
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
Definition: videotoolbox.c:156
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
PTLCommon::progressive_source_flag
uint8_t progressive_source_flag
Definition: hevc_ps.h:97
ff_mpeg1_videotoolbox_hwaccel
const AVHWAccel ff_mpeg1_videotoolbox_hwaccel
AVHWAccel
Definition: avcodec.h:2068
PTLCommon::interlaced_source_flag
uint8_t interlaced_source_flag
Definition: hevc_ps.h:98
ff_videotoolbox_avcc_extradata_create
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:186
fail
#define fail()
Definition: checkasm.h:130
av_videotoolbox_default_init
int av_videotoolbox_default_init(AVCodecContext *avctx)
This is a convenience function that creates and sets up the Videotoolbox context using an internal im...
proresdec.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:654
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:577
VTContext::allocated_size
int allocated_size
Definition: vt_internal.h:33
ff_videotoolbox_common_init
int ff_videotoolbox_common_init(AVCodecContext *avctx)
PTLCommon::frame_only_constraint_flag
uint8_t frame_only_constraint_flag
Definition: hevc_ps.h:100
av_bswap32
#define av_bswap32
Definition: bswap.h:33
videotoolbox.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
VTContext::bitstream
uint8_t * bitstream
Definition: vt_internal.h:27
ff_videotoolbox_h264_start_frame
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:379
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:46
AVHWFramesContext::height
int height
Definition: hwcontext.h:229
ff_h263_videotoolbox_hwaccel
const AVHWAccel ff_h263_videotoolbox_hwaccel
duration
int64_t duration
Definition: movenc.c:64
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:491
bytestream2_put_ne24
#define bytestream2_put_ne24
Definition: bytestream.h:128
full_range
bool full_range
Definition: hwcontext_videotoolbox.c:42
av_fast_realloc
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:505
width
#define width
vt_internal.h
PTLCommon
Definition: hevc_ps.h:92
s
#define s(width, name)
Definition: cbs_vp9.c:256
VTHWFrame
Definition: videotoolbox.c:59
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:218
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
ff_videotoolbox_vpcc_extradata_create
CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_vp9.c:62
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:48
decode.h
PTLCommon::non_packed_constraint_flag
uint8_t non_packed_constraint_flag
Definition: hevc_ps.h:99
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:41
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
PTLCommon::profile_idc
uint8_t profile_idc
Definition: hevc_ps.h:95
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:77
PTLCommon::tier_flag
uint8_t tier_flag
Definition: hevc_ps.h:94
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
if
if(ret)
Definition: filter_design.txt:179
VTContext::bitstream_size
int bitstream_size
Definition: vt_internal.h:30
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:2990
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:222
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:973
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:141
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:424
V
#define V
Definition: avdct.c:30
AV_PIX_FMT_P410
#define AV_PIX_FMT_P410
Definition: pixfmt.h:463
ff_mpeg2_videotoolbox_hwaccel
const AVHWAccel ff_mpeg2_videotoolbox_hwaccel
AVVideotoolboxContext::session
VTDecompressionSessionRef session
Videotoolbox decompression session object.
Definition: videotoolbox.h:51
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
ff_videotoolbox_frame_params
int ff_videotoolbox_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
ff_videotoolbox_h264_decode_slice
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:439
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:51
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
videotoolbox_common_decode_slice
static int videotoolbox_common_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:416
VTHWFrame::pixbuf
CVPixelBufferRef pixbuf
Definition: videotoolbox.c:60
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
PutByteContext
Definition: bytestream.h:37
hwcontext_videotoolbox.h
ff_videotoolbox_hvcc_extradata_create
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:231
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
hevcdec.h
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
FrameDecodeData::post_process
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:120
P
#define P
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:54
size
int size
Definition: twinvq_data.h:10344
VUI
Definition: hevc_ps.h:49
AV_PIX_FMT_AYUV64
#define AV_PIX_FMT_AYUV64
Definition: pixfmt.h:454
AVVideotoolboxContext::cm_fmt_desc
CMVideoFormatDescriptionRef cm_fmt_desc
CoreMedia Format Description that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:70
AV_PIX_FMT_NV16
@ AV_PIX_FMT_NV16
interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:191
height
#define height
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AV_PIX_FMT_P216
#define AV_PIX_FMT_P216
Definition: pixfmt.h:464
AV_PIX_FMT_P210
#define AV_PIX_FMT_P210
Definition: pixfmt.h:462
VTContext
Definition: vt_internal.h:25
av_videotoolbox_default_init2
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
This is a convenience function that creates and sets up the Videotoolbox context using an internal im...
AVHWAccel::name
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2074
kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:43
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:274
h264dec.h
H264Context
H264Context.
Definition: h264dec.h:330
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:490
AV_PIX_FMT_NV24
@ AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:340
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:224
VTContext::frame
CVImageBufferRef frame
Definition: vt_internal.h:36
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:477
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1931
bytestream2_put_ne32
#define bytestream2_put_ne32
Definition: bytestream.h:129
AVCodecContext::height
int height
Definition: avcodec.h:562
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1881
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
bytestream2_put_ne16
#define bytestream2_put_ne16
Definition: bytestream.h:127
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_videotoolbox_alloc_frame
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: videotoolbox.c:124
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
Definition: videotoolbox.c:57
kCMVideoCodecType_HEVC
@ kCMVideoCodecType_HEVC
Definition: videotoolbox.c:50
av_map_videotoolbox_format_to_pixfmt
enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat.
Definition: hwcontext_videotoolbox.c:137
AVCodecContext
main external API structure.
Definition: avcodec.h:389
VTContext::vt_ctx
struct AVVideotoolboxContext * vt_ctx
Definition: vt_internal.h:43
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
HEVCContext
Definition: hevcdec.h:467
PTLCommon::level_idc
uint8_t level_idc
Definition: hevc_ps.h:112
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
videotoolbox_postproc_frame
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
Definition: videotoolbox.c:93
VTHWFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: videotoolbox.c:61
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:455
VUI::min_spatial_segmentation_idc
int min_spatial_segmentation_idc
Definition: hevc_ps.h:85
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:577
VTContext::cached_hw_frames_ctx
struct AVBufferRef * cached_hw_frames_ctx
Definition: vt_internal.h:39
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
avutil.h
ff_mpeg4_videotoolbox_hwaccel
const AVHWAccel ff_mpeg4_videotoolbox_hwaccel
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
HEVCVPS
Definition: hevc_ps.h:123
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1379
HEVCSPS
Definition: hevc_ps.h:153
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
HEVCPPS
Definition: hevc_ps.h:249
ff_videotoolbox_buffer_copy
int ff_videotoolbox_buffer_copy(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:73
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:414
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
AV_PIX_FMT_P416
#define AV_PIX_FMT_P416
Definition: pixfmt.h:465
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVVideotoolboxContext::cv_pix_fmt_type
OSType cv_pix_fmt_type
CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
Definition: videotoolbox.h:64
av_map_videotoolbox_format_from_pixfmt2
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range)
Same as av_map_videotoolbox_format_from_pixfmt function, but can map and return full range pixel form...
Definition: hwcontext_videotoolbox.c:152
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
bytestream.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
hwcontext.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1740
videotoolbox_h264_decode_params
static int videotoolbox_h264_decode_params(AVCodecContext *avctx, int type, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:393
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
VTContext::reconfig_needed
bool reconfig_needed
Definition: vt_internal.h:47
VTContext::sps
uint8_t sps[3]
Definition: vt_internal.h:46
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
AV_CODEC_ID_PRORES
@ AV_CODEC_ID_PRORES
Definition: codec_id.h:198