FFmpeg
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "videotoolbox.h"
25 #include "libavutil/hwcontext_videotoolbox.h"
26 #include "vt_internal.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/pixdesc.h"
30 #include "bytestream.h"
31 #include "decode.h"
32 #include "h264dec.h"
33 #include "hevcdec.h"
34 #include "mpegvideo.h"
35 #include "proresdec.h"
36 #include <Availability.h>
37 #include <AvailabilityMacros.h>
38 #include <TargetConditionals.h>
39 
40 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
41 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
42 #endif
43 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
44 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
45 #endif
46 
47 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
48 enum { kCMVideoCodecType_HEVC = 'hvc1' };
49 #endif
50 
51 #if !HAVE_KCMVIDEOCODECTYPE_VP9
52 enum { kCMVideoCodecType_VP9 = 'vp09' };
53 #endif
54 
55 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
56 
57 typedef struct VTHWFrame {
58  CVPixelBufferRef pixbuf;
59  AVBufferRef *hw_frames_ctx;
60 } VTHWFrame;
61 
62 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
63 {
64  VTHWFrame *ref = (VTHWFrame *)data;
65  av_buffer_unref(&ref->hw_frames_ctx);
66  CVPixelBufferRelease(ref->pixbuf);
67 
68  av_free(data);
69 }
70 
71 int ff_videotoolbox_buffer_copy(VTContext *vtctx,
72  const uint8_t *buffer,
73  uint32_t size)
74 {
75  void *tmp;
76 
77  tmp = av_fast_realloc(vtctx->bitstream,
78  &vtctx->allocated_size,
79  size);
80 
81  if (!tmp)
82  return AVERROR(ENOMEM);
83 
84  vtctx->bitstream = tmp;
85  memcpy(vtctx->bitstream, buffer, size);
86  vtctx->bitstream_size = size;
87 
88  return 0;
89 }
90 
91 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
92 {
93  int ret;
94  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
95 
96  if (!ref->pixbuf) {
97  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
98  av_frame_unref(frame);
99  return AVERROR_EXTERNAL;
100  }
101 
102  frame->crop_right = 0;
103  frame->crop_left = 0;
104  frame->crop_top = 0;
105  frame->crop_bottom = 0;
106 
107  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
108  return ret;
109 
110  frame->data[3] = (uint8_t*)ref->pixbuf;
111 
112  if (ref->hw_frames_ctx) {
113  av_buffer_unref(&frame->hw_frames_ctx);
114  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
115  if (!frame->hw_frames_ctx)
116  return AVERROR(ENOMEM);
117  }
118 
119  return 0;
120 }
121 
122 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
123 {
124  size_t size = sizeof(VTHWFrame);
125  uint8_t *data = NULL;
126  AVBufferRef *buf = NULL;
127  int ret = ff_attach_decode_data(frame);
128  FrameDecodeData *fdd;
129  if (ret < 0)
130  return ret;
131 
132  data = av_mallocz(size);
133  if (!data)
134  return AVERROR(ENOMEM);
135  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
136  if (!buf) {
137  av_freep(&data);
138  return AVERROR(ENOMEM);
139  }
140  frame->buf[0] = buf;
141 
142  fdd = (FrameDecodeData*)frame->private_ref->data;
143  fdd->post_process = videotoolbox_postproc_frame;
144 
145  frame->width = avctx->width;
146  frame->height = avctx->height;
147  frame->format = avctx->pix_fmt;
148 
149  return 0;
150 }
151 
152 #define AV_W8(p, v) *(p) = (v)
153 
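/* Re-insert H.264/HEVC emulation-prevention bytes (0x03) while copying a raw
 * parameter set into the configuration record; with dst == NULL only the
 * escaped size is computed. */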
154 static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
155 {
156  int i;
157  int size = src_size;
158  uint8_t* p = dst;
159 
160  for (i = 0; i < src_size; i++) {
161  if (i + 2 < src_size &&
162  src[i] == 0x00 &&
163  src[i + 1] == 0x00 &&
164  src[i + 2] <= 0x03) {
165  if (dst) {
166  *p++ = src[i++];
167  *p++ = src[i++];
168  *p++ = 0x03;
169  } else {
170  i += 2;
171  }
172  size++;
173  }
174  if (dst)
175  *p++ = src[i];
176  }
177 
178  if (dst)
179  av_assert0((p - dst) == size);
180 
181  return size;
182 }
183 
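/* Build an avcC (AVCDecoderConfigurationRecord) from the active SPS/PPS; it is
 * attached to the CMVideoFormatDescription as the "avcC" sample description
 * extension. */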
184 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
185 {
186  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
187  H264Context *h = avctx->priv_data;
188  CFDataRef data = NULL;
189  uint8_t *p;
190  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
191  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
192  int vt_extradata_size;
193  uint8_t *vt_extradata;
194 
195  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
196  vt_extradata = av_malloc(vt_extradata_size);
197 
198  if (!vt_extradata)
199  return NULL;
200 
201  p = vt_extradata;
202 
203  AV_W8(p + 0, 1); /* version */
204  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
205  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
206  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
207  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
208  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
209  AV_WB16(p + 6, sps_size);
210  p += 8;
211  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
212  AV_W8(p + 0, 1); /* number of pps */
213  AV_WB16(p + 1, pps_size);
214  p += 3;
215  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
216 
217  av_assert0(p - vt_extradata == vt_extradata_size);
218 
219  // save sps header (profile/level) used to create decoder session,
220  // so we can detect changes and recreate it.
221  if (vtctx)
222  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
223 
224  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
225  av_free(vt_extradata);
226  return data;
227 }
228 
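/* Build an hvcC (HEVCDecoderConfigurationRecord) from the VPS/SPS/PPS lists;
 * it is attached to the CMVideoFormatDescription as the "hvcC" sample
 * description extension. */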
229 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
230 {
231  HEVCContext *h = avctx->priv_data;
232  int i, num_vps = 0, num_sps = 0, num_pps = 0;
233  const HEVCVPS *vps = h->ps.vps;
234  const HEVCSPS *sps = h->ps.sps;
235  const HEVCPPS *pps = h->ps.pps;
236  PTLCommon ptlc = vps->ptl.general_ptl;
237  VUI vui = sps->vui;
238  uint8_t parallelismType;
239  CFDataRef data = NULL;
240  uint8_t *p;
241  int vt_extradata_size = 23 + 3 + 3 + 3;
242  uint8_t *vt_extradata;
243 
244 #define COUNT_SIZE_PS(T, t) \
245  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
246  if (h->ps.t##ps_list[i]) { \
247  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
248  vt_extradata_size += 2 + lps->data_size; \
249  num_##t##ps++; \
250  } \
251  }
252 
253  COUNT_SIZE_PS(V, v)
254  COUNT_SIZE_PS(S, s)
255  COUNT_SIZE_PS(P, p)
256 
257  vt_extradata = av_malloc(vt_extradata_size);
258  if (!vt_extradata)
259  return NULL;
260  p = vt_extradata;
261 
262  /* unsigned int(8) configurationVersion = 1; */
263  AV_W8(p + 0, 1);
264 
265  /*
266  * unsigned int(2) general_profile_space;
267  * unsigned int(1) general_tier_flag;
268  * unsigned int(5) general_profile_idc;
269  */
270  AV_W8(p + 1, ptlc.profile_space << 6 |
271  ptlc.tier_flag << 5 |
272  ptlc.profile_idc);
273 
274  /* unsigned int(32) general_profile_compatibility_flags; */
275  memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
276 
277  /* unsigned int(48) general_constraint_indicator_flags; */
278  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
279  ptlc.interlaced_source_flag << 6 |
280  ptlc.non_packed_constraint_flag << 5 |
281  ptlc.frame_only_constraint_flag << 4);
282  AV_W8(p + 7, 0);
283  AV_WN32(p + 8, 0);
284 
285  /* unsigned int(8) general_level_idc; */
286  AV_W8(p + 12, ptlc.level_idc);
287 
288  /*
289  * bit(4) reserved = ‘1111’b;
290  * unsigned int(12) min_spatial_segmentation_idc;
291  */
292  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
293  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
294 
295  /*
296  * bit(6) reserved = ‘111111’b;
297  * unsigned int(2) parallelismType;
298  */
299  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
300  parallelismType = 0;
301  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
302  parallelismType = 0;
303  else if (pps->entropy_coding_sync_enabled_flag)
304  parallelismType = 3;
305  else if (pps->tiles_enabled_flag)
306  parallelismType = 2;
307  else
308  parallelismType = 1;
309  AV_W8(p + 15, 0xfc | parallelismType);
310 
311  /*
312  * bit(6) reserved = ‘111111’b;
313  * unsigned int(2) chromaFormat;
314  */
315  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
316 
317  /*
318  * bit(5) reserved = ‘11111’b;
319  * unsigned int(3) bitDepthLumaMinus8;
320  */
321  AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
322 
323  /*
324  * bit(5) reserved = ‘11111’b;
325  * unsigned int(3) bitDepthChromaMinus8;
326  */
327  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
328 
329  /* bit(16) avgFrameRate; */
330  AV_WB16(p + 19, 0);
331 
332  /*
333  * bit(2) constantFrameRate;
334  * bit(3) numTemporalLayers;
335  * bit(1) temporalIdNested;
336  * unsigned int(2) lengthSizeMinusOne;
337  */
338  AV_W8(p + 21, 0 << 6 |
339  sps->max_sub_layers << 3 |
340  sps->temporal_id_nesting_flag << 2 |
341  3);
342 
343  /* unsigned int(8) numOfArrays; */
344  AV_W8(p + 22, 3);
345 
346  p += 23;
347 
348 #define APPEND_PS(T, t) \
349  /* \
350  * bit(1) array_completeness; \
351  * unsigned int(1) reserved = 0; \
352  * unsigned int(6) NAL_unit_type; \
353  */ \
354  AV_W8(p, 1 << 7 | \
355  HEVC_NAL_##T##PS & 0x3f); \
356  /* unsigned int(16) numNalus; */ \
357  AV_WB16(p + 1, num_##t##ps); \
358  p += 3; \
359  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
360  if (h->ps.t##ps_list[i]) { \
361  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
362  /* unsigned int(16) nalUnitLength; */ \
363  AV_WB16(p, lps->data_size); \
364  /* bit(8*nalUnitLength) nalUnit; */ \
365  memcpy(p + 2, lps->data, lps->data_size); \
366  p += 2 + lps->data_size; \
367  } \
368  }
369 
370  APPEND_PS(V, v)
371  APPEND_PS(S, s)
372  APPEND_PS(P, p)
373 
374  av_assert0(p - vt_extradata == vt_extradata_size);
375 
376  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
377  av_free(vt_extradata);
378  return data;
379 }
380 
381 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
382  const uint8_t *buffer,
383  uint32_t size)
384 {
385  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
386  H264Context *h = avctx->priv_data;
387 
388  if (h->is_avc == 1) {
389  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
390  }
391 
392  return 0;
393 }
394 
395 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
396  int type,
397  const uint8_t *buffer,
398  uint32_t size)
399 {
400  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
401  H264Context *h = avctx->priv_data;
402 
403  // save sps header (profile/level) used to create decoder session
404  if (!vtctx->sps[0])
405  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
406 
407  if (type == H264_NAL_SPS) {
408  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
409  vtctx->reconfig_needed = true;
410  memcpy(vtctx->sps, buffer + 1, 3);
411  }
412  }
413 
414  // pass-through SPS/PPS changes to the decoder
415  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
416 }
417 
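/* Append one NAL unit to the per-frame bitstream, converting Annex B input to
 * the 4-byte big-endian length-prefixed layout announced by the avcC/hvcC
 * configuration (lengthSizeMinusOne == 3). */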
418 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
419  const uint8_t *buffer,
420  uint32_t size)
421 {
422  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
423  void *tmp;
424 
425  tmp = av_fast_realloc(vtctx->bitstream,
426  &vtctx->allocated_size,
427  vtctx->bitstream_size+size+4);
428  if (!tmp)
429  return AVERROR(ENOMEM);
430 
431  vtctx->bitstream = tmp;
432 
433  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
434  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
435 
436  vtctx->bitstream_size += size + 4;
437 
438  return 0;
439 }
440 
441 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
442  const uint8_t *buffer,
443  uint32_t size)
444 {
445  H264Context *h = avctx->priv_data;
446 
447  if (h->is_avc == 1)
448  return 0;
449 
450  return videotoolbox_common_decode_slice(avctx, buffer, size);
451 }
452 
453 #if CONFIG_VIDEOTOOLBOX
454 // Return the AVVideotoolboxContext that matters currently. Where it comes from
455 // depends on the API used.
456 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
457 {
458  // Somewhat tricky because the user can call av_videotoolbox_default_free()
459  // at any time, even when the codec is closed.
460  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
461  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
462  if (vtctx->vt_ctx)
463  return vtctx->vt_ctx;
464  }
465  return avctx->hwaccel_context;
466 }
467 
468 static void videotoolbox_stop(AVCodecContext *avctx)
469 {
470  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
471  if (!videotoolbox)
472  return;
473 
474  if (videotoolbox->cm_fmt_desc) {
475  CFRelease(videotoolbox->cm_fmt_desc);
476  videotoolbox->cm_fmt_desc = NULL;
477  }
478 
479  if (videotoolbox->session) {
480  VTDecompressionSessionInvalidate(videotoolbox->session);
481  CFRelease(videotoolbox->session);
482  videotoolbox->session = NULL;
483  }
484 }
485 
486 int ff_videotoolbox_uninit(AVCodecContext *avctx)
487 {
488  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
489  if (!vtctx)
490  return 0;
491 
492  av_freep(&vtctx->bitstream);
493  if (vtctx->frame)
494  CVPixelBufferRelease(vtctx->frame);
495 
496  if (vtctx->vt_ctx)
497  videotoolbox_stop(avctx);
498 
499  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
500  av_freep(&vtctx->vt_ctx);
501 
502  return 0;
503 }
504 
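/* Move the CVPixelBuffer produced by the decoder callback into the output
 * frame and refresh the cached hw_frames_ctx when the pixel format or the
 * frame dimensions changed. */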
505 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
506 {
507  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
508  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
509  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
510  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
511  int width = CVPixelBufferGetWidth(pixbuf);
512  int height = CVPixelBufferGetHeight(pixbuf);
513  AVHWFramesContext *cached_frames;
514  VTHWFrame *ref;
515  int ret;
516 
517  if (!frame->buf[0] || frame->data[3]) {
518  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
519  av_frame_unref(frame);
520  return AVERROR_EXTERNAL;
521  }
522 
523  ref = (VTHWFrame *)frame->buf[0]->data;
524 
525  if (ref->pixbuf)
526  CVPixelBufferRelease(ref->pixbuf);
527  ref->pixbuf = vtctx->frame;
528  vtctx->frame = NULL;
529 
530  // Old API code path.
531  if (!vtctx->cached_hw_frames_ctx)
532  return 0;
533 
534  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
535 
536  if (cached_frames->sw_format != sw_format ||
537  cached_frames->width != width ||
538  cached_frames->height != height) {
539  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
540  AVHWFramesContext *hw_frames;
541  if (!hw_frames_ctx)
542  return AVERROR(ENOMEM);
543 
544  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
545  hw_frames->format = cached_frames->format;
546  hw_frames->sw_format = sw_format;
547  hw_frames->width = width;
548  hw_frames->height = height;
549 
550  ret = av_hwframe_ctx_init(hw_frames_ctx);
551  if (ret < 0) {
552  av_buffer_unref(&hw_frames_ctx);
553  return ret;
554  }
555 
556  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
557  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
558  }
559 
560  av_buffer_unref(&ref->hw_frames_ctx);
561  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
562  if (!ref->hw_frames_ctx)
563  return AVERROR(ENOMEM);
564 
565  return 0;
566 }
567 
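/* Write an MPEG-4 descriptor length as four bytes holding 7 bits each, the
 * high bit of every byte except the last acting as a continuation flag. */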
568 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
569 {
570  int i;
571  uint8_t b;
572 
573  for (i = 3; i >= 0; i--) {
574  b = (length >> (i * 7)) & 0x7F;
575  if (i != 0)
576  b |= 0x80;
577 
578  bytestream2_put_byteu(pb, b);
579  }
580 }
581 
582 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
583 {
584  CFDataRef data;
585  uint8_t *rw_extradata;
586  PutByteContext pb;
587  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
588  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
589  int config_size = 13 + 5 + avctx->extradata_size;
590  int s;
591 
592  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
593  return NULL;
594 
595  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
596  bytestream2_put_byteu(&pb, 0); // version
597  bytestream2_put_ne24(&pb, 0); // flags
598 
599  // elementary stream descriptor
600  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
601  videotoolbox_write_mp4_descr_length(&pb, full_size);
602  bytestream2_put_ne16(&pb, 0); // esid
603  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
604 
605  // decoder configuration descriptor
606  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
607  videotoolbox_write_mp4_descr_length(&pb, config_size);
608  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
609  bytestream2_put_byteu(&pb, 0x11); // stream type
610  bytestream2_put_ne24(&pb, 0); // buffer size
611  bytestream2_put_ne32(&pb, 0); // max bitrate
612  bytestream2_put_ne32(&pb, 0); // avg bitrate
613 
614  // decoder specific descriptor
615  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
616  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
617 
618  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
619 
620  // SLConfigDescriptor
621  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
622  bytestream2_put_byteu(&pb, 0x01); // length
623  bytestream2_put_byteu(&pb, 0x02); // predefined: reserved for use in MP4 files
624 
625  s = bytestream2_size_p(&pb);
626 
627  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
628 
629  av_freep(&rw_extradata);
630  return data;
631 }
632 
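/* Wrap the accumulated bitstream in a CMSampleBuffer; using kCFAllocatorNull
 * as block allocator makes the block buffer reference the memory directly
 * instead of copying it. */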
633 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
634  void *buffer,
635  int size)
636 {
637  OSStatus status;
638  CMBlockBufferRef block_buf;
639  CMSampleBufferRef sample_buf;
640 
641  block_buf = NULL;
642  sample_buf = NULL;
643 
644  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
645  buffer, // memoryBlock
646  size, // blockLength
647  kCFAllocatorNull, // blockAllocator
648  NULL, // customBlockSource
649  0, // offsetToData
650  size, // dataLength
651  0, // flags
652  &block_buf);
653 
654  if (!status) {
655  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
656  block_buf, // dataBuffer
657  TRUE, // dataReady
658  0, // makeDataReadyCallback
659  0, // makeDataReadyRefcon
660  fmt_desc, // formatDescription
661  1, // numSamples
662  0, // numSampleTimingEntries
663  NULL, // sampleTimingArray
664  0, // numSampleSizeEntries
665  NULL, // sampleSizeArray
666  &sample_buf);
667  }
668 
669  if (block_buf)
670  CFRelease(block_buf);
671 
672  return sample_buf;
673 }
674 
675 static void videotoolbox_decoder_callback(void *opaque,
676  void *sourceFrameRefCon,
677  OSStatus status,
678  VTDecodeInfoFlags flags,
679  CVImageBufferRef image_buffer,
680  CMTime pts,
681  CMTime duration)
682 {
683  AVCodecContext *avctx = opaque;
684  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
685 
686  if (vtctx->frame) {
687  CVPixelBufferRelease(vtctx->frame);
688  vtctx->frame = NULL;
689  }
690 
691  if (!image_buffer) {
692  av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
693  return;
694  }
695 
696  vtctx->frame = CVPixelBufferRetain(image_buffer);
697 }
698 
699 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
700 {
701  OSStatus status;
702  CMSampleBufferRef sample_buf;
703  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
704  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
705 
706  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
707  vtctx->bitstream,
708  vtctx->bitstream_size);
709 
710  if (!sample_buf)
711  return -1;
712 
713  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
714  sample_buf,
715  0, // decodeFlags
716  NULL, // sourceFrameRefCon
717  0); // infoFlagsOut
718  if (status == noErr)
719  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
720 
721  CFRelease(sample_buf);
722 
723  return status;
724 }
725 
726 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
727  CFDictionaryRef decoder_spec,
728  int width,
729  int height)
730 {
731  CMFormatDescriptionRef cm_fmt_desc;
732  OSStatus status;
733 
734  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
735  codec_type,
736  width,
737  height,
738  decoder_spec, // Dictionary of extension
739  &cm_fmt_desc);
740 
741  if (status)
742  return NULL;
743 
744  return cm_fmt_desc;
745 }
746 
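/* Destination image buffer attributes: requested pixel format and dimensions
 * plus an empty IOSurface properties dictionary, so decoded frames come back
 * IOSurface-backed. */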
747 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
748  int height,
749  OSType pix_fmt)
750 {
751  CFMutableDictionaryRef buffer_attributes;
752  CFMutableDictionaryRef io_surface_properties;
753  CFNumberRef cv_pix_fmt;
754  CFNumberRef w;
755  CFNumberRef h;
756 
757  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
758  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
759  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
760 
761  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
762  4,
763  &kCFTypeDictionaryKeyCallBacks,
764  &kCFTypeDictionaryValueCallBacks);
765  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
766  0,
767  &kCFTypeDictionaryKeyCallBacks,
768  &kCFTypeDictionaryValueCallBacks);
769 
770  if (pix_fmt)
771  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
772  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
773  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
774  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
775 #if TARGET_OS_IPHONE
776  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
777 #else
778  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
779 #endif
780 
781  CFRelease(io_surface_properties);
782  CFRelease(cv_pix_fmt);
783  CFRelease(w);
784  CFRelease(h);
785 
786  return buffer_attributes;
787 }
788 
789 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
790  AVCodecContext *avctx)
791 {
792  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
793  0,
794  &kCFTypeDictionaryKeyCallBacks,
795  &kCFTypeDictionaryValueCallBacks);
796 
797  CFDictionarySetValue(config_info,
798  codec_type == kCMVideoCodecType_HEVC ?
799  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
800  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
801  kCFBooleanTrue);
802 
803  CFMutableDictionaryRef avc_info;
804  CFDataRef data = NULL;
805 
806  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
807  1,
808  &kCFTypeDictionaryKeyCallBacks,
809  &kCFTypeDictionaryValueCallBacks);
810 
811  switch (codec_type) {
812  case kCMVideoCodecType_MPEG4Video :
813  if (avctx->extradata_size)
814  data = videotoolbox_esds_extradata_create(avctx);
815  if (data)
816  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
817  break;
818  case kCMVideoCodecType_H264 :
819  data = ff_videotoolbox_avcc_extradata_create(avctx);
820  if (data)
821  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
822  break;
823  case kCMVideoCodecType_HEVC :
824  data = ff_videotoolbox_hvcc_extradata_create(avctx);
825  if (data)
826  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
827  break;
828 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
829  case kCMVideoCodecType_VP9 :
830  data = ff_videotoolbox_vpcc_extradata_create(avctx);
831  if (data)
832  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
833  break;
834 #endif
835  default:
836  break;
837  }
838 
839  CFDictionarySetValue(config_info,
840  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
841  avc_info);
842 
843  if (data)
844  CFRelease(data);
845 
846  CFRelease(avc_info);
847  return config_info;
848 }
849 
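/* Map the codec to a CMVideoCodecType, build the decoder specification and
 * format description, then create the VTDecompressionSession that feeds
 * videotoolbox_decoder_callback. */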
850 static int videotoolbox_start(AVCodecContext *avctx)
851 {
852  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
853  OSStatus status;
854  VTDecompressionOutputCallbackRecord decoder_cb;
855  CFDictionaryRef decoder_spec;
856  CFDictionaryRef buf_attr;
857 
858  if (!videotoolbox) {
859  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
860  return -1;
861  }
862 
863  switch( avctx->codec_id ) {
864  case AV_CODEC_ID_H263 :
865  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
866  break;
867  case AV_CODEC_ID_H264 :
868  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
869  break;
870  case AV_CODEC_ID_HEVC :
871  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
872  break;
873  case AV_CODEC_ID_MPEG1VIDEO :
874  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
875  break;
876  case AV_CODEC_ID_MPEG2VIDEO :
877  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
878  break;
879  case AV_CODEC_ID_MPEG4 :
880  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
881  break;
882  case AV_CODEC_ID_PRORES :
883  switch (avctx->codec_tag) {
884  default:
885  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
886  // fall-through
887  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
888  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
889  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
890  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
891  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
892  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
893  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
894  break;
895  }
896  break;
897  case AV_CODEC_ID_VP9 :
898  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
899  break;
900  default :
901  break;
902  }
903 
904 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
905  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
906  if (__builtin_available(macOS 10.9, *)) {
907  VTRegisterProfessionalVideoWorkflowVideoDecoders();
908  }
909  }
910 #endif
911 
912 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
913  if (__builtin_available(macOS 11.0, *)) {
914  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
915  }
916 #endif
917 
918  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
919 
920  if (!decoder_spec) {
921  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
922  return -1;
923  }
924 
925  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
926  decoder_spec,
927  avctx->width,
928  avctx->height);
929  if (!videotoolbox->cm_fmt_desc) {
930  if (decoder_spec)
931  CFRelease(decoder_spec);
932 
933  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
934  return -1;
935  }
936 
937  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
938  avctx->height,
939  videotoolbox->cv_pix_fmt_type);
940 
941  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
942  decoder_cb.decompressionOutputRefCon = avctx;
943 
944  status = VTDecompressionSessionCreate(NULL, // allocator
945  videotoolbox->cm_fmt_desc, // videoFormatDescription
946  decoder_spec, // videoDecoderSpecification
947  buf_attr, // destinationImageBufferAttributes
948  &decoder_cb, // outputCallback
949  &videotoolbox->session); // decompressionSessionOut
950 
951  if (decoder_spec)
952  CFRelease(decoder_spec);
953  if (buf_attr)
954  CFRelease(buf_attr);
955 
956  switch (status) {
957  case kVTVideoDecoderNotAvailableNowErr:
958  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
959  return AVERROR(ENOSYS);
960  case kVTVideoDecoderUnsupportedDataFormatErr:
961  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
962  return AVERROR(ENOSYS);
963  case kVTCouldNotFindVideoDecoderErr:
964  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
965  return AVERROR(ENOSYS);
966  case kVTVideoDecoderMalfunctionErr:
967  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
968  return AVERROR(EINVAL);
969  case kVTVideoDecoderBadDataErr:
970  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
971  return AVERROR_INVALIDDATA;
972  case 0:
973  return 0;
974  default:
975  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
976  return AVERROR_UNKNOWN;
977  }
978 }
979 
980 static const char *videotoolbox_error_string(OSStatus status)
981 {
982  switch (status) {
983  case kVTVideoDecoderBadDataErr:
984  return "bad data";
985  case kVTVideoDecoderMalfunctionErr:
986  return "decoder malfunction";
987  case kVTInvalidSessionErr:
988  return "invalid session";
989  }
990  return "unknown";
991 }
992 
993 int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
994 {
995  OSStatus status;
996  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
997  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
998 
999  if (vtctx->reconfig_needed == true) {
1000  vtctx->reconfig_needed = false;
1001  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1002  videotoolbox_stop(avctx);
1003  if (videotoolbox_start(avctx) != 0) {
1004  return AVERROR_EXTERNAL;
1005  }
1006  }
1007 
1008  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1009  return AVERROR_INVALIDDATA;
1010 
1011  status = videotoolbox_session_decode_frame(avctx);
1012  if (status != noErr) {
1013  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1014  vtctx->reconfig_needed = true;
1015  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1016  return AVERROR_UNKNOWN;
1017  }
1018 
1019  if (!vtctx->frame) {
1020  vtctx->reconfig_needed = true;
1021  return AVERROR_UNKNOWN;
1022  }
1023 
1024  return videotoolbox_buffer_create(avctx, frame);
1025 }
1026 
1027 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1028 {
1029  H264Context *h = avctx->priv_data;
1030  AVFrame *frame = h->cur_pic_ptr->f;
1031  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1032  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1033  vtctx->bitstream_size = 0;
1034  return ret;
1035 }
1036 
1037 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1038  const uint8_t *buffer,
1039  uint32_t size)
1040 {
1041  return 0;
1042 }
1043 
1044 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1045  const uint8_t *buffer,
1046  uint32_t size)
1047 {
1048  return videotoolbox_common_decode_slice(avctx, buffer, size);
1049 }
1050 
1051 
1052 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1053  int type,
1054  const uint8_t *buffer,
1055  uint32_t size)
1056 {
1057  return videotoolbox_common_decode_slice(avctx, buffer, size);
1058 }
1059 
1060 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1061 {
1062  HEVCContext *h = avctx->priv_data;
1063  AVFrame *frame = h->ref->frame;
1064  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1065 
1066  h->output_frame->crop_right = 0;
1067  h->output_frame->crop_left = 0;
1068  h->output_frame->crop_top = 0;
1069  h->output_frame->crop_bottom = 0;
1070 
1071  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1072  vtctx->bitstream_size = 0;
1073  return ret;
1074 }
1075 
1076 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1077  const uint8_t *buffer,
1078  uint32_t size)
1079 {
1080  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1081 
1082  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1083 }
1084 
1085 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1086  const uint8_t *buffer,
1087  uint32_t size)
1088 {
1089  return 0;
1090 }
1091 
1092 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1093 {
1094  MpegEncContext *s = avctx->priv_data;
1095  AVFrame *frame = s->current_picture_ptr->f;
1096 
1097  return ff_videotoolbox_common_end_frame(avctx, frame);
1098 }
1099 
1100 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1101  const uint8_t *buffer,
1102  uint32_t size)
1103 {
1104  return 0;
1105 }
1106 
1107 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1108  const uint8_t *buffer,
1109  uint32_t size)
1110 {
1111  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1112 
1113  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1114 }
1115 
1116 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1117 {
1118  ProresContext *ctx = avctx->priv_data;
1119  AVFrame *frame = ctx->frame;
1120 
1121  return ff_videotoolbox_common_end_frame(avctx, frame);
1122 }
1123 
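/* Choose the sw_format for decoded frames from avctx->sw_pix_fmt: keep the
 * source chroma subsampling and bit depth when a matching biplanar
 * CVPixelBuffer format is available, otherwise fall back to NV12. */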
1124 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1125  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
1126  if (!descriptor)
1127  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1128 
1129  int depth = descriptor->comp[0].depth;
1130 
1131  if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
1132  return AV_PIX_FMT_AYUV64;
1133 
1134 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
1135  if (depth > 10)
1136  return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
1137 #endif
1138 
1139 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
1140  if (descriptor->log2_chroma_w == 0) {
1141 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
1142  if (depth <= 8)
1143  return AV_PIX_FMT_NV24;
1144 #endif
1145  return AV_PIX_FMT_P410;
1146  }
1147 #endif
1148 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
1149  if (descriptor->log2_chroma_h == 0) {
1150 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
1151  if (depth <= 8)
1152  return AV_PIX_FMT_NV16;
1153 #endif
1154  return AV_PIX_FMT_P210;
1155  }
1156 #endif
1157 #if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
1158  if (depth > 8) {
1159  return AV_PIX_FMT_P010;
1160  }
1161 #endif
1162 
1163  return AV_PIX_FMT_NV12;
1164 }
1165 
1166 int ff_videotoolbox_common_init(AVCodecContext *avctx)
1167 {
1168  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1169  AVHWFramesContext *hw_frames;
1170  int err;
1171 
1172  // Old API - do nothing.
1173  if (avctx->hwaccel_context)
1174  return 0;
1175 
1176  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1177  av_log(avctx, AV_LOG_ERROR,
1178  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1179  return AVERROR(EINVAL);
1180  }
1181 
1182  vtctx->vt_ctx = av_videotoolbox_alloc_context();
1183  if (!vtctx->vt_ctx) {
1184  err = AVERROR(ENOMEM);
1185  goto fail;
1186  }
1187 
1188  if (avctx->hw_frames_ctx) {
1189  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1190  } else {
1191  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1192  if (!avctx->hw_frames_ctx) {
1193  err = AVERROR(ENOMEM);
1194  goto fail;
1195  }
1196 
1197  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1198  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1199  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1200  hw_frames->width = avctx->width;
1201  hw_frames->height = avctx->height;
1202 
1203  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1204  if (err < 0) {
1205  av_buffer_unref(&avctx->hw_frames_ctx);
1206  goto fail;
1207  }
1208  }
1209 
1210  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1211  if (!vtctx->cached_hw_frames_ctx) {
1212  err = AVERROR(ENOMEM);
1213  goto fail;
1214  }
1215 
1216  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1217  vtctx->vt_ctx->cv_pix_fmt_type =
1218  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1219  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1220  const AVPixFmtDescriptor *attempted_format =
1221  av_pix_fmt_desc_get(hw_frames->sw_format);
1222  av_log(avctx, AV_LOG_ERROR,
1223  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1224  "a VideoToolbox format!\n",
1225  attempted_format ? attempted_format->name : "<unknown>",
1226  av_color_range_name(avctx->color_range));
1227  err = AVERROR(EINVAL);
1228  goto fail;
1229  }
1230 
1231  err = videotoolbox_start(avctx);
1232  if (err < 0)
1233  goto fail;
1234 
1235  return 0;
1236 
1237 fail:
1238  ff_videotoolbox_uninit(avctx);
1239  return err;
1240 }
1241 
1242 int ff_videotoolbox_frame_params(AVCodecContext *avctx,
1243  AVBufferRef *hw_frames_ctx)
1244 {
1245  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1246 
1247  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1248  frames_ctx->width = avctx->coded_width;
1249  frames_ctx->height = avctx->coded_height;
1250  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1251 
1252  return 0;
1253 }
1254 
1256  .name = "h263_videotoolbox",
1257  .type = AVMEDIA_TYPE_VIDEO,
1258  .id = AV_CODEC_ID_H263,
1259  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1260  .alloc_frame = ff_videotoolbox_alloc_frame,
1261  .start_frame = videotoolbox_mpeg_start_frame,
1262  .decode_slice = videotoolbox_mpeg_decode_slice,
1263  .end_frame = videotoolbox_mpeg_end_frame,
1264  .frame_params = ff_videotoolbox_frame_params,
1265  .init = ff_videotoolbox_common_init,
1266  .uninit = ff_videotoolbox_uninit,
1267  .priv_data_size = sizeof(VTContext),
1268 };
1269 
1271  .name = "hevc_videotoolbox",
1272  .type = AVMEDIA_TYPE_VIDEO,
1273  .id = AV_CODEC_ID_HEVC,
1274  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1275  .alloc_frame = ff_videotoolbox_alloc_frame,
1276  .start_frame = videotoolbox_hevc_start_frame,
1277  .decode_slice = videotoolbox_hevc_decode_slice,
1278  .decode_params = videotoolbox_hevc_decode_params,
1279  .end_frame = videotoolbox_hevc_end_frame,
1280  .frame_params = ff_videotoolbox_frame_params,
1281  .init = ff_videotoolbox_common_init,
1282  .uninit = ff_videotoolbox_uninit,
1283  .priv_data_size = sizeof(VTContext),
1284 };
1285 
1287  .name = "h264_videotoolbox",
1288  .type = AVMEDIA_TYPE_VIDEO,
1289  .id = AV_CODEC_ID_H264,
1290  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1291  .alloc_frame = ff_videotoolbox_alloc_frame,
1292  .start_frame = ff_videotoolbox_h264_start_frame,
1293  .decode_slice = ff_videotoolbox_h264_decode_slice,
1294  .decode_params = videotoolbox_h264_decode_params,
1295  .end_frame = videotoolbox_h264_end_frame,
1296  .frame_params = ff_videotoolbox_frame_params,
1297  .init = ff_videotoolbox_common_init,
1298  .uninit = ff_videotoolbox_uninit,
1299  .priv_data_size = sizeof(VTContext),
1300 };
1301 
1303  .name = "mpeg1_videotoolbox",
1304  .type = AVMEDIA_TYPE_VIDEO,
1305  .id = AV_CODEC_ID_MPEG1VIDEO,
1306  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1307  .alloc_frame = ff_videotoolbox_alloc_frame,
1308  .start_frame = videotoolbox_mpeg_start_frame,
1309  .decode_slice = videotoolbox_mpeg_decode_slice,
1310  .end_frame = videotoolbox_mpeg_end_frame,
1311  .frame_params = ff_videotoolbox_frame_params,
1312  .init = ff_videotoolbox_common_init,
1313  .uninit = ff_videotoolbox_uninit,
1314  .priv_data_size = sizeof(VTContext),
1315 };
1316 
1318  .name = "mpeg2_videotoolbox",
1319  .type = AVMEDIA_TYPE_VIDEO,
1320  .id = AV_CODEC_ID_MPEG2VIDEO,
1321  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1322  .alloc_frame = ff_videotoolbox_alloc_frame,
1323  .start_frame = videotoolbox_mpeg_start_frame,
1324  .decode_slice = videotoolbox_mpeg_decode_slice,
1325  .end_frame = videotoolbox_mpeg_end_frame,
1326  .frame_params = ff_videotoolbox_frame_params,
1327  .init = ff_videotoolbox_common_init,
1328  .uninit = ff_videotoolbox_uninit,
1329  .priv_data_size = sizeof(VTContext),
1330 };
1331 
1333  .name = "mpeg4_videotoolbox",
1334  .type = AVMEDIA_TYPE_VIDEO,
1335  .id = AV_CODEC_ID_MPEG4,
1336  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1337  .alloc_frame = ff_videotoolbox_alloc_frame,
1338  .start_frame = videotoolbox_mpeg_start_frame,
1339  .decode_slice = videotoolbox_mpeg_decode_slice,
1340  .end_frame = videotoolbox_mpeg_end_frame,
1341  .frame_params = ff_videotoolbox_frame_params,
1342  .init = ff_videotoolbox_common_init,
1343  .uninit = ff_videotoolbox_uninit,
1344  .priv_data_size = sizeof(VTContext),
1345 };
1346 
1348  .name = "prores_videotoolbox",
1349  .type = AVMEDIA_TYPE_VIDEO,
1350  .id = AV_CODEC_ID_PRORES,
1351  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1352  .alloc_frame = ff_videotoolbox_alloc_frame,
1353  .start_frame = videotoolbox_prores_start_frame,
1354  .decode_slice = videotoolbox_prores_decode_slice,
1355  .end_frame = videotoolbox_prores_end_frame,
1356  .frame_params = ff_videotoolbox_frame_params,
1357  .init = ff_videotoolbox_common_init,
1358  .uninit = ff_videotoolbox_uninit,
1359  .priv_data_size = sizeof(VTContext),
1360 };
1361 
1362 static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1363  bool full_range)
1364 {
1365  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1366 
1367  if (ret) {
1368  ret->output_callback = videotoolbox_decoder_callback;
1369 
1370  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1371  if (cv_pix_fmt_type == 0) {
1372  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1373  }
1374  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1375  }
1376 
1377  return ret;
1378 }
1379 
1380 AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
1381 {
1382  return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1383 }
1384 
1385 int av_videotoolbox_default_init(AVCodecContext *avctx)
1386 {
1387  return av_videotoolbox_default_init2(avctx, NULL);
1388 }
1389 
1390 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
1391 {
1392  enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
1393  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1394  avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
1395  if (!avctx->hwaccel_context)
1396  return AVERROR(ENOMEM);
1397  return videotoolbox_start(avctx);
1398 }
1399 
1400 void av_videotoolbox_default_free(AVCodecContext *avctx)
1401 {
1402 
1403  videotoolbox_stop(avctx);
1404  av_freep(&avctx->hwaccel_context);
1405 }
1406 #endif /* CONFIG_VIDEOTOOLBOX */