FFmpeg
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
26 #include "libavutil/hwcontext_videotoolbox.h"
27 #include "libavutil/mem.h"
28 #include "vt_internal.h"
29 #include "libavutil/avutil.h"
30 #include "libavutil/hwcontext.h"
31 #include "libavutil/pixdesc.h"
32 #include "bytestream.h"
33 #include "decode.h"
34 #include "internal.h"
35 #include "h264dec.h"
36 #include "hevc/hevcdec.h"
37 #include "hwaccel_internal.h"
38 #include "mpegvideo.h"
39 #include "proresdec.h"
40 #include <Availability.h>
41 #include <AvailabilityMacros.h>
42 #include <TargetConditionals.h>
43 
44 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
45 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
46 #endif
47 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
48 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
49 #endif
50 
51 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
52 enum { kCMVideoCodecType_HEVC = 'hvc1' };
53 #endif
54 
55 #if !HAVE_KCMVIDEOCODECTYPE_VP9
56 enum { kCMVideoCodecType_VP9 = 'vp09' };
57 #endif
58 
59 #if !HAVE_KCMVIDEOCODECTYPE_AV1
60 enum { kCMVideoCodecType_AV1 = 'av01' };
61 #endif
62 
63 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
64 
65 typedef struct VTHWFrame {
66  CVPixelBufferRef pixbuf;
67  AVBufferRef *hw_frames_ctx;
68 } VTHWFrame;
69 
70 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
71 {
72  VTHWFrame *ref = (VTHWFrame *)data;
73  av_buffer_unref(&ref->hw_frames_ctx);
74  CVPixelBufferRelease(ref->pixbuf);
75 
76  av_free(data);
77 }
78 
79 int ff_videotoolbox_buffer_copy(VTContext *vtctx,
80  const uint8_t *buffer,
81  uint32_t size)
82 {
83  void *tmp;
84 
85  tmp = av_fast_realloc(vtctx->bitstream,
86  &vtctx->allocated_size,
87  size);
88 
89  if (!tmp)
90  return AVERROR(ENOMEM);
91 
92  vtctx->bitstream = tmp;
93  memcpy(vtctx->bitstream, buffer, size);
94  vtctx->bitstream_size = size;
95 
96  return 0;
97 }
98 
99 int ff_videotoolbox_buffer_append(VTContext *vtctx,
100  const uint8_t *buffer,
101  uint32_t size)
102 {
103  void *tmp;
104 
105  tmp = av_fast_realloc(vtctx->bitstream,
106  &vtctx->allocated_size,
107  vtctx->bitstream_size + size);
108 
109  if (!tmp)
110  return AVERROR(ENOMEM);
111 
112  vtctx->bitstream = tmp;
113  memcpy(vtctx->bitstream + vtctx->bitstream_size, buffer, size);
114  vtctx->bitstream_size += size;
115 
116  return 0;
117 }
118 
119 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
120 {
121  int ret;
122  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
123 
124  if (!ref->pixbuf) {
125  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
126  av_frame_unref(frame);
127  return AVERROR_EXTERNAL;
128  }
129 
130  frame->crop_right = 0;
131  frame->crop_left = 0;
132  frame->crop_top = 0;
133  frame->crop_bottom = 0;
134 
135  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
136  return ret;
137 
138  frame->data[3] = (uint8_t*)ref->pixbuf;
139 
140  if (ref->hw_frames_ctx) {
141  av_buffer_unref(&frame->hw_frames_ctx);
142  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
143  if (!frame->hw_frames_ctx)
144  return AVERROR(ENOMEM);
145  }
146 
147  return 0;
148 }
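/*
 * Note (illustrative, not a statement of the full API contract): for
 * AV_PIX_FMT_VIDEOTOOLBOX output frames, data[3] carries the CVPixelBufferRef
 * set above. A downstream consumer would typically access it roughly as:
 *
 *     CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
 *     CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
 *     // read planes via CVPixelBufferGetBaseAddressOfPlane(), then unlock
 *     CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
 */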
149 
150 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
151 {
152  size_t size = sizeof(VTHWFrame);
153  uint8_t *data = NULL;
154  AVBufferRef *buf = NULL;
155  int ret = ff_attach_decode_data(frame);
156  FrameDecodeData *fdd;
157  if (ret < 0)
158  return ret;
159 
160  data = av_mallocz(size);
161  if (!data)
162  return AVERROR(ENOMEM);
163  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
164  if (!buf) {
165  av_freep(&data);
166  return AVERROR(ENOMEM);
167  }
168  frame->buf[0] = buf;
169 
170  fdd = (FrameDecodeData*)frame->private_ref->data;
171  fdd->post_process = videotoolbox_postproc_frame;
172 
173  frame->width = avctx->width;
174  frame->height = avctx->height;
175  frame->format = avctx->pix_fmt;
176 
177  return 0;
178 }
179 
180 #define AV_W8(p, v) *(p) = (v)
181 
182 static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
183 {
184  int i;
185  int size = src_size;
186  uint8_t* p = dst;
187 
188  for (i = 0; i < src_size; i++) {
189  if (i + 2 < src_size &&
190  src[i] == 0x00 &&
191  src[i + 1] == 0x00 &&
192  src[i + 2] <= 0x03) {
193  if (dst) {
194  *p++ = src[i++];
195  *p++ = src[i];
196  *p++ = 0x03;
197  } else {
198  i++;
199  }
200  size++;
201  } else if (dst)
202  *p++ = src[i];
203  }
204 
205  if (dst)
206  av_assert0((p - dst) == size);
207 
208  return size;
209 }
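/*
 * Worked example (sketch of the behaviour above): escape_ps() inserts an
 * emulation-prevention byte 0x03 after every 0x00 0x00 pair that is followed
 * by a byte <= 0x03, so the raw parameter-set bytes
 *     ... 0x00 0x00 0x01 ...
 * are written out as
 *     ... 0x00 0x00 0x03 0x01 ...
 * A first pass with dst == NULL only computes the escaped size, so the
 * callers below can allocate the avcC/hvcC buffer before the writing pass.
 */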
210 
211 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
212 {
213  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
214  H264Context *h = avctx->priv_data;
215  CFDataRef data = NULL;
216  uint8_t *p;
217  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
218  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
219  int vt_extradata_size;
220  uint8_t *vt_extradata;
221 
222  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
223  vt_extradata = av_malloc(vt_extradata_size);
224 
225  if (!vt_extradata)
226  return NULL;
227 
228  p = vt_extradata;
229 
230  AV_W8(p + 0, 1); /* version */
231  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
232  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
233  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
234  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
235  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
236  AV_WB16(p + 6, sps_size);
237  p += 8;
238  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
239  AV_W8(p + 0, 1); /* number of pps */
240  AV_WB16(p + 1, pps_size);
241  p += 3;
242  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
243 
244  av_assert0(p - vt_extradata == vt_extradata_size);
245 
246  // save sps header (profile/level) used to create decoder session,
247  // so we can detect changes and recreate it.
248  if (vtctx)
249  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
250 
251  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
252  av_free(vt_extradata);
253  return data;
254 }
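/*
 * Resulting avcC layout (sketch, matching the writes above):
 *   [0]    configurationVersion = 1
 *   [1..3] profile, profile compatibility, level (SPS bytes 1..3)
 *   [4]    0xff -> lengthSizeMinusOne = 3, i.e. 4-byte NAL length prefixes
 *   [5]    0xe1 -> one sequence parameter set
 *   [6..7] sequenceParameterSetLength, followed by the escaped SPS
 *   then   one picture parameter set: count, length, escaped PPS
 */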
255 
256 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
257 {
258  HEVCContext *h = avctx->priv_data;
259  int i, num_vps = 0, num_sps = 0, num_pps = 0;
260  const HEVCPPS *pps = h->pps;
261  const HEVCSPS *sps = pps->sps;
262  const HEVCVPS *vps = sps->vps;
263  PTLCommon ptlc = vps->ptl.general_ptl;
264  VUI vui = sps->vui;
265  uint8_t parallelismType;
266  CFDataRef data = NULL;
267  uint8_t *p;
268  int vt_extradata_size = 23 + 3 + 3 + 3;
269  uint8_t *vt_extradata;
270 
271 #define COUNT_SIZE_PS(T, t) \
272  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
273  if (h->ps.t##ps_list[i]) { \
274  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
275  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
276  num_##t##ps++; \
277  } \
278  }
279 
280  COUNT_SIZE_PS(V, v)
281  COUNT_SIZE_PS(S, s)
282  COUNT_SIZE_PS(P, p)
283 
284  vt_extradata = av_malloc(vt_extradata_size);
285  if (!vt_extradata)
286  return NULL;
287  p = vt_extradata;
288 
289  /* unsigned int(8) configurationVersion = 1; */
290  AV_W8(p + 0, 1);
291 
292  /*
293  * unsigned int(2) general_profile_space;
294  * unsigned int(1) general_tier_flag;
295  * unsigned int(5) general_profile_idc;
296  */
297  AV_W8(p + 1, ptlc.profile_space << 6 |
298  ptlc.tier_flag << 5 |
299  ptlc.profile_idc);
300 
301  /* unsigned int(32) general_profile_compatibility_flags; */
302  for (i = 0; i < 4; i++) {
303  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
304  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
305  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
306  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
307  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
308  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
309  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
310  ptlc.profile_compatibility_flag[i * 8 + 7]);
311  }
312 
313  /* unsigned int(48) general_constraint_indicator_flags; */
314  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
315  ptlc.interlaced_source_flag << 6 |
316  ptlc.non_packed_constraint_flag << 5 |
317  ptlc.frame_only_constraint_flag << 4);
318  AV_W8(p + 7, 0);
319  AV_WN32(p + 8, 0);
320 
321  /* unsigned int(8) general_level_idc; */
322  AV_W8(p + 12, ptlc.level_idc);
323 
324  /*
325  * bit(4) reserved = ‘1111’b;
326  * unsigned int(12) min_spatial_segmentation_idc;
327  */
328  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
329  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
330 
331  /*
332  * bit(6) reserved = ‘111111’b;
333  * unsigned int(2) parallelismType;
334  */
335  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
336  parallelismType = 0;
337  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
338  parallelismType = 0;
339  else if (pps->entropy_coding_sync_enabled_flag)
340  parallelismType = 3;
341  else if (pps->tiles_enabled_flag)
342  parallelismType = 2;
343  else
344  parallelismType = 1;
345  AV_W8(p + 15, 0xfc | parallelismType);
346 
347  /*
348  * bit(6) reserved = ‘111111’b;
349  * unsigned int(2) chromaFormat;
350  */
351  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
352 
353  /*
354  * bit(5) reserved = ‘11111’b;
355  * unsigned int(3) bitDepthLumaMinus8;
356  */
357  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
358 
359  /*
360  * bit(5) reserved = ‘11111’b;
361  * unsigned int(3) bitDepthChromaMinus8;
362  */
363  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
364 
365  /* bit(16) avgFrameRate; */
366  AV_WB16(p + 19, 0);
367 
368  /*
369  * bit(2) constantFrameRate;
370  * bit(3) numTemporalLayers;
371  * bit(1) temporalIdNested;
372  * unsigned int(2) lengthSizeMinusOne;
373  */
374  AV_W8(p + 21, 0 << 6 |
375  sps->max_sub_layers << 3 |
376  sps->temporal_id_nesting << 2 |
377  3);
378 
379  /* unsigned int(8) numOfArrays; */
380  AV_W8(p + 22, 3);
381 
382  p += 23;
383 
384 #define APPEND_PS(T, t) \
385  /* \
386  * bit(1) array_completeness; \
387  * unsigned int(1) reserved = 0; \
388  * unsigned int(6) NAL_unit_type; \
389  */ \
390  AV_W8(p, 1 << 7 | \
391  HEVC_NAL_##T##PS & 0x3f); \
392  /* unsigned int(16) numNalus; */ \
393  AV_WB16(p + 1, num_##t##ps); \
394  p += 3; \
395  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
396  if (h->ps.t##ps_list[i]) { \
397  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
398  int size = escape_ps(p + 2, lps->data, lps->data_size); \
399  /* unsigned int(16) nalUnitLength; */ \
400  AV_WB16(p, size); \
401  /* bit(8*nalUnitLength) nalUnit; */ \
402  p += 2 + size; \
403  } \
404  }
405 
406  APPEND_PS(V, v)
407  APPEND_PS(S, s)
408  APPEND_PS(P, p)
409 
410  av_assert0(p - vt_extradata == vt_extradata_size);
411 
412  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
413  av_free(vt_extradata);
414  return data;
415 }
416 
417 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
418  const uint8_t *buffer,
419  uint32_t size)
420 {
421  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
422  H264Context *h = avctx->priv_data;
423 
424  if (h->is_avc == 1) {
425  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
426  }
427 
428  return 0;
429 }
430 
431 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
432  int type,
433  const uint8_t *buffer,
434  uint32_t size)
435 {
436  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
437  H264Context *h = avctx->priv_data;
438 
439  // save sps header (profile/level) used to create decoder session
440  if (!vtctx->sps[0])
441  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
442 
443  if (type == H264_NAL_SPS) {
444  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
445  vtctx->reconfig_needed = true;
446  memcpy(vtctx->sps, buffer + 1, 3);
447  }
448  }
449 
450  // pass-through SPS/PPS changes to the decoder
451  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
452 }
453 
454 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
455  const uint8_t *buffer,
456  uint32_t size)
457 {
458  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
459  void *tmp;
460 
461  tmp = av_fast_realloc(vtctx->bitstream,
462  &vtctx->allocated_size,
463  vtctx->bitstream_size+size+4);
464  if (!tmp)
465  return AVERROR(ENOMEM);
466 
467  vtctx->bitstream = tmp;
468 
469  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
470  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
471 
472  vtctx->bitstream_size += size + 4;
473 
474  return 0;
475 }
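/*
 * Illustration (sketch): videotoolbox_common_decode_slice() converts each
 * Annex B NAL unit into the 4-byte length-prefixed form that matches
 * lengthSizeMinusOne = 3 in the avcC/hvcC written above. A 5-byte NAL
 * payload is appended to vtctx->bitstream as:
 *     00 00 00 05 <5 payload bytes>
 */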
476 
477 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
478  const uint8_t *buffer,
479  uint32_t size)
480 {
481  H264Context *h = avctx->priv_data;
482 
483  if (h->is_avc == 1)
484  return 0;
485 
486  return videotoolbox_common_decode_slice(avctx, buffer, size);
487 }
488 
489 #if CONFIG_VIDEOTOOLBOX
490 // Return the AVVideotoolboxContext that matters currently. Where it comes from
491 // depends on the API used.
492 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
493 {
494  // Somewhat tricky because the user can call av_videotoolbox_default_free()
495  // at any time, even when the codec is closed.
496  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
497  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
498  if (vtctx->vt_ctx)
499  return vtctx->vt_ctx;
500  }
501  return avctx->hwaccel_context;
502 }
503 
504 static void videotoolbox_stop(AVCodecContext *avctx)
505 {
506  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
507  if (!videotoolbox)
508  return;
509 
510  if (videotoolbox->cm_fmt_desc) {
511  CFRelease(videotoolbox->cm_fmt_desc);
512  videotoolbox->cm_fmt_desc = NULL;
513  }
514 
515  if (videotoolbox->session) {
516  VTDecompressionSessionInvalidate(videotoolbox->session);
517  CFRelease(videotoolbox->session);
518  videotoolbox->session = NULL;
519  }
520 }
521 
522 int ff_videotoolbox_uninit(AVCodecContext *avctx)
523 {
524  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
525  if (!vtctx)
526  return 0;
527 
528  av_freep(&vtctx->bitstream);
529  if (vtctx->frame)
530  CVPixelBufferRelease(vtctx->frame);
531 
532  if (vtctx->vt_ctx)
533  videotoolbox_stop(avctx);
534 
535  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
536  av_freep(&vtctx->vt_ctx);
537 
538  return 0;
539 }
540 
541 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
542 {
543  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
544  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
545  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
546  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
547  int width = CVPixelBufferGetWidth(pixbuf);
548  int height = CVPixelBufferGetHeight(pixbuf);
549  AVHWFramesContext *cached_frames;
550  VTHWFrame *ref;
551  int ret;
552 
553  if (!frame->buf[0] || frame->data[3]) {
554  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
555  av_frame_unref(frame);
556  return AVERROR_EXTERNAL;
557  }
558 
559  ref = (VTHWFrame *)frame->buf[0]->data;
560 
561  if (ref->pixbuf)
562  CVPixelBufferRelease(ref->pixbuf);
563  ref->pixbuf = vtctx->frame;
564  vtctx->frame = NULL;
565 
566  // Old API code path.
567  if (!vtctx->cached_hw_frames_ctx)
568  return 0;
569 
570  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
571 
572  if (cached_frames->sw_format != sw_format ||
573  cached_frames->width != width ||
574  cached_frames->height != height) {
575  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
576  AVHWFramesContext *hw_frames;
577  AVVTFramesContext *hw_ctx;
578  if (!hw_frames_ctx)
579  return AVERROR(ENOMEM);
580 
581  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
582  hw_frames->format = cached_frames->format;
583  hw_frames->sw_format = sw_format;
584  hw_frames->width = width;
585  hw_frames->height = height;
586  hw_ctx = hw_frames->hwctx;
587  hw_ctx->color_range = avctx->color_range;
588 
589  ret = av_hwframe_ctx_init(hw_frames_ctx);
590  if (ret < 0) {
591  av_buffer_unref(&hw_frames_ctx);
592  return ret;
593  }
594 
595  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
596  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
597  }
598 
599  av_buffer_unref(&ref->hw_frames_ctx);
600  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
601  if (!ref->hw_frames_ctx)
602  return AVERROR(ENOMEM);
603 
604  return 0;
605 }
606 
607 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
608 {
609  int i;
610  uint8_t b;
611 
612  for (i = 3; i >= 0; i--) {
613  b = (length >> (i * 7)) & 0x7F;
614  if (i != 0)
615  b |= 0x80;
616 
617  bytestream2_put_byteu(pb, b);
618  }
619 }
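/*
 * Worked example (sketch): the MP4 descriptor length is always emitted as
 * four bytes, 7 payload bits each, MSB first, with the continuation bit set
 * on all but the last byte. For length = 300 (0x12C) the bytes written are:
 *     0x80 0x80 0x82 0x2C
 */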
620 
621 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
622 {
623  CFDataRef data;
624  uint8_t *rw_extradata;
625  PutByteContext pb;
626  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
627  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
628  int config_size = 13 + 5 + avctx->extradata_size;
629  int s;
630 
631  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
632  return NULL;
633 
634  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
635  bytestream2_put_byteu(&pb, 0); // version
636  bytestream2_put_ne24(&pb, 0); // flags
637 
638  // elementary stream descriptor
639  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
640  videotoolbox_write_mp4_descr_length(&pb, full_size);
641  bytestream2_put_ne16(&pb, 0); // esid
642  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
643 
644  // decoder configuration descriptor
645  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
646  videotoolbox_write_mp4_descr_length(&pb, config_size);
647  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
648  bytestream2_put_byteu(&pb, 0x11); // stream type
649  bytestream2_put_ne24(&pb, 0); // buffer size
650  bytestream2_put_ne32(&pb, 0); // max bitrate
651  bytestream2_put_ne32(&pb, 0); // avg bitrate
652 
653  // decoder specific descriptor
654  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
655  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
656 
657  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
658 
659  // SLConfigDescriptor
660  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
661  bytestream2_put_byteu(&pb, 0x01); // length
662  bytestream2_put_byteu(&pb, 0x02); //
663 
664  s = bytestream2_size_p(&pb);
665 
666  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
667 
668  av_freep(&rw_extradata);
669  return data;
670 }
671 
672 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
673  void *buffer,
674  int size)
675 {
676  OSStatus status;
677  CMBlockBufferRef block_buf;
678  CMSampleBufferRef sample_buf;
679 
680  block_buf = NULL;
681  sample_buf = NULL;
682 
683  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
684  buffer, // memoryBlock
685  size, // blockLength
686  kCFAllocatorNull, // blockAllocator
687  NULL, // customBlockSource
688  0, // offsetToData
689  size, // dataLength
690  0, // flags
691  &block_buf);
692 
693  if (!status) {
694  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
695  block_buf, // dataBuffer
696  TRUE, // dataReady
697  0, // makeDataReadyCallback
698  0, // makeDataReadyRefcon
699  fmt_desc, // formatDescription
700  1, // numSamples
701  0, // numSampleTimingEntries
702  NULL, // sampleTimingArray
703  0, // numSampleSizeEntries
704  NULL, // sampleSizeArray
705  &sample_buf);
706  }
707 
708  if (block_buf)
709  CFRelease(block_buf);
710 
711  return sample_buf;
712 }
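/*
 * Note (sketch): passing kCFAllocatorNull as the blockAllocator makes the
 * CMBlockBuffer wrap the caller's buffer (vtctx->bitstream) without copying
 * or freeing it, so that buffer must stay valid until the sample buffer is
 * released after the synchronous decode in
 * videotoolbox_session_decode_frame() below.
 */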
713 
714 static void videotoolbox_decoder_callback(void *opaque,
715  void *sourceFrameRefCon,
716  OSStatus status,
717  VTDecodeInfoFlags flags,
718  CVImageBufferRef image_buffer,
719  CMTime pts,
720  CMTime duration)
721 {
722  VTContext *vtctx = opaque;
723 
724  if (vtctx->frame) {
725  CVPixelBufferRelease(vtctx->frame);
726  vtctx->frame = NULL;
727  }
728 
729  if (!image_buffer) {
730  av_log(vtctx->logctx, status ? AV_LOG_WARNING : AV_LOG_DEBUG,
731  "vt decoder cb: output image buffer is null: %i\n", status);
732  return;
733  }
734 
735  vtctx->frame = CVPixelBufferRetain(image_buffer);
736 }
737 
738 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
739 {
740  OSStatus status;
741  CMSampleBufferRef sample_buf;
742  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
743  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
744 
745  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
746  vtctx->bitstream,
747  vtctx->bitstream_size);
748 
749  if (!sample_buf)
750  return -1;
751 
752  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
753  sample_buf,
754  0, // decodeFlags
755  NULL, // sourceFrameRefCon
756  0); // infoFlagsOut
757  if (status == noErr)
758  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
759 
760  CFRelease(sample_buf);
761 
762  return status;
763 }
764 
765 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
766  CFDictionaryRef decoder_spec,
767  int width,
768  int height)
769 {
770  CMFormatDescriptionRef cm_fmt_desc;
771  OSStatus status;
772 
773  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
774  codec_type,
775  width,
776  height,
777  decoder_spec, // Dictionary of extension
778  &cm_fmt_desc);
779 
780  if (status)
781  return NULL;
782 
783  return cm_fmt_desc;
784 }
785 
786 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
787  int height,
788  OSType pix_fmt)
789 {
790  CFMutableDictionaryRef buffer_attributes;
791  CFMutableDictionaryRef io_surface_properties;
792  CFNumberRef cv_pix_fmt;
793  CFNumberRef w;
794  CFNumberRef h;
795 
796  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
797  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
798  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
799 
800  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
801  4,
802  &kCFTypeDictionaryKeyCallBacks,
803  &kCFTypeDictionaryValueCallBacks);
804  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
805  0,
806  &kCFTypeDictionaryKeyCallBacks,
807  &kCFTypeDictionaryValueCallBacks);
808 
809  if (pix_fmt)
810  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
811  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
812  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
813  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
814 #if TARGET_OS_IPHONE
815  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
816 #else
817  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
818 #endif
819 
820  CFRelease(io_surface_properties);
821  CFRelease(cv_pix_fmt);
822  CFRelease(w);
823  CFRelease(h);
824 
825  return buffer_attributes;
826 }
827 
828 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
829  AVCodecContext *avctx)
830 {
831  CFMutableDictionaryRef avc_info;
832  CFDataRef data = NULL;
833 
834  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
835  0,
836  &kCFTypeDictionaryKeyCallBacks,
837  &kCFTypeDictionaryValueCallBacks);
838 
839  CFDictionarySetValue(config_info,
840  codec_type == kCMVideoCodecType_HEVC ?
841  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
842  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
843  kCFBooleanTrue);
844 
845  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
846  1,
847  &kCFTypeDictionaryKeyCallBacks,
848  &kCFTypeDictionaryValueCallBacks);
849 
850  switch (codec_type) {
851  case kCMVideoCodecType_MPEG4Video :
852  if (avctx->extradata_size)
853  data = videotoolbox_esds_extradata_create(avctx);
854  if (data)
855  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
856  break;
857  case kCMVideoCodecType_H264 :
858  data = ff_videotoolbox_avcc_extradata_create(avctx);
859  if (data)
860  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
861  break;
862  case kCMVideoCodecType_HEVC :
863  data = ff_videotoolbox_hvcc_extradata_create(avctx);
864  if (data)
865  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
866  break;
867 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
868  case kCMVideoCodecType_VP9 :
869  data = ff_videotoolbox_vpcc_extradata_create(avctx);
870  if (data)
871  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
872  break;
873 #endif
874 #if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
875  case kCMVideoCodecType_AV1 :
876  data = ff_videotoolbox_av1c_extradata_create(avctx);
877  if (data)
878  CFDictionarySetValue(avc_info, CFSTR("av1C"), data);
879  break;
880 #endif
881  default:
882  break;
883  }
884 
885  CFDictionarySetValue(config_info,
886  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
887  avc_info);
888 
889  if (data)
890  CFRelease(data);
891 
892  CFRelease(avc_info);
893  return config_info;
894 }
895 
896 static int videotoolbox_start(AVCodecContext *avctx)
897 {
898  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
899  OSStatus status;
900  VTDecompressionOutputCallbackRecord decoder_cb;
901  CFDictionaryRef decoder_spec;
902  CFDictionaryRef buf_attr;
903 
904  if (!videotoolbox) {
905  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
906  return -1;
907  }
908 
909  switch( avctx->codec_id ) {
910  case AV_CODEC_ID_H263 :
911  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
912  break;
913  case AV_CODEC_ID_H264 :
914  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
915  break;
916  case AV_CODEC_ID_HEVC :
917  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
918  break;
919  case AV_CODEC_ID_MPEG1VIDEO :
920  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
921  break;
922  case AV_CODEC_ID_MPEG2VIDEO :
923  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
924  break;
925  case AV_CODEC_ID_MPEG4 :
926  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
927  break;
928  case AV_CODEC_ID_PRORES :
929  switch (avctx->codec_tag) {
930  default:
931  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
932  // fall-through
933  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
934  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
935  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
936  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
937  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
938  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
939  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
940  break;
941  }
942  break;
943  case AV_CODEC_ID_VP9 :
944  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
945  break;
946  case AV_CODEC_ID_AV1 :
947  videotoolbox->cm_codec_type = kCMVideoCodecType_AV1;
948  break;
949  default :
950  break;
951  }
952 
953 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
954  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
955  if (__builtin_available(macOS 10.9, *)) {
956  VTRegisterProfessionalVideoWorkflowVideoDecoders();
957  }
958  }
959 #endif
960 
961 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
962  if (__builtin_available(macOS 11.0, *)) {
963  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
964  }
965 #endif
966 
967  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
968 
969  if (!decoder_spec) {
970  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
971  return -1;
972  }
973 
974  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
975  decoder_spec,
976  avctx->width,
977  avctx->height);
978  if (!videotoolbox->cm_fmt_desc) {
979  if (decoder_spec)
980  CFRelease(decoder_spec);
981 
982  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
983  return -1;
984  }
985 
986  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
987  avctx->height,
988  videotoolbox->cv_pix_fmt_type);
989 
990  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
991  decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data;
992 
993  status = VTDecompressionSessionCreate(NULL, // allocator
994  videotoolbox->cm_fmt_desc, // videoFormatDescription
995  decoder_spec, // videoDecoderSpecification
996  buf_attr, // destinationImageBufferAttributes
997  &decoder_cb, // outputCallback
998  &videotoolbox->session); // decompressionSessionOut
999 
1000  if (decoder_spec)
1001  CFRelease(decoder_spec);
1002  if (buf_attr)
1003  CFRelease(buf_attr);
1004 
1005  switch (status) {
1006  case kVTVideoDecoderNotAvailableNowErr:
1007  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
1008  return AVERROR(ENOSYS);
1009  case kVTVideoDecoderUnsupportedDataFormatErr:
1010  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
1011  return AVERROR(ENOSYS);
1012  case kVTCouldNotFindVideoDecoderErr:
1013  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
1014  return AVERROR(ENOSYS);
1015  case kVTVideoDecoderMalfunctionErr:
1016  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
1017  return AVERROR(EINVAL);
1018  case kVTVideoDecoderBadDataErr:
1019  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
1020  return AVERROR_INVALIDDATA;
1021  case 0:
1022  return 0;
1023  default:
1024  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
1025  return AVERROR_UNKNOWN;
1026  }
1027 }
1028 
1029 static const char *videotoolbox_error_string(OSStatus status)
1030 {
1031  switch (status) {
1032  case kVTVideoDecoderBadDataErr:
1033  return "bad data";
1034  case kVTVideoDecoderMalfunctionErr:
1035  return "decoder malfunction";
1036  case kVTInvalidSessionErr:
1037  return "invalid session";
1038  }
1039  return "unknown";
1040 }
1041 
1042 int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
1043 {
1044  OSStatus status;
1045  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1046  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1047 
1048  if (vtctx->reconfig_needed == true) {
1049  vtctx->reconfig_needed = false;
1050  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1051  videotoolbox_stop(avctx);
1052  if (videotoolbox_start(avctx) != 0) {
1053  return AVERROR_EXTERNAL;
1054  }
1055  }
1056 
1057  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1058  return AVERROR_INVALIDDATA;
1059 
1060  status = videotoolbox_session_decode_frame(avctx);
1061  if (status != noErr) {
1062  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1063  vtctx->reconfig_needed = true;
1064  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1065  return AVERROR_UNKNOWN;
1066  }
1067 
1068  if (!vtctx->frame) {
1069  vtctx->reconfig_needed = true;
1070  return AVERROR_UNKNOWN;
1071  }
1072 
1073  return videotoolbox_buffer_create(avctx, frame);
1074 }
1075 
1076 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1077 {
1078  H264Context *h = avctx->priv_data;
1079  AVFrame *frame = h->cur_pic_ptr->f;
1080  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1081  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1082  vtctx->bitstream_size = 0;
1083  return ret;
1084 }
1085 
1086 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1087  const uint8_t *buffer,
1088  uint32_t size)
1089 {
1090  HEVCContext *h = avctx->priv_data;
1091  AVFrame *frame = h->cur_frame->f;
1092 
1093  frame->crop_right = 0;
1094  frame->crop_left = 0;
1095  frame->crop_top = 0;
1096  frame->crop_bottom = 0;
1097 
1098  return 0;
1099 }
1100 
1101 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1102  const uint8_t *buffer,
1103  uint32_t size)
1104 {
1105  return videotoolbox_common_decode_slice(avctx, buffer, size);
1106 }
1107 
1108 
1109 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1110  int type,
1111  const uint8_t *buffer,
1112  uint32_t size)
1113 {
1114  return videotoolbox_common_decode_slice(avctx, buffer, size);
1115 }
1116 
1117 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1118 {
1119  HEVCContext *h = avctx->priv_data;
1120  AVFrame *frame = h->cur_frame->f;
1121  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1122  int ret;
1123 
1124  ret = ff_videotoolbox_common_end_frame(avctx, frame);
1125  vtctx->bitstream_size = 0;
1126  return ret;
1127 }
1128 
1129 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1130  const uint8_t *buffer,
1131  uint32_t size)
1132 {
1133  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1134 
1135  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1136 }
1137 
1138 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1139  const uint8_t *buffer,
1140  uint32_t size)
1141 {
1142  return 0;
1143 }
1144 
1145 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1146 {
1147  MpegEncContext *s = avctx->priv_data;
1148  AVFrame *frame = s->cur_pic.ptr->f;
1149 
1150  return ff_videotoolbox_common_end_frame(avctx, frame);
1151 }
1152 
1153 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1154  const uint8_t *buffer,
1155  uint32_t size)
1156 {
1157  return 0;
1158 }
1159 
1160 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1161  const uint8_t *buffer,
1162  uint32_t size)
1163 {
1164  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1165 
1166  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1167 }
1168 
1169 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1170 {
1171  ProresContext *ctx = avctx->priv_data;
1172  AVFrame *frame = ctx->frame;
1173 
1174  return ff_videotoolbox_common_end_frame(avctx, frame);
1175 }
1176 
1177 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1178  int depth;
1179  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
1180  if (!descriptor)
1181  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1182 
1183 
1184  if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
1185  return AV_PIX_FMT_AYUV64;
1186 
1187  depth = descriptor->comp[0].depth;
1188 
1189 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
1190  if (depth > 10)
1191  return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
1192 #endif
1193 
1194 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
1195  if (descriptor->log2_chroma_w == 0) {
1196 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
1197  if (depth <= 8)
1198  return AV_PIX_FMT_NV24;
1199 #endif
1200  return AV_PIX_FMT_P410;
1201  }
1202 #endif
1203 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
1204  if (descriptor->log2_chroma_h == 0) {
1205 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
1206  if (depth <= 8)
1207  return AV_PIX_FMT_NV16;
1208 #endif
1209  return AV_PIX_FMT_P210;
1210  }
1211 #endif
1212 #if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
1213  if (depth > 8) {
1214  return AV_PIX_FMT_P010;
1215  }
1216 #endif
1217 
1218  return AV_PIX_FMT_NV12;
1219 }
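/*
 * Summary of the selection above (sketch), keyed on the stream's sw_pix_fmt:
 * alpha -> AYUV64; >10 bit -> P416 (4:4:4) or P216; 4:4:4 -> NV24 (<=8 bit)
 * or P410; 4:2:2 -> NV16 (<=8 bit) or P210; >8 bit 4:2:0 -> P010; otherwise
 * NV12. Each branch is compiled in only when the corresponding
 * kCVPixelFormatType_* constant is available at build time.
 */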
1220 
1221 static AVVideotoolboxContext *videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1222  bool full_range)
1223 {
1224  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1225 
1226  if (ret) {
1227  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1228  if (cv_pix_fmt_type == 0) {
1229  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1230  }
1231  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1232  }
1233 
1234  return ret;
1235 }
1236 
1237 int ff_videotoolbox_common_init(AVCodecContext *avctx)
1238 {
1239  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1240  AVHWFramesContext *hw_frames;
1241  AVVTFramesContext *hw_ctx;
1242  int err;
1243  bool full_range;
1244 
1245  vtctx->logctx = avctx;
1246 
1247  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx &&
1248  avctx->hwaccel_context)
1249  return videotoolbox_start(avctx);
1250 
1251  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1252  av_log(avctx, AV_LOG_ERROR,
1253  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1254  return AVERROR(EINVAL);
1255  }
1256 
1257  vtctx->vt_ctx = videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1258  if (!vtctx->vt_ctx) {
1259  err = AVERROR(ENOMEM);
1260  goto fail;
1261  }
1262 
1263  if (avctx->hw_frames_ctx) {
1264  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1265  } else {
1266  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1267  if (!avctx->hw_frames_ctx) {
1268  err = AVERROR(ENOMEM);
1269  goto fail;
1270  }
1271 
1272  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1273  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1274  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1275  hw_frames->width = avctx->width;
1276  hw_frames->height = avctx->height;
1277  hw_ctx = hw_frames->hwctx;
1278  hw_ctx->color_range = avctx->color_range;
1279 
1280  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1281  if (err < 0) {
1282  av_buffer_unref(&avctx->hw_frames_ctx);
1283  goto fail;
1284  }
1285  }
1286 
1287  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1288  if (!vtctx->cached_hw_frames_ctx) {
1289  err = AVERROR(ENOMEM);
1290  goto fail;
1291  }
1292 
1293  full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1294  vtctx->vt_ctx->cv_pix_fmt_type =
1295  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1296  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1297  const AVPixFmtDescriptor *attempted_format =
1298  av_pix_fmt_desc_get(hw_frames->sw_format);
1299  av_log(avctx, AV_LOG_ERROR,
1300  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1301  "a VideoToolbox format!\n",
1302  attempted_format ? attempted_format->name : "<unknown>",
1303  av_color_range_name(avctx->color_range));
1304  err = AVERROR(EINVAL);
1305  goto fail;
1306  }
1307 
1308  err = videotoolbox_start(avctx);
1309  if (err < 0)
1310  goto fail;
1311 
1312  return 0;
1313 
1314 fail:
1315  ff_videotoolbox_uninit(avctx);
1316  return err;
1317 }
1318 
1319 int ff_videotoolbox_frame_params(AVCodecContext *avctx,
1320  AVBufferRef *hw_frames_ctx)
1321 {
1322  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1323 
1324  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1325  frames_ctx->width = avctx->coded_width;
1326  frames_ctx->height = avctx->coded_height;
1327  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1328 
1329  return 0;
1330 }
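/*
 * Usage sketch (illustrative only, mirroring FFmpeg's generic hwaccel setup,
 * not part of this file): an application typically enables these hwaccels
 * through the hw_device_ctx path and a get_format callback, roughly:
 *
 *     static enum AVPixelFormat get_vt_format(AVCodecContext *avctx,
 *                                             const enum AVPixelFormat *fmts)
 *     {
 *         for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
 *             if (*p == AV_PIX_FMT_VIDEOTOOLBOX)
 *                 return *p;               // pick the hw format if offered
 *         return AV_PIX_FMT_NONE;          // VideoToolbox not available here
 *     }
 *
 *     AVBufferRef *device = NULL;
 *     av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
 *                            NULL, NULL, 0);
 *     avctx->hw_device_ctx = av_buffer_ref(device);
 *     avctx->get_format    = get_vt_format;
 *
 * ff_videotoolbox_frame_params() and ff_videotoolbox_common_init() above then
 * derive the frames context and the CoreVideo pixel format for the session.
 */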
1331 
1332 const FFHWAccel ff_h263_videotoolbox_hwaccel = {
1333  .p.name = "h263_videotoolbox",
1334  .p.type = AVMEDIA_TYPE_VIDEO,
1335  .p.id = AV_CODEC_ID_H263,
1336  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1337  .alloc_frame = ff_videotoolbox_alloc_frame,
1338  .start_frame = videotoolbox_mpeg_start_frame,
1339  .decode_slice = videotoolbox_mpeg_decode_slice,
1340  .end_frame = videotoolbox_mpeg_end_frame,
1341  .frame_params = ff_videotoolbox_frame_params,
1342  .init = ff_videotoolbox_common_init,
1343  .uninit = ff_videotoolbox_uninit,
1344  .priv_data_size = sizeof(VTContext),
1345 };
1346 
1347 const FFHWAccel ff_hevc_videotoolbox_hwaccel = {
1348  .p.name = "hevc_videotoolbox",
1349  .p.type = AVMEDIA_TYPE_VIDEO,
1350  .p.id = AV_CODEC_ID_HEVC,
1351  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1352  .alloc_frame = ff_videotoolbox_alloc_frame,
1353  .start_frame = videotoolbox_hevc_start_frame,
1354  .decode_slice = videotoolbox_hevc_decode_slice,
1355  .decode_params = videotoolbox_hevc_decode_params,
1356  .end_frame = videotoolbox_hevc_end_frame,
1357  .frame_params = ff_videotoolbox_frame_params,
1358  .init = ff_videotoolbox_common_init,
1359  .uninit = ff_videotoolbox_uninit,
1360  .priv_data_size = sizeof(VTContext),
1361 };
1362 
1363 const FFHWAccel ff_h264_videotoolbox_hwaccel = {
1364  .p.name = "h264_videotoolbox",
1365  .p.type = AVMEDIA_TYPE_VIDEO,
1366  .p.id = AV_CODEC_ID_H264,
1367  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1368  .alloc_frame = ff_videotoolbox_alloc_frame,
1369  .start_frame = ff_videotoolbox_h264_start_frame,
1370  .decode_slice = ff_videotoolbox_h264_decode_slice,
1371  .decode_params = videotoolbox_h264_decode_params,
1372  .end_frame = videotoolbox_h264_end_frame,
1373  .frame_params = ff_videotoolbox_frame_params,
1374  .init = ff_videotoolbox_common_init,
1375  .uninit = ff_videotoolbox_uninit,
1376  .priv_data_size = sizeof(VTContext),
1377 };
1378 
1379 const FFHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1380  .p.name = "mpeg1_videotoolbox",
1381  .p.type = AVMEDIA_TYPE_VIDEO,
1382  .p.id = AV_CODEC_ID_MPEG1VIDEO,
1383  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1384  .alloc_frame = ff_videotoolbox_alloc_frame,
1385  .start_frame = videotoolbox_mpeg_start_frame,
1386  .decode_slice = videotoolbox_mpeg_decode_slice,
1387  .end_frame = videotoolbox_mpeg_end_frame,
1388  .frame_params = ff_videotoolbox_frame_params,
1389  .init = ff_videotoolbox_common_init,
1390  .uninit = ff_videotoolbox_uninit,
1391  .priv_data_size = sizeof(VTContext),
1392 };
1393 
1394 const FFHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1395  .p.name = "mpeg2_videotoolbox",
1396  .p.type = AVMEDIA_TYPE_VIDEO,
1397  .p.id = AV_CODEC_ID_MPEG2VIDEO,
1398  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1399  .alloc_frame = ff_videotoolbox_alloc_frame,
1400  .start_frame = videotoolbox_mpeg_start_frame,
1401  .decode_slice = videotoolbox_mpeg_decode_slice,
1402  .end_frame = videotoolbox_mpeg_end_frame,
1403  .frame_params = ff_videotoolbox_frame_params,
1404  .init = ff_videotoolbox_common_init,
1405  .uninit = ff_videotoolbox_uninit,
1406  .priv_data_size = sizeof(VTContext),
1407 };
1408 
1409 const FFHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1410  .p.name = "mpeg4_videotoolbox",
1411  .p.type = AVMEDIA_TYPE_VIDEO,
1412  .p.id = AV_CODEC_ID_MPEG4,
1413  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1414  .alloc_frame = ff_videotoolbox_alloc_frame,
1415  .start_frame = videotoolbox_mpeg_start_frame,
1416  .decode_slice = videotoolbox_mpeg_decode_slice,
1417  .end_frame = videotoolbox_mpeg_end_frame,
1418  .frame_params = ff_videotoolbox_frame_params,
1419  .init = ff_videotoolbox_common_init,
1420  .uninit = ff_videotoolbox_uninit,
1421  .priv_data_size = sizeof(VTContext),
1422 };
1423 
1424 const FFHWAccel ff_prores_videotoolbox_hwaccel = {
1425  .p.name = "prores_videotoolbox",
1426  .p.type = AVMEDIA_TYPE_VIDEO,
1427  .p.id = AV_CODEC_ID_PRORES,
1428  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1429  .alloc_frame = ff_videotoolbox_alloc_frame,
1430  .start_frame = videotoolbox_prores_start_frame,
1431  .decode_slice = videotoolbox_prores_decode_slice,
1432  .end_frame = videotoolbox_prores_end_frame,
1433  .frame_params = ff_videotoolbox_frame_params,
1434  .init = ff_videotoolbox_common_init,
1435  .uninit = ff_videotoolbox_uninit,
1436  .priv_data_size = sizeof(VTContext),
1437 };
1438 
1439 #endif /* CONFIG_VIDEOTOOLBOX */
videotoolbox_buffer_release
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
Definition: videotoolbox.c:70
AVVideotoolboxContext::cm_codec_type
int cm_codec_type
CoreMedia codec type that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:78
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1461
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
APPEND_PS
#define APPEND_PS(T, t)
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_videotoolbox_common_end_frame
int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3025
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_videotoolbox_uninit
int ff_videotoolbox_uninit(AVCodecContext *avctx)
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:197
ff_videotoolbox_buffer_append
int ff_videotoolbox_buffer_append(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:99
FFHWAccel::p
AVHWAccel p
The public AVHWAccel.
Definition: hwaccel_internal.h:38
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
FrameDecodeData
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:33
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:322
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:696
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:248
internal.h
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVPixFmtDescriptor::name
const char * name
Definition: pixdesc.h:70
b
#define b
Definition: input.c:41
av_vt_pixbuf_set_attachments
int av_vt_pixbuf_set_attachments(void *log_ctx, CVPixelBufferRef pixbuf, const AVFrame *src)
Definition: hwcontext_videotoolbox.c:665
data
const char data[16]
Definition: mxf.c:149
ProresContext
Definition: proresdec.h:43
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
bytestream2_size_p
static av_always_inline int bytestream2_size_p(PutByteContext *p)
Definition: bytestream.h:207
AV_W8
#define AV_W8(p, v)
Definition: videotoolbox.c:180
PTLCommon::profile_space
uint8_t profile_space
Definition: ps.h:128
COUNT_SIZE_PS
#define COUNT_SIZE_PS(T, t)
mpegvideo.h
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_mpeg2_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg2_videotoolbox_hwaccel
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:217
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVVideotoolboxContext
This struct holds all the information that needs to be passed between the caller and libavcodec for i...
Definition: videotoolbox.h:57
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
PTLCommon::profile_compatibility_flag
uint8_t profile_compatibility_flag[32]
Definition: ps.h:131
escape_ps
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
Definition: videotoolbox.c:182
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
PTLCommon::progressive_source_flag
uint8_t progressive_source_flag
Definition: ps.h:132
ff_hevc_videotoolbox_hwaccel
const struct FFHWAccel ff_hevc_videotoolbox_hwaccel
FFHWAccel
Definition: hwaccel_internal.h:34
PTLCommon::interlaced_source_flag
uint8_t interlaced_source_flag
Definition: ps.h:133
ff_videotoolbox_avcc_extradata_create
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:211
fail
#define fail()
Definition: checkasm.h:188
ff_h263_videotoolbox_hwaccel
const struct FFHWAccel ff_h263_videotoolbox_hwaccel
proresdec.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:644
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:639
VTContext::allocated_size
int allocated_size
Definition: vt_internal.h:33
ff_videotoolbox_common_init
int ff_videotoolbox_common_init(AVCodecContext *avctx)
PTLCommon::frame_only_constraint_flag
uint8_t frame_only_constraint_flag
Definition: ps.h:135
videotoolbox.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
VTContext::bitstream
uint8_t * bitstream
Definition: vt_internal.h:27
ff_videotoolbox_h264_start_frame
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:413
kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:48
AVHWFramesContext::height
int height
Definition: hwcontext.h:217
duration
int64_t duration
Definition: movenc.c:65
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:530
bytestream2_put_ne24
#define bytestream2_put_ne24
Definition: bytestream.h:128
full_range
bool full_range
Definition: hwcontext_videotoolbox.c:46
av_fast_realloc
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:497
vt_internal.h
PTLCommon
Definition: ps.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:198
kCMVideoCodecType_HEVC
@ kCMVideoCodecType_HEVC
Definition: videotoolbox.c:52
VTHWFrame
Definition: videotoolbox.c:65
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
ff_mpeg1_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg1_videotoolbox_hwaccel
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
ff_videotoolbox_vpcc_extradata_create
CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_vp9.c:65
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:49
decode.h
PTLCommon::non_packed_constraint_flag
uint8_t non_packed_constraint_flag
Definition: ps.h:134
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
PTLCommon::profile_idc
uint8_t profile_idc
Definition: ps.h:130
AVVTFramesContext
Definition: hwcontext_videotoolbox.h:45
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
PTLCommon::tier_flag
uint8_t tier_flag
Definition: ps.h:129
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
if
if(ret)
Definition: filter_design.txt:179
VTContext::bitstream_size
int bitstream_size
Definition: vt_internal.h:30
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3341
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:210
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:701
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:280
hwaccel_internal.h
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
AVVTFramesContext::color_range
enum AVColorRange color_range
Definition: hwcontext_videotoolbox.h:46
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:126
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
V
#define V
Definition: avdct.c:31
AV_PIX_FMT_P410
#define AV_PIX_FMT_P410
Definition: pixfmt.h:550
AVVideotoolboxContext::session
VTDecompressionSessionRef session
Videotoolbox decompression session object.
Definition: videotoolbox.h:61
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
ff_videotoolbox_frame_params
int ff_videotoolbox_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
ff_videotoolbox_h264_decode_slice
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:473
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
videotoolbox_common_decode_slice
static int videotoolbox_common_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:450
VTHWFrame::pixbuf
CVPixelBufferRef pixbuf
Definition: videotoolbox.c:66
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
PutByteContext
Definition: bytestream.h:37
hwcontext_videotoolbox.h
ff_prores_videotoolbox_hwaccel
const struct FFHWAccel ff_prores_videotoolbox_hwaccel
ff_videotoolbox_hvcc_extradata_create
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:256
kCMVideoCodecType_AV1
@ kCMVideoCodecType_AV1
Definition: videotoolbox.c:60
hevcdec.h
height
#define height
Definition: dsp.h:85
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:372
FrameDecodeData::post_process
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:44
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:130
P
#define P
kCMVideoCodecType_VP9
@ kCMVideoCodecType_VP9
Definition: videotoolbox.c:56
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_bswap32
#define av_bswap32
Definition: bswap.h:47
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
VUI
Definition: ps.h:98
AV_PIX_FMT_AYUV64
#define AV_PIX_FMT_AYUV64
Definition: pixfmt.h:536
ff_videotoolbox_av1c_extradata_create
CFDataRef ff_videotoolbox_av1c_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_av1.c:31
AVVideotoolboxContext::cm_fmt_desc
CMVideoFormatDescriptionRef cm_fmt_desc
CoreMedia Format Description that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:73
AV_PIX_FMT_NV16
@ AV_PIX_FMT_NV16
interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:198
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AV_PIX_FMT_P216
#define AV_PIX_FMT_P216
Definition: pixfmt.h:553
AV_PIX_FMT_P210
#define AV_PIX_FMT_P210
Definition: pixfmt.h:549
VTContext
Definition: vt_internal.h:25
AVHWAccel::name
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2105
kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:45
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
h264dec.h
H264Context
H264Context.
Definition: h264dec.h:340
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:529
AV_PIX_FMT_NV24
@ AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:371
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
VTContext::frame
CVImageBufferRef frame
Definition: vt_internal.h:36
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder.
Definition: avcodec.h:1507
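A minimal sketch of how an application hands a VideoToolbox device context to a decoder through the generic hwdevice API; attach_vt_device is a hypothetical helper:

#include "libavcodec/avcodec.h"
#include "libavutil/hwcontext.h"

static int attach_vt_device(AVCodecContext *avctx)
{
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
                                     NULL, NULL, 0);
    if (ret < 0)
        return ret;
    avctx->hw_device_ctx = device_ref; /* the codec context now owns this reference */
    return 0;
}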
bytestream2_put_ne32
#define bytestream2_put_ne32
Definition: bytestream.h:129
AVCodecContext::height
int height
Definition: avcodec.h:624
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1485
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
Definition: hwcontext.h:115
bytestream2_put_ne16
#define bytestream2_put_ne16
Definition: bytestream.h:127
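The bytestream2_put_ne16/ne32 macros write values at the current PutByteContext position in the host's native byte order, which is how the extradata helpers in this file assemble their output buffers. A small usage sketch; write_two_fields is a hypothetical helper and the field layout is illustrative only:

#include <stdint.h>
#include "libavcodec/bytestream.h"

static int write_two_fields(uint8_t *buf, int buf_size,
                            uint32_t first, uint16_t second)
{
    PutByteContext pb;

    bytestream2_init_write(&pb, buf, buf_size);
    bytestream2_put_ne32(&pb, first);  /* 4 bytes, host endianness */
    bytestream2_put_ne16(&pb, second); /* 2 bytes, host endianness */
    return bytestream2_tell_p(&pb);    /* number of bytes written */
}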
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
Definition: pixfmt.h:96
frame
Frame handling notes from the libavfilter design document: buffered frames must be flushed as soon as new input produces new frames, and requesting more frames is left to the filter's request_frame method or to the application, which keeps requesting until at least one frame has been pushed or progress has been made towards producing one.
Definition: filter_design.txt:264
ff_videotoolbox_alloc_frame
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: videotoolbox.c:150
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:150
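A hedged sketch of allocating and initialising a VideoToolbox frames context similar to the one the decoder caches in VTContext::cached_hw_frames_ctx; alloc_vt_frames_ctx is a hypothetical helper and the NV12 sw_format is an illustrative choice:

#include "libavutil/hwcontext.h"
#include "libavutil/pixfmt.h"

static AVBufferRef *alloc_vt_frames_ctx(AVBufferRef *device_ref, int width, int height)
{
    AVHWFramesContext *frames;
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);

    if (!frames_ref)
        return NULL;
    frames            = (AVHWFramesContext *)frames_ref->data;
    frames->format    = AV_PIX_FMT_VIDEOTOOLBOX; /* hardware pixel format */
    frames->sw_format = AV_PIX_FMT_NV12;         /* underlying CVPixelBuffer layout */
    frames->width     = width;
    frames->height    = height;
    if (av_hwframe_ctx_init(frames_ref) < 0)
        av_buffer_unref(&frames_ref);
    return frames_ref;
}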
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
Definition: videotoolbox.c:63
av_map_videotoolbox_format_to_pixfmt
enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat.
Definition: hwcontext_videotoolbox.c:142
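Usage sketch: after decoding, the CVPixelBuffer's format can be mapped back to an AVPixelFormat, e.g. to fill sw_pix_fmt; sw_format_of is a hypothetical helper:

#include <CoreVideo/CoreVideo.h>
#include "libavutil/hwcontext_videotoolbox.h"

static enum AVPixelFormat sw_format_of(CVPixelBufferRef pixbuf)
{
    OSType cv_fmt = CVPixelBufferGetPixelFormatType(pixbuf);
    /* Returns AV_PIX_FMT_NONE when CoreVideo reports a format with no mapping. */
    return av_map_videotoolbox_format_to_pixfmt(cv_fmt);
}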
AVCodecContext
main external API structure.
Definition: avcodec.h:451
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
VTContext::vt_ctx
struct AVVideotoolboxContext * vt_ctx
Definition: vt_internal.h:43
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
ff_mpeg4_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg4_videotoolbox_hwaccel
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
HEVCContext
Definition: hevcdec.h:487
PTLCommon::level_idc
uint8_t level_idc
Definition: ps.h:147
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
pps
uint64_t pps
Definition: dovi_rpuenc.c:35
videotoolbox_postproc_frame
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
Definition: videotoolbox.c:119
VTContext::logctx
void * logctx
Definition: vt_internal.h:49
VTHWFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: videotoolbox.c:67
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:537
VUI::min_spatial_segmentation_idc
int min_spatial_segmentation_idc
Definition: ps.h:120
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:639
VTContext::cached_hw_frames_ctx
struct AVBufferRef * cached_hw_frames_ctx
Definition: vt_internal.h:39
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
avutil.h
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
HEVCVPS
Definition: ps.h:171
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1634
HEVCSPS
Definition: ps.h:252
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
HEVCPPS
Definition: ps.h:371
ff_videotoolbox_buffer_copy
int ff_videotoolbox_buffer_copy(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:79
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:476
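Because the fourcc is stored LSB-first, MKTAG() builds the tag with the same byte layout, so a host-independent check is a plain equality test; is_hvc1_tagged is a hypothetical helper:

#include "libavutil/macros.h"
#include "libavcodec/avcodec.h"

static int is_hvc1_tagged(const AVCodecContext *avctx)
{
    /* MKTAG('h','v','c','1') == 'h' | 'v'<<8 | 'c'<<16 | '1'<<24, matching codec_tag's layout. */
    return avctx->codec_tag == MKTAG('h', 'v', 'c', '1');
}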
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AV_PIX_FMT_P416
#define AV_PIX_FMT_P416
Definition: pixfmt.h:554
ff_h264_videotoolbox_hwaccel
const struct FFHWAccel ff_h264_videotoolbox_hwaccel
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVVideotoolboxContext::cv_pix_fmt_type
OSType cv_pix_fmt_type
CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
Definition: videotoolbox.h:68
av_map_videotoolbox_format_from_pixfmt2
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range)
Same as av_map_videotoolbox_format_from_pixfmt function, but can map and return full range pixel formats via a flag.
Definition: hwcontext_videotoolbox.c:176
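Usage sketch for choosing the CVPixelBuffer format VideoToolbox should emit for a given software pixel format, taking full-range content into account; pick_cv_pix_fmt is a hypothetical helper:

#include <stdbool.h>
#include <stdint.h>
#include "libavutil/hwcontext_videotoolbox.h"
#include "libavutil/pixfmt.h"

static uint32_t pick_cv_pix_fmt(enum AVPixelFormat pix_fmt, bool full_range)
{
    uint32_t cv_fmt = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
    /* A return value of 0 means no CoreVideo equivalent is known; callers
     * typically fall back to a default such as NV12 in that case. */
    return cv_fmt;
}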
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
bytestream.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
hwcontext.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:670
videotoolbox_h264_decode_params
static int videotoolbox_h264_decode_params(AVCodecContext *avctx, int type, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:427
width
#define width
Definition: dsp.h:85
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
VTContext::reconfig_needed
bool reconfig_needed
Definition: vt_internal.h:47
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
VTContext::sps
uint8_t sps[3]
Definition: vt_internal.h:46
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
src
#define src
Definition: vp8dsp.c:248
AV_CODEC_ID_PRORES
@ AV_CODEC_ID_PRORES
Definition: codec_id.h:200