FFmpeg
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
26 #include "libavutil/hwcontext_videotoolbox.h"
27 #include "vt_internal.h"
28 #include "libavutil/avutil.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/pixdesc.h"
31 #include "bytestream.h"
32 #include "decode.h"
33 #include "internal.h"
34 #include "h264dec.h"
35 #include "hevcdec.h"
36 #include "mpegvideo.h"
37 #include "proresdec.h"
38 #include <Availability.h>
39 #include <AvailabilityMacros.h>
40 #include <TargetConditionals.h>
41 
42 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
43 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
44 #endif
45 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
46 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
47 #endif
48 
49 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
50 enum { kCMVideoCodecType_HEVC = 'hvc1' };
51 #endif
52 
53 #if !HAVE_KCMVIDEOCODECTYPE_VP9
54 enum { kCMVideoCodecType_VP9 = 'vp09' };
55 #endif
56 
57 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
58 
59 typedef struct VTHWFrame {
60  CVPixelBufferRef pixbuf;
61  AVBufferRef *hw_frames_ctx;
62 } VTHWFrame;
63 
64 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
65 {
66  VTHWFrame *ref = (VTHWFrame *)data;
67  av_buffer_unref(&ref->hw_frames_ctx);
68  CVPixelBufferRelease(ref->pixbuf);
69 
70  av_free(data);
71 }
72 
73 int ff_videotoolbox_buffer_copy(VTContext *vtctx,
74  const uint8_t *buffer,
75  uint32_t size)
76 {
77  void *tmp;
78 
79  tmp = av_fast_realloc(vtctx->bitstream,
80  &vtctx->allocated_size,
81  size);
82 
83  if (!tmp)
84  return AVERROR(ENOMEM);
85 
86  vtctx->bitstream = tmp;
87  memcpy(vtctx->bitstream, buffer, size);
88  vtctx->bitstream_size = size;
89 
90  return 0;
91 }
92 
93 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
94 {
95  int ret;
96  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
97 
98  if (!ref->pixbuf) {
99  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
100  av_frame_unref(frame);
101  return AVERROR_EXTERNAL;
102  }
103 
104  frame->crop_right = 0;
105  frame->crop_left = 0;
106  frame->crop_top = 0;
107  frame->crop_bottom = 0;
108 
109  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
110  return ret;
111 
112  frame->data[3] = (uint8_t*)ref->pixbuf;
113 
114  if (ref->hw_frames_ctx) {
115  av_buffer_unref(&frame->hw_frames_ctx);
116  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
117  if (!frame->hw_frames_ctx)
118  return AVERROR(ENOMEM);
119  }
120 
121  return 0;
122 }
123 
124 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
125 {
126  size_t size = sizeof(VTHWFrame);
127  uint8_t *data = NULL;
128  AVBufferRef *buf = NULL;
129  int ret = ff_attach_decode_data(frame);
130  FrameDecodeData *fdd;
131  if (ret < 0)
132  return ret;
133 
134  data = av_mallocz(size);
135  if (!data)
136  return AVERROR(ENOMEM);
137  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
138  if (!buf) {
139  av_freep(&data);
140  return AVERROR(ENOMEM);
141  }
142  frame->buf[0] = buf;
143 
144  fdd = (FrameDecodeData*)frame->private_ref->data;
145  fdd->post_process = videotoolbox_postproc_frame;
146 
147  frame->width = avctx->width;
148  frame->height = avctx->height;
149  frame->format = avctx->pix_fmt;
150 
151  return 0;
152 }
153 
154 #define AV_W8(p, v) *(p) = (v)
155 
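/*
 * Note: escape_ps() re-inserts the 0x03 emulation-prevention bytes that are
 * stripped when the SPS/PPS are parsed, since the avcC/hvcC extradata built
 * below must carry the parameter sets in escaped form. Called with
 * dst == NULL it only computes the escaped size.
 */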
156 static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
157 {
158  int i;
159  int size = src_size;
160  uint8_t* p = dst;
161 
162  for (i = 0; i < src_size; i++) {
163  if (i + 2 < src_size &&
164  src[i] == 0x00 &&
165  src[i + 1] == 0x00 &&
166  src[i + 2] <= 0x03) {
167  if (dst) {
168  *p++ = src[i++];
169  *p++ = src[i];
170  *p++ = 0x03;
171  } else {
172  i++;
173  }
174  size++;
175  } else if (dst)
176  *p++ = src[i];
177  }
178 
179  if (dst)
180  av_assert0((p - dst) == size);
181 
182  return size;
183 }
184 
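/*
 * Build an AVCDecoderConfigurationRecord ('avcC') from the active SPS/PPS:
 * version, profile/level bytes copied from the SPS, a 4-byte NAL length
 * field (lengthSizeMinusOne = 3), then the escaped SPS and PPS each
 * prefixed with a 16-bit size.
 */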
185 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
186 {
187  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
188  H264Context *h = avctx->priv_data;
189  CFDataRef data = NULL;
190  uint8_t *p;
191  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
192  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
193  int vt_extradata_size;
194  uint8_t *vt_extradata;
195 
196  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
197  vt_extradata = av_malloc(vt_extradata_size);
198 
199  if (!vt_extradata)
200  return NULL;
201 
202  p = vt_extradata;
203 
204  AV_W8(p + 0, 1); /* version */
205  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
206  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
207  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
208  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
209  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
210  AV_WB16(p + 6, sps_size);
211  p += 8;
212  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
213  AV_W8(p + 0, 1); /* number of pps */
214  AV_WB16(p + 1, pps_size);
215  p += 3;
216  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
217 
218  av_assert0(p - vt_extradata == vt_extradata_size);
219 
220  // save sps header (profile/level) used to create decoder session,
221  // so we can detect changes and recreate it.
222  if (vtctx)
223  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
224 
225  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
226  av_free(vt_extradata);
227  return data;
228 }
229 
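/*
 * Build an HEVCDecoderConfigurationRecord ('hvcC'): general profile, tier
 * and level from the VPS PTL, chroma format and bit depths from the SPS,
 * followed by three NAL unit arrays (VPS, SPS, PPS) in escaped form.
 */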
230 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
231 {
232  HEVCContext *h = avctx->priv_data;
233  int i, num_vps = 0, num_sps = 0, num_pps = 0;
234  const HEVCVPS *vps = h->ps.vps;
235  const HEVCSPS *sps = h->ps.sps;
236  const HEVCPPS *pps = h->ps.pps;
237  PTLCommon ptlc = vps->ptl.general_ptl;
238  VUI vui = sps->vui;
239  uint8_t parallelismType;
240  CFDataRef data = NULL;
241  uint8_t *p;
242  int vt_extradata_size = 23 + 3 + 3 + 3;
243  uint8_t *vt_extradata;
244 
245 #define COUNT_SIZE_PS(T, t) \
246  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
247  if (h->ps.t##ps_list[i]) { \
248  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
249  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
250  num_##t##ps++; \
251  } \
252  }
253 
254  COUNT_SIZE_PS(V, v)
255  COUNT_SIZE_PS(S, s)
256  COUNT_SIZE_PS(P, p)
257 
258  vt_extradata = av_malloc(vt_extradata_size);
259  if (!vt_extradata)
260  return NULL;
261  p = vt_extradata;
262 
263  /* unsigned int(8) configurationVersion = 1; */
264  AV_W8(p + 0, 1);
265 
266  /*
267  * unsigned int(2) general_profile_space;
268  * unsigned int(1) general_tier_flag;
269  * unsigned int(5) general_profile_idc;
270  */
271  AV_W8(p + 1, ptlc.profile_space << 6 |
272  ptlc.tier_flag << 5 |
273  ptlc.profile_idc);
274 
275  /* unsigned int(32) general_profile_compatibility_flags; */
276  for (i = 0; i < 4; i++) {
277  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
278  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
279  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
280  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
281  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
282  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
283  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
284  ptlc.profile_compatibility_flag[i * 8 + 7]);
285  }
286 
287  /* unsigned int(48) general_constraint_indicator_flags; */
288  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
289  ptlc.interlaced_source_flag << 6 |
290  ptlc.non_packed_constraint_flag << 5 |
291  ptlc.frame_only_constraint_flag << 4);
292  AV_W8(p + 7, 0);
293  AV_WN32(p + 8, 0);
294 
295  /* unsigned int(8) general_level_idc; */
296  AV_W8(p + 12, ptlc.level_idc);
297 
298  /*
299  * bit(4) reserved = ‘1111’b;
300  * unsigned int(12) min_spatial_segmentation_idc;
301  */
302  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
303  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
304 
305  /*
306  * bit(6) reserved = ‘111111’b;
307  * unsigned int(2) parallelismType;
308  */
309  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
310  parallelismType = 0;
311  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
312  parallelismType = 0;
313  else if (pps->entropy_coding_sync_enabled_flag)
314  parallelismType = 3;
315  else if (pps->tiles_enabled_flag)
316  parallelismType = 2;
317  else
318  parallelismType = 1;
319  AV_W8(p + 15, 0xfc | parallelismType);
320 
321  /*
322  * bit(6) reserved = ‘111111’b;
323  * unsigned int(2) chromaFormat;
324  */
325  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
326 
327  /*
328  * bit(5) reserved = ‘11111’b;
329  * unsigned int(3) bitDepthLumaMinus8;
330  */
331  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
332 
333  /*
334  * bit(5) reserved = ‘11111’b;
335  * unsigned int(3) bitDepthChromaMinus8;
336  */
337  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
338 
339  /* bit(16) avgFrameRate; */
340  AV_WB16(p + 19, 0);
341 
342  /*
343  * bit(2) constantFrameRate;
344  * bit(3) numTemporalLayers;
345  * bit(1) temporalIdNested;
346  * unsigned int(2) lengthSizeMinusOne;
347  */
348  AV_W8(p + 21, 0 << 6 |
349  sps->max_sub_layers << 3 |
350  sps->temporal_id_nesting_flag << 2 |
351  3);
352 
353  /* unsigned int(8) numOfArrays; */
354  AV_W8(p + 22, 3);
355 
356  p += 23;
357 
358 #define APPEND_PS(T, t) \
359  /* \
360  * bit(1) array_completeness; \
361  * unsigned int(1) reserved = 0; \
362  * unsigned int(6) NAL_unit_type; \
363  */ \
364  AV_W8(p, 1 << 7 | \
365  HEVC_NAL_##T##PS & 0x3f); \
366  /* unsigned int(16) numNalus; */ \
367  AV_WB16(p + 1, num_##t##ps); \
368  p += 3; \
369  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
370  if (h->ps.t##ps_list[i]) { \
371  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
372  int size = escape_ps(p + 2, lps->data, lps->data_size); \
373  /* unsigned int(16) nalUnitLength; */ \
374  AV_WB16(p, size); \
375  /* bit(8*nalUnitLength) nalUnit; */ \
376  p += 2 + size; \
377  } \
378  }
379 
380  APPEND_PS(V, v)
381  APPEND_PS(S, s)
382  APPEND_PS(P, p)
383 
384  av_assert0(p - vt_extradata == vt_extradata_size);
385 
386  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
387  av_free(vt_extradata);
388  return data;
389 }
390 
391 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
392  const uint8_t *buffer,
393  uint32_t size)
394 {
395  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
396  H264Context *h = avctx->priv_data;
397 
398  if (h->is_avc == 1) {
399  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
400  }
401 
402  return 0;
403 }
404 
405 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
406  int type,
407  const uint8_t *buffer,
408  uint32_t size)
409 {
410  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
411  H264Context *h = avctx->priv_data;
412 
413  // save sps header (profile/level) used to create decoder session
414  if (!vtctx->sps[0])
415  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
416 
417  if (type == H264_NAL_SPS) {
418  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
419  vtctx->reconfig_needed = true;
420  memcpy(vtctx->sps, buffer + 1, 3);
421  }
422  }
423 
424  // pass-through SPS/PPS changes to the decoder
425  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
426 }
427 
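/*
 * Slice data arrives in Annex B form; the decompression session expects
 * AVCC-style length-prefixed NAL units, so each slice is appended to the
 * bitstream buffer behind a 4-byte big-endian size field.
 */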
428 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
429  const uint8_t *buffer,
430  uint32_t size)
431 {
432  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
433  void *tmp;
434 
435  tmp = av_fast_realloc(vtctx->bitstream,
436  &vtctx->allocated_size,
437  vtctx->bitstream_size+size+4);
438  if (!tmp)
439  return AVERROR(ENOMEM);
440 
441  vtctx->bitstream = tmp;
442 
443  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
444  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
445 
446  vtctx->bitstream_size += size + 4;
447 
448  return 0;
449 }
450 
451 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
452  const uint8_t *buffer,
453  uint32_t size)
454 {
455  H264Context *h = avctx->priv_data;
456 
457  if (h->is_avc == 1)
458  return 0;
459 
460  return videotoolbox_common_decode_slice(avctx, buffer, size);
461 }
462 
463 #if CONFIG_VIDEOTOOLBOX
464 // Return the AVVideotoolboxContext that matters currently. Where it comes from
465 // depends on the API used.
466 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
467 {
468  // Somewhat tricky because the user can call av_videotoolbox_default_free()
469  // at any time, even when the codec is closed.
470  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
471  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
472  if (vtctx->vt_ctx)
473  return vtctx->vt_ctx;
474  }
475  return avctx->hwaccel_context;
476 }
477 
478 static void videotoolbox_stop(AVCodecContext *avctx)
479 {
480  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
481  if (!videotoolbox)
482  return;
483 
484  if (videotoolbox->cm_fmt_desc) {
485  CFRelease(videotoolbox->cm_fmt_desc);
486  videotoolbox->cm_fmt_desc = NULL;
487  }
488 
489  if (videotoolbox->session) {
490  VTDecompressionSessionInvalidate(videotoolbox->session);
491  CFRelease(videotoolbox->session);
492  videotoolbox->session = NULL;
493  }
494 }
495 
496 int ff_videotoolbox_uninit(AVCodecContext *avctx)
497 {
498  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
499  if (!vtctx)
500  return 0;
501 
502  av_freep(&vtctx->bitstream);
503  if (vtctx->frame)
504  CVPixelBufferRelease(vtctx->frame);
505 
506  if (vtctx->vt_ctx)
507  videotoolbox_stop(avctx);
508 
509  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
510  av_freep(&vtctx->vt_ctx);
511 
512  return 0;
513 }
514 
515 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
516 {
517  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
518  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
519  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
520  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
521  int width = CVPixelBufferGetWidth(pixbuf);
522  int height = CVPixelBufferGetHeight(pixbuf);
523  AVHWFramesContext *cached_frames;
524  VTHWFrame *ref;
525  int ret;
526 
527  if (!frame->buf[0] || frame->data[3]) {
528  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
529  av_frame_unref(frame);
530  return AVERROR_EXTERNAL;
531  }
532 
533  ref = (VTHWFrame *)frame->buf[0]->data;
534 
535  if (ref->pixbuf)
536  CVPixelBufferRelease(ref->pixbuf);
537  ref->pixbuf = vtctx->frame;
538  vtctx->frame = NULL;
539 
540  // Old API code path.
541  if (!vtctx->cached_hw_frames_ctx)
542  return 0;
543 
544  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
545 
546  if (cached_frames->sw_format != sw_format ||
547  cached_frames->width != width ||
548  cached_frames->height != height) {
549  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
550  AVHWFramesContext *hw_frames;
551  if (!hw_frames_ctx)
552  return AVERROR(ENOMEM);
553 
554  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
555  hw_frames->format = cached_frames->format;
556  hw_frames->sw_format = sw_format;
557  hw_frames->width = width;
558  hw_frames->height = height;
559 
560  ret = av_hwframe_ctx_init(hw_frames_ctx);
561  if (ret < 0) {
562  av_buffer_unref(&hw_frames_ctx);
563  return ret;
564  }
565 
566  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
567  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
568  }
569 
570  av_buffer_unref(&ref->hw_frames_ctx);
571  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
572  if (!ref->hw_frames_ctx)
573  return AVERROR(ENOMEM);
574 
575  return 0;
576 }
577 
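/*
 * MPEG-4 descriptors use a variable-length size field: 7 bits per byte with
 * the MSB acting as a continuation flag. This helper always writes the
 * 4-byte form, which can express any length up to 2^28 - 1.
 */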
578 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
579 {
580  int i;
581  uint8_t b;
582 
583  for (i = 3; i >= 0; i--) {
584  b = (length >> (i * 7)) & 0x7F;
585  if (i != 0)
586  b |= 0x80;
587 
588  bytestream2_put_byteu(pb, b);
589  }
590 }
591 
592 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
593 {
594  CFDataRef data;
595  uint8_t *rw_extradata;
596  PutByteContext pb;
597  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
598  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
599  int config_size = 13 + 5 + avctx->extradata_size;
600  int s;
601 
602  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
603  return NULL;
604 
605  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
606  bytestream2_put_byteu(&pb, 0); // version
607  bytestream2_put_ne24(&pb, 0); // flags
608 
609  // elementary stream descriptor
610  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
611  videotoolbox_write_mp4_descr_length(&pb, full_size);
612  bytestream2_put_ne16(&pb, 0); // esid
613  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
614 
615  // decoder configuration descriptor
616  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
617  videotoolbox_write_mp4_descr_length(&pb, config_size);
618  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
619  bytestream2_put_byteu(&pb, 0x11); // stream type
620  bytestream2_put_ne24(&pb, 0); // buffer size
621  bytestream2_put_ne32(&pb, 0); // max bitrate
622  bytestream2_put_ne32(&pb, 0); // avg bitrate
623 
624  // decoder specific descriptor
625  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
626  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
627 
628  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
629 
630  // SLConfigDescriptor
631  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
632  bytestream2_put_byteu(&pb, 0x01); // length
633  bytestream2_put_byteu(&pb, 0x02); //
634 
635  s = bytestream2_size_p(&pb);
636 
637  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
638 
639  av_freep(&rw_extradata);
640  return data;
641 }
642 
643 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
644  void *buffer,
645  int size)
646 {
647  OSStatus status;
648  CMBlockBufferRef block_buf;
649  CMSampleBufferRef sample_buf;
650 
651  block_buf = NULL;
652  sample_buf = NULL;
653 
654  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
655  buffer, // memoryBlock
656  size, // blockLength
657  kCFAllocatorNull, // blockAllocator
658  NULL, // customBlockSource
659  0, // offsetToData
660  size, // dataLength
661  0, // flags
662  &block_buf);
663 
664  if (!status) {
665  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
666  block_buf, // dataBuffer
667  TRUE, // dataReady
668  0, // makeDataReadyCallback
669  0, // makeDataReadyRefcon
670  fmt_desc, // formatDescription
671  1, // numSamples
672  0, // numSampleTimingEntries
673  NULL, // sampleTimingArray
674  0, // numSampleSizeEntries
675  NULL, // sampleSizeArray
676  &sample_buf);
677  }
678 
679  if (block_buf)
680  CFRelease(block_buf);
681 
682  return sample_buf;
683 }
684 
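/*
 * Output callback invoked by VideoToolbox for each decoded frame. The
 * previously stored pixel buffer (if any) is released and the new one is
 * retained in vtctx->frame; videotoolbox_session_decode_frame() waits for
 * asynchronous frames, so the buffer is ready when decoding returns.
 */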
685 static void videotoolbox_decoder_callback(void *opaque,
686  void *sourceFrameRefCon,
687  OSStatus status,
688  VTDecodeInfoFlags flags,
689  CVImageBufferRef image_buffer,
690  CMTime pts,
691  CMTime duration)
692 {
693  AVCodecContext *avctx = opaque;
694  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
695 
696  if (vtctx->frame) {
697  CVPixelBufferRelease(vtctx->frame);
698  vtctx->frame = NULL;
699  }
700 
701  if (!image_buffer) {
702  av_log(avctx, status ? AV_LOG_WARNING : AV_LOG_DEBUG, "vt decoder cb: output image buffer is null: %i\n", status);
703  return;
704  }
705 
706  vtctx->frame = CVPixelBufferRetain(image_buffer);
707 }
708 
709 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
710 {
711  OSStatus status;
712  CMSampleBufferRef sample_buf;
713  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
714  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
715 
716  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
717  vtctx->bitstream,
718  vtctx->bitstream_size);
719 
720  if (!sample_buf)
721  return -1;
722 
723  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
724  sample_buf,
725  0, // decodeFlags
726  NULL, // sourceFrameRefCon
727  0); // infoFlagsOut
728  if (status == noErr)
729  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
730 
731  CFRelease(sample_buf);
732 
733  return status;
734 }
735 
736 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
737  CFDictionaryRef decoder_spec,
738  int width,
739  int height)
740 {
741  CMFormatDescriptionRef cm_fmt_desc;
742  OSStatus status;
743 
744  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
745  codec_type,
746  width,
747  height,
748  decoder_spec, // Dictionary of extension
749  &cm_fmt_desc);
750 
751  if (status)
752  return NULL;
753 
754  return cm_fmt_desc;
755 }
756 
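/*
 * Destination pixel buffer attributes: requested width/height, the chosen
 * CVPixelBuffer format type (if any) and an empty IOSurfaceProperties
 * dictionary, which asks CoreVideo for IOSurface-backed buffers that can be
 * shared with OpenGL/OpenGL ES.
 */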
757 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
758  int height,
759  OSType pix_fmt)
760 {
761  CFMutableDictionaryRef buffer_attributes;
762  CFMutableDictionaryRef io_surface_properties;
763  CFNumberRef cv_pix_fmt;
764  CFNumberRef w;
765  CFNumberRef h;
766 
767  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
768  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
769  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
770 
771  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
772  4,
773  &kCFTypeDictionaryKeyCallBacks,
774  &kCFTypeDictionaryValueCallBacks);
775  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
776  0,
777  &kCFTypeDictionaryKeyCallBacks,
778  &kCFTypeDictionaryValueCallBacks);
779 
780  if (pix_fmt)
781  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
782  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
783  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
784  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
785 #if TARGET_OS_IPHONE
786  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
787 #else
788  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
789 #endif
790 
791  CFRelease(io_surface_properties);
792  CFRelease(cv_pix_fmt);
793  CFRelease(w);
794  CFRelease(h);
795 
796  return buffer_attributes;
797 }
798 
799 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
800  AVCodecContext *avctx)
801 {
802  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
803  0,
804  &kCFTypeDictionaryKeyCallBacks,
805  &kCFTypeDictionaryValueCallBacks);
806 
807  CFDictionarySetValue(config_info,
808  codec_type == kCMVideoCodecType_HEVC ?
809  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
810  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
811  kCFBooleanTrue);
812 
813  CFMutableDictionaryRef avc_info;
814  CFDataRef data = NULL;
815 
816  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
817  1,
818  &kCFTypeDictionaryKeyCallBacks,
819  &kCFTypeDictionaryValueCallBacks);
820 
821  switch (codec_type) {
822  case kCMVideoCodecType_MPEG4Video :
823  if (avctx->extradata_size)
824  data = videotoolbox_esds_extradata_create(avctx);
825  if (data)
826  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
827  break;
828  case kCMVideoCodecType_H264 :
829  data = ff_videotoolbox_avcc_extradata_create(avctx);
830  if (data)
831  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
832  break;
833  case kCMVideoCodecType_HEVC :
834  data = ff_videotoolbox_hvcc_extradata_create(avctx);
835  if (data)
836  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
837  break;
838 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
839  case kCMVideoCodecType_VP9 :
840  data = ff_videotoolbox_vpcc_extradata_create(avctx);
841  if (data)
842  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
843  break;
844 #endif
845  default:
846  break;
847  }
848 
849  CFDictionarySetValue(config_info,
850  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
851  avc_info);
852 
853  if (data)
854  CFRelease(data);
855 
856  CFRelease(avc_info);
857  return config_info;
858 }
859 
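/*
 * Create the VTDecompressionSession: map codec_id (and the ProRes fourcc)
 * to a CMVideoCodecType, build the decoder specification and format
 * description, then translate the most common VideoToolbox status codes
 * into AVERROR values.
 */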
860 static int videotoolbox_start(AVCodecContext *avctx)
861 {
862  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
863  OSStatus status;
864  VTDecompressionOutputCallbackRecord decoder_cb;
865  CFDictionaryRef decoder_spec;
866  CFDictionaryRef buf_attr;
867 
868  if (!videotoolbox) {
869  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
870  return -1;
871  }
872 
873  switch( avctx->codec_id ) {
874  case AV_CODEC_ID_H263 :
875  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
876  break;
877  case AV_CODEC_ID_H264 :
878  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
879  break;
880  case AV_CODEC_ID_HEVC :
881  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
882  break;
883  case AV_CODEC_ID_MPEG1VIDEO :
884  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
885  break;
886  case AV_CODEC_ID_MPEG2VIDEO :
887  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
888  break;
889  case AV_CODEC_ID_MPEG4 :
890  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
891  break;
892  case AV_CODEC_ID_PRORES :
893  switch (avctx->codec_tag) {
894  default:
895  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
896  // fall-through
897  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
898  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
899  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
900  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
901  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
902  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
903  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
904  break;
905  }
906  break;
907  case AV_CODEC_ID_VP9 :
908  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
909  break;
910  default :
911  break;
912  }
913 
914 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
915  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
916  if (__builtin_available(macOS 10.9, *)) {
917  VTRegisterProfessionalVideoWorkflowVideoDecoders();
918  }
919  }
920 #endif
921 
922 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
923  if (__builtin_available(macOS 11.0, *)) {
924  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
925  }
926 #endif
927 
928  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
929 
930  if (!decoder_spec) {
931  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
932  return -1;
933  }
934 
935  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
936  decoder_spec,
937  avctx->width,
938  avctx->height);
939  if (!videotoolbox->cm_fmt_desc) {
940  if (decoder_spec)
941  CFRelease(decoder_spec);
942 
943  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
944  return -1;
945  }
946 
947  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
948  avctx->height,
949  videotoolbox->cv_pix_fmt_type);
950 
951  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
952  decoder_cb.decompressionOutputRefCon = avctx;
953 
954  status = VTDecompressionSessionCreate(NULL, // allocator
955  videotoolbox->cm_fmt_desc, // videoFormatDescription
956  decoder_spec, // videoDecoderSpecification
957  buf_attr, // destinationImageBufferAttributes
958  &decoder_cb, // outputCallback
959  &videotoolbox->session); // decompressionSessionOut
960 
961  if (decoder_spec)
962  CFRelease(decoder_spec);
963  if (buf_attr)
964  CFRelease(buf_attr);
965 
966  switch (status) {
967  case kVTVideoDecoderNotAvailableNowErr:
968  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
969  return AVERROR(ENOSYS);
970  case kVTVideoDecoderUnsupportedDataFormatErr:
971  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
972  return AVERROR(ENOSYS);
973  case kVTCouldNotFindVideoDecoderErr:
974  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
975  return AVERROR(ENOSYS);
976  case kVTVideoDecoderMalfunctionErr:
977  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
978  return AVERROR(EINVAL);
979  case kVTVideoDecoderBadDataErr:
980  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
981  return AVERROR_INVALIDDATA;
982  case 0:
983  return 0;
984  default:
985  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
986  return AVERROR_UNKNOWN;
987  }
988 }
989 
990 static const char *videotoolbox_error_string(OSStatus status)
991 {
992  switch (status) {
993  case kVTVideoDecoderBadDataErr:
994  return "bad data";
995  case kVTVideoDecoderMalfunctionErr:
996  return "decoder malfunction";
997  case kVTInvalidSessionErr:
998  return "invalid session";
999  }
1000  return "unknown";
1001 }
1002 
1003 int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
1004 {
1005  OSStatus status;
1006  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1007  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1008 
1009  if (vtctx->reconfig_needed == true) {
1010  vtctx->reconfig_needed = false;
1011  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1012  videotoolbox_stop(avctx);
1013  if (videotoolbox_start(avctx) != 0) {
1014  return AVERROR_EXTERNAL;
1015  }
1016  }
1017 
1018  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1019  return AVERROR_INVALIDDATA;
1020 
1021  status = videotoolbox_session_decode_frame(avctx);
1022  if (status != noErr) {
1023  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1024  vtctx->reconfig_needed = true;
1025  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1026  return AVERROR_UNKNOWN;
1027  }
1028 
1029  if (!vtctx->frame) {
1030  vtctx->reconfig_needed = true;
1031  return AVERROR_UNKNOWN;
1032  }
1033 
1034  return videotoolbox_buffer_create(avctx, frame);
1035 }
1036 
1037 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1038 {
1039  H264Context *h = avctx->priv_data;
1040  AVFrame *frame = h->cur_pic_ptr->f;
1041  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1042  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1043  vtctx->bitstream_size = 0;
1044  return ret;
1045 }
1046 
1047 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1048  const uint8_t *buffer,
1049  uint32_t size)
1050 {
1051  return 0;
1052 }
1053 
1054 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1055  const uint8_t *buffer,
1056  uint32_t size)
1057 {
1058  return videotoolbox_common_decode_slice(avctx, buffer, size);
1059 }
1060 
1061 
1062 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1063  int type,
1064  const uint8_t *buffer,
1065  uint32_t size)
1066 {
1067  return videotoolbox_common_decode_slice(avctx, buffer, size);
1068 }
1069 
1070 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1071 {
1072  HEVCContext *h = avctx->priv_data;
1073  AVFrame *frame = h->ref->frame;
1074  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1075 
1076  h->output_frame->crop_right = 0;
1077  h->output_frame->crop_left = 0;
1078  h->output_frame->crop_top = 0;
1079  h->output_frame->crop_bottom = 0;
1080 
1081  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1082  vtctx->bitstream_size = 0;
1083  return ret;
1084 }
1085 
1086 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1087  const uint8_t *buffer,
1088  uint32_t size)
1089 {
1090  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1091 
1092  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1093 }
1094 
1095 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1096  const uint8_t *buffer,
1097  uint32_t size)
1098 {
1099  return 0;
1100 }
1101 
1102 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1103 {
1104  MpegEncContext *s = avctx->priv_data;
1105  AVFrame *frame = s->current_picture_ptr->f;
1106 
1107  return ff_videotoolbox_common_end_frame(avctx, frame);
1108 }
1109 
1110 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1111  const uint8_t *buffer,
1112  uint32_t size)
1113 {
1114  return 0;
1115 }
1116 
1117 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1118  const uint8_t *buffer,
1119  uint32_t size)
1120 {
1121  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1122 
1123  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1124 }
1125 
1126 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1127 {
1128  ProresContext *ctx = avctx->priv_data;
1129  AVFrame *frame = ctx->frame;
1130 
1131  return ff_videotoolbox_common_end_frame(avctx, frame);
1132 }
1133 
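/*
 * Pick the CoreVideo-backed sw_format that best matches the decoder's
 * nominal pixel format: alpha content maps to AYUV64; otherwise chroma
 * subsampling and bit depth select among the NV12/NV16/NV24 and
 * P010/P210/P410/P216/P416 families, depending on which pixel format
 * constants the SDK provides.
 */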
1134 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1135  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
1136  if (!descriptor)
1137  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1138 
1139  int depth = descriptor->comp[0].depth;
1140 
1141  if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
1142  return AV_PIX_FMT_AYUV64;
1143 
1144 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
1145  if (depth > 10)
1146  return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
1147 #endif
1148 
1149 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
1150  if (descriptor->log2_chroma_w == 0) {
1151 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
1152  if (depth <= 8)
1153  return AV_PIX_FMT_NV24;
1154 #endif
1155  return AV_PIX_FMT_P410;
1156  }
1157 #endif
1158 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
1159  if (descriptor->log2_chroma_h == 0) {
1160 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
1161  if (depth <= 8)
1162  return AV_PIX_FMT_NV16;
1163 #endif
1164  return AV_PIX_FMT_P210;
1165  }
1166 #endif
1167 #if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
1168  if (depth > 8) {
1169  return AV_PIX_FMT_P010;
1170  }
1171 #endif
1172 
1173  return AV_PIX_FMT_NV12;
1174 }
1175 
1176 int ff_videotoolbox_common_init(AVCodecContext *avctx)
1177 {
1178  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1179  AVHWFramesContext *hw_frames;
1180  int err;
1181 
1182  // Old API - do nothing.
1183  if (avctx->hwaccel_context)
1184  return 0;
1185 
1186  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1187  av_log(avctx, AV_LOG_ERROR,
1188  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1189  return AVERROR(EINVAL);
1190  }
1191 
1192  vtctx->vt_ctx = av_videotoolbox_alloc_context();
1193  if (!vtctx->vt_ctx) {
1194  err = AVERROR(ENOMEM);
1195  goto fail;
1196  }
1197 
1198  if (avctx->hw_frames_ctx) {
1199  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1200  } else {
1201  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1202  if (!avctx->hw_frames_ctx) {
1203  err = AVERROR(ENOMEM);
1204  goto fail;
1205  }
1206 
1207  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1208  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1209  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1210  hw_frames->width = avctx->width;
1211  hw_frames->height = avctx->height;
1212 
1213  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1214  if (err < 0) {
1215  av_buffer_unref(&avctx->hw_frames_ctx);
1216  goto fail;
1217  }
1218  }
1219 
1220  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1221  if (!vtctx->cached_hw_frames_ctx) {
1222  err = AVERROR(ENOMEM);
1223  goto fail;
1224  }
1225 
1226  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1227  vtctx->vt_ctx->cv_pix_fmt_type =
1228  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1229  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1230  const AVPixFmtDescriptor *attempted_format =
1231  av_pix_fmt_desc_get(hw_frames->sw_format);
1232  av_log(avctx, AV_LOG_ERROR,
1233  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1234  "a VideoToolbox format!\n",
1235  attempted_format ? attempted_format->name : "<unknown>",
1236  av_color_range_name(avctx->color_range));
1237  err = AVERROR(EINVAL);
1238  goto fail;
1239  }
1240 
1241  err = videotoolbox_start(avctx);
1242  if (err < 0)
1243  goto fail;
1244 
1245  return 0;
1246 
1247 fail:
1248  ff_videotoolbox_uninit(avctx);
1249  return err;
1250 }
1251 
1252 int ff_videotoolbox_frame_params(AVCodecContext *avctx,
1253  AVBufferRef *hw_frames_ctx)
1254 {
1255  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1256 
1257  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1258  frames_ctx->width = avctx->coded_width;
1259  frames_ctx->height = avctx->coded_height;
1260  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1261 
1262  return 0;
1263 }
1264 
1265 const AVHWAccel ff_h263_videotoolbox_hwaccel = {
1266  .name = "h263_videotoolbox",
1267  .type = AVMEDIA_TYPE_VIDEO,
1268  .id = AV_CODEC_ID_H263,
1269  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1270  .alloc_frame = ff_videotoolbox_alloc_frame,
1271  .start_frame = videotoolbox_mpeg_start_frame,
1272  .decode_slice = videotoolbox_mpeg_decode_slice,
1273  .end_frame = videotoolbox_mpeg_end_frame,
1274  .frame_params = ff_videotoolbox_frame_params,
1275  .init = ff_videotoolbox_common_init,
1276  .uninit = ff_videotoolbox_uninit,
1277  .priv_data_size = sizeof(VTContext),
1278 };
1279 
1280 const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
1281  .name = "hevc_videotoolbox",
1282  .type = AVMEDIA_TYPE_VIDEO,
1283  .id = AV_CODEC_ID_HEVC,
1284  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1285  .alloc_frame = ff_videotoolbox_alloc_frame,
1286  .start_frame = videotoolbox_hevc_start_frame,
1287  .decode_slice = videotoolbox_hevc_decode_slice,
1288  .decode_params = videotoolbox_hevc_decode_params,
1289  .end_frame = videotoolbox_hevc_end_frame,
1290  .frame_params = ff_videotoolbox_frame_params,
1291  .init = ff_videotoolbox_common_init,
1292  .uninit = ff_videotoolbox_uninit,
1293  .priv_data_size = sizeof(VTContext),
1294 };
1295 
1296 const AVHWAccel ff_h264_videotoolbox_hwaccel = {
1297  .name = "h264_videotoolbox",
1298  .type = AVMEDIA_TYPE_VIDEO,
1299  .id = AV_CODEC_ID_H264,
1300  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1301  .alloc_frame = ff_videotoolbox_alloc_frame,
1302  .start_frame = ff_videotoolbox_h264_start_frame,
1303  .decode_slice = ff_videotoolbox_h264_decode_slice,
1304  .decode_params = videotoolbox_h264_decode_params,
1305  .end_frame = videotoolbox_h264_end_frame,
1306  .frame_params = ff_videotoolbox_frame_params,
1307  .init = ff_videotoolbox_common_init,
1308  .uninit = ff_videotoolbox_uninit,
1309  .priv_data_size = sizeof(VTContext),
1310 };
1311 
1312 const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1313  .name = "mpeg1_videotoolbox",
1314  .type = AVMEDIA_TYPE_VIDEO,
1315  .id = AV_CODEC_ID_MPEG1VIDEO,
1316  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1317  .alloc_frame = ff_videotoolbox_alloc_frame,
1318  .start_frame = videotoolbox_mpeg_start_frame,
1319  .decode_slice = videotoolbox_mpeg_decode_slice,
1320  .end_frame = videotoolbox_mpeg_end_frame,
1321  .frame_params = ff_videotoolbox_frame_params,
1322  .init = ff_videotoolbox_common_init,
1323  .uninit = ff_videotoolbox_uninit,
1324  .priv_data_size = sizeof(VTContext),
1325 };
1326 
1327 const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1328  .name = "mpeg2_videotoolbox",
1329  .type = AVMEDIA_TYPE_VIDEO,
1330  .id = AV_CODEC_ID_MPEG2VIDEO,
1331  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1332  .alloc_frame = ff_videotoolbox_alloc_frame,
1333  .start_frame = videotoolbox_mpeg_start_frame,
1334  .decode_slice = videotoolbox_mpeg_decode_slice,
1335  .end_frame = videotoolbox_mpeg_end_frame,
1336  .frame_params = ff_videotoolbox_frame_params,
1337  .init = ff_videotoolbox_common_init,
1338  .uninit = ff_videotoolbox_uninit,
1339  .priv_data_size = sizeof(VTContext),
1340 };
1341 
1342 const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1343  .name = "mpeg4_videotoolbox",
1344  .type = AVMEDIA_TYPE_VIDEO,
1345  .id = AV_CODEC_ID_MPEG4,
1346  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1347  .alloc_frame = ff_videotoolbox_alloc_frame,
1348  .start_frame = videotoolbox_mpeg_start_frame,
1349  .decode_slice = videotoolbox_mpeg_decode_slice,
1350  .end_frame = videotoolbox_mpeg_end_frame,
1351  .frame_params = ff_videotoolbox_frame_params,
1352  .init = ff_videotoolbox_common_init,
1353  .uninit = ff_videotoolbox_uninit,
1354  .priv_data_size = sizeof(VTContext),
1355 };
1356 
1357 const AVHWAccel ff_prores_videotoolbox_hwaccel = {
1358  .name = "prores_videotoolbox",
1359  .type = AVMEDIA_TYPE_VIDEO,
1360  .id = AV_CODEC_ID_PRORES,
1361  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1362  .alloc_frame = ff_videotoolbox_alloc_frame,
1363  .start_frame = videotoolbox_prores_start_frame,
1364  .decode_slice = videotoolbox_prores_decode_slice,
1365  .end_frame = videotoolbox_prores_end_frame,
1366  .frame_params = ff_videotoolbox_frame_params,
1367  .init = ff_videotoolbox_common_init,
1368  .uninit = ff_videotoolbox_uninit,
1369  .priv_data_size = sizeof(VTContext),
1370 };
1371 
1372 static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1373  bool full_range)
1374 {
1375  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1376 
1377  if (ret) {
1378  ret->output_callback = videotoolbox_decoder_callback;
1379 
1380  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1381  if (cv_pix_fmt_type == 0) {
1382  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1383  }
1384  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1385  }
1386 
1387  return ret;
1388 }
1389 
1390 AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
1391 {
1392  return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1393 }
1394 
1395 int av_videotoolbox_default_init(AVCodecContext *avctx)
1396 {
1397  return av_videotoolbox_default_init2(avctx, NULL);
1398 }
1399 
1400 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
1401 {
1402  enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
1403  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1404  avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
1405  if (!avctx->hwaccel_context)
1406  return AVERROR(ENOMEM);
1407  return videotoolbox_start(avctx);
1408 }
1409 
1410 void av_videotoolbox_default_free(AVCodecContext *avctx)
1411 {
1412 
1413  videotoolbox_stop(avctx);
1414  av_freep(&avctx->hwaccel_context);
1415 }
1416 #endif /* CONFIG_VIDEOTOOLBOX */