FFmpeg
videotoolboxenc.c
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "h264.h"
35 #include "h264_sei.h"
36 #include <dlfcn.h>
37 
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
39 enum { kCMVideoCodecType_HEVC = 'hvc1' };
40 #endif
41 
42 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
43 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
44 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange  = 'xf20' };
45 #endif
46 
47 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
48  size_t parameterSetIndex,
49  const uint8_t **parameterSetPointerOut,
50  size_t *parameterSetSizeOut,
51  size_t *parameterSetCountOut,
52  int *NALUnitHeaderLengthOut);
53 
54 //These symbols may not be present
55 static struct{
56  CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
57  CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
58  CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
59 
60  CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
61  CFStringRef kVTH264EntropyMode_CAVLC;
62  CFStringRef kVTH264EntropyMode_CABAC;
63 
64  CFStringRef kVTProfileLevel_H264_Baseline_4_0;
65  CFStringRef kVTProfileLevel_H264_Baseline_4_2;
66  CFStringRef kVTProfileLevel_H264_Baseline_5_0;
67  CFStringRef kVTProfileLevel_H264_Baseline_5_1;
68  CFStringRef kVTProfileLevel_H264_Baseline_5_2;
69  CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
70  CFStringRef kVTProfileLevel_H264_Main_4_2;
71  CFStringRef kVTProfileLevel_H264_Main_5_1;
72  CFStringRef kVTProfileLevel_H264_Main_5_2;
73  CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
74  CFStringRef kVTProfileLevel_H264_High_3_0;
75  CFStringRef kVTProfileLevel_H264_High_3_1;
76  CFStringRef kVTProfileLevel_H264_High_3_2;
77  CFStringRef kVTProfileLevel_H264_High_4_0;
78  CFStringRef kVTProfileLevel_H264_High_4_1;
79  CFStringRef kVTProfileLevel_H264_High_4_2;
80  CFStringRef kVTProfileLevel_H264_High_5_1;
81  CFStringRef kVTProfileLevel_H264_High_5_2;
82  CFStringRef kVTProfileLevel_H264_High_AutoLevel;
83  CFStringRef kVTProfileLevel_H264_Extended_5_0;
84  CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;
85 
86  CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
87  CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
88 
89  CFStringRef kVTCompressionPropertyKey_RealTime;
90 
91  CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
92  CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
93 
94  getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
95 } compat_keys;
96 
97 #define GET_SYM(symbol, defaultVal) \
98 do{ \
99  CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol); \
100  if(!handle) \
101  compat_keys.symbol = CFSTR(defaultVal); \
102  else \
103  compat_keys.symbol = *handle; \
104 }while(0)
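/*
 * Editorial note (illustrative, not part of the upstream file): GET_SYM
 * resolves a VideoToolbox/CoreVideo constant at run time with dlsym() so this
 * file still loads on OS releases that do not export the symbol. For example,
 * GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime") behaves roughly like:
 *
 *     CFStringRef *handle = (CFStringRef*)dlsym(RTLD_DEFAULT,
 *                                               "kVTCompressionPropertyKey_RealTime");
 *     compat_keys.kVTCompressionPropertyKey_RealTime =
 *         handle ? *handle : CFSTR("RealTime");
 *
 * i.e. the real constant is preferred and a CFSTR() literal with the expected
 * value is the fallback.
 */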
105 
106 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
107 
108 static void loadVTEncSymbols(){
109  compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
110  (getParameterSetAtIndex)dlsym(
111  RTLD_DEFAULT,
112  "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
113  );
114 
115  GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
116  GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
117  GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
118 
119  GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
120  GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
121  GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
122 
123  GET_SYM(kVTProfileLevel_H264_Baseline_4_0, "H264_Baseline_4_0");
124  GET_SYM(kVTProfileLevel_H264_Baseline_4_2, "H264_Baseline_4_2");
125  GET_SYM(kVTProfileLevel_H264_Baseline_5_0, "H264_Baseline_5_0");
126  GET_SYM(kVTProfileLevel_H264_Baseline_5_1, "H264_Baseline_5_1");
127  GET_SYM(kVTProfileLevel_H264_Baseline_5_2, "H264_Baseline_5_2");
128  GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
129  GET_SYM(kVTProfileLevel_H264_Main_4_2, "H264_Main_4_2");
130  GET_SYM(kVTProfileLevel_H264_Main_5_1, "H264_Main_5_1");
131  GET_SYM(kVTProfileLevel_H264_Main_5_2, "H264_Main_5_2");
132  GET_SYM(kVTProfileLevel_H264_Main_AutoLevel, "H264_Main_AutoLevel");
133  GET_SYM(kVTProfileLevel_H264_High_3_0, "H264_High_3_0");
134  GET_SYM(kVTProfileLevel_H264_High_3_1, "H264_High_3_1");
135  GET_SYM(kVTProfileLevel_H264_High_3_2, "H264_High_3_2");
136  GET_SYM(kVTProfileLevel_H264_High_4_0, "H264_High_4_0");
137  GET_SYM(kVTProfileLevel_H264_High_4_1, "H264_High_4_1");
138  GET_SYM(kVTProfileLevel_H264_High_4_2, "H264_High_4_2");
139  GET_SYM(kVTProfileLevel_H264_High_5_1, "H264_High_5_1");
140  GET_SYM(kVTProfileLevel_H264_High_5_2, "H264_High_5_2");
141  GET_SYM(kVTProfileLevel_H264_High_AutoLevel, "H264_High_AutoLevel");
142  GET_SYM(kVTProfileLevel_H264_Extended_5_0, "H264_Extended_5_0");
143  GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");
144 
145  GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel, "HEVC_Main_AutoLevel");
146  GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel, "HEVC_Main10_AutoLevel");
147 
148  GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
149 
150  GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
151  "EnableHardwareAcceleratedVideoEncoder");
152  GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
153  "RequireHardwareAcceleratedVideoEncoder");
154 }
155 
156 typedef enum VT_H264Profile {
157  H264_PROF_AUTO,
158  H264_PROF_BASELINE,
159  H264_PROF_MAIN,
160  H264_PROF_HIGH,
161  H264_PROF_EXTENDED,
162  H264_PROF_COUNT
163 } VT_H264Profile;
164 
165 typedef enum VTH264Entropy{
166  VT_ENTROPY_NOT_SET,
167  VT_CAVLC,
168  VT_CABAC
169 } VTH264Entropy;
170 
171 typedef enum VT_HEVCProfile {
172  HEVC_PROF_AUTO,
173  HEVC_PROF_MAIN,
174  HEVC_PROF_MAIN10,
175  HEVC_PROF_COUNT
176 } VT_HEVCProfile;
177 
178 static const uint8_t start_code[] = { 0, 0, 0, 1 };
179 
180 typedef struct ExtraSEI {
181  void *data;
182  size_t size;
183 } ExtraSEI;
184 
185 typedef struct BufNode {
186  CMSampleBufferRef cm_buffer;
187  ExtraSEI* sei;
188  struct BufNode* next;
189  int error;
190 } BufNode;
191 
192 typedef struct VTEncContext {
193  AVClass *class;
194  enum AVCodecID codec_id;
195  VTCompressionSessionRef session;
196  CFStringRef ycbcr_matrix;
197  CFStringRef color_primaries;
198  CFStringRef transfer_function;
199  getParameterSetAtIndex get_param_set_func;
200 
201  pthread_mutex_t lock;
202  pthread_cond_t  cv_sample_sent;
203 
204  int async_error;
205 
206  BufNode *q_head;
207  BufNode *q_tail;
208 
209  int64_t frame_ct_out;
210  int64_t frame_ct_in;
211 
212  int64_t first_pts;
213  int64_t dts_delta;
214 
215  int64_t profile;
216  int64_t level;
217  int64_t entropy;
218  int64_t realtime;
219  int64_t frames_before;
220  int64_t frames_after;
221 
222  int64_t allow_sw;
223  int64_t require_sw;
224 
225  bool flushing;
226  bool has_b_frames;
227  bool warned_color_range;
228  bool a53_cc;
229 } VTEncContext;
230 
231 static int vtenc_populate_extradata(AVCodecContext *avctx,
232  CMVideoCodecType codec_type,
233  CFStringRef profile_level,
234  CFNumberRef gamma_level,
235  CFDictionaryRef enc_info,
236  CFDictionaryRef pixel_buffer_info);
237 
238 /**
239  * NULL-safe release of *refPtr, and sets value to NULL.
240  */
241 static void vt_release_num(CFNumberRef* refPtr){
242  if (!*refPtr) {
243  return;
244  }
245 
246  CFRelease(*refPtr);
247  *refPtr = NULL;
248 }
249 
250 static void set_async_error(VTEncContext *vtctx, int err)
251 {
252  BufNode *info;
253 
254  pthread_mutex_lock(&vtctx->lock);
255 
256  vtctx->async_error = err;
257 
258  info = vtctx->q_head;
259  vtctx->q_head = vtctx->q_tail = NULL;
260 
261  while (info) {
262  BufNode *next = info->next;
263  CFRelease(info->cm_buffer);
264  av_free(info);
265  info = next;
266  }
267 
268  pthread_mutex_unlock(&vtctx->lock);
269 }
270 
271 static void clear_frame_queue(VTEncContext *vtctx)
272 {
273  set_async_error(vtctx, 0);
274 }
275 
276 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
277 {
278  BufNode *info;
279 
280  pthread_mutex_lock(&vtctx->lock);
281 
282  if (vtctx->async_error) {
283  pthread_mutex_unlock(&vtctx->lock);
284  return vtctx->async_error;
285  }
286 
287  if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
288  *buf = NULL;
289 
290  pthread_mutex_unlock(&vtctx->lock);
291  return 0;
292  }
293 
294  while (!vtctx->q_head && !vtctx->async_error && wait) {
295  pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
296  }
297 
298  if (!vtctx->q_head) {
299  pthread_mutex_unlock(&vtctx->lock);
300  *buf = NULL;
301  return 0;
302  }
303 
304  info = vtctx->q_head;
305  vtctx->q_head = vtctx->q_head->next;
306  if (!vtctx->q_head) {
307  vtctx->q_tail = NULL;
308  }
309 
310  pthread_mutex_unlock(&vtctx->lock);
311 
312  *buf = info->cm_buffer;
313  if (sei && *buf) {
314  *sei = info->sei;
315  } else if (info->sei) {
316  if (info->sei->data) av_free(info->sei->data);
317  av_free(info->sei);
318  }
319  av_free(info);
320 
321  vtctx->frame_ct_out++;
322 
323  return 0;
324 }
325 
326 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
327 {
328  BufNode *info = av_malloc(sizeof(BufNode));
329  if (!info) {
330  set_async_error(vtctx, AVERROR(ENOMEM));
331  return;
332  }
333 
334  CFRetain(buffer);
335  info->cm_buffer = buffer;
336  info->sei = sei;
337  info->next = NULL;
338 
339  pthread_mutex_lock(&vtctx->lock);
340  pthread_cond_signal(&vtctx->cv_sample_sent);
341 
342  if (!vtctx->q_head) {
343  vtctx->q_head = info;
344  } else {
345  vtctx->q_tail->next = info;
346  }
347 
348  vtctx->q_tail = info;
349 
350  pthread_mutex_unlock(&vtctx->lock);
351 }
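/*
 * Editorial note (illustrative sketch, not part of the upstream file):
 * vtenc_q_push() runs on VideoToolbox's encoder callback thread; the encode
 * path drains the queue from the other side, roughly:
 *
 *     CMSampleBufferRef buf;
 *     ExtraSEI *sei = NULL;
 *     int ret = vtenc_q_pop(vtctx, true, &buf, &sei); // blocks on cv_sample_sent
 *     if (ret)  return ret;   // asynchronous encode error
 *     if (!buf) return 0;     // flushing finished, queue drained
 *     // ... convert buf into an AVPacket, then CFRelease(buf) ...
 *
 * The pthread_cond_signal() taken under the lock above is what wakes the
 * waiting consumer.
 */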
352 
353 static int count_nalus(size_t length_code_size,
354  CMSampleBufferRef sample_buffer,
355  int *count)
356 {
357  size_t offset = 0;
358  int status;
359  int nalu_ct = 0;
360  uint8_t size_buf[4];
361  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
362  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
363 
364  if (length_code_size > 4)
365  return AVERROR_INVALIDDATA;
366 
367  while (offset < src_size) {
368  size_t curr_src_len;
369  size_t box_len = 0;
370  size_t i;
371 
372  status = CMBlockBufferCopyDataBytes(block,
373  offset,
374  length_code_size,
375  size_buf);
376 
377  for (i = 0; i < length_code_size; i++) {
378  box_len <<= 8;
379  box_len |= size_buf[i];
380  }
381 
382  curr_src_len = box_len + length_code_size;
383  offset += curr_src_len;
384 
385  nalu_ct++;
386  }
387 
388  *count = nalu_ct;
389  return 0;
390 }
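/*
 * Editorial note (worked example, not part of the upstream file):
 * VideoToolbox emits AVCC-style buffers in which every NAL unit is preceded
 * by a big-endian length field of length_code_size bytes. With
 * length_code_size == 4, the prefix bytes { 0x00, 0x00, 0x01, 0x10 } decode
 * to box_len = 0x110 = 272, so the loop above advances offset by 4 + 272
 * bytes and counts one NAL unit.
 */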
391 
392 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
393 {
394  switch (id) {
395  case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
396  case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
397  default: return 0;
398  }
399 }
400 
401 /**
402  * Get the parameter sets from a CMSampleBufferRef.
403  * @param dst If *dst isn't NULL, the parameters are copied into existing
404  * memory. *dst_size must be set accordingly when *dst != NULL.
405  * If *dst is NULL, it will be allocated.
406  * In all cases, *dst_size is set to the number of bytes used starting
407  * at *dst.
408  */
409 static int get_params_size(
410  AVCodecContext *avctx,
411  CMVideoFormatDescriptionRef vid_fmt,
412  size_t *size)
413 {
414  VTEncContext *vtctx = avctx->priv_data;
415  size_t total_size = 0;
416  size_t ps_count;
417  int is_count_bad = 0;
418  size_t i;
419  int status;
420  status = vtctx->get_param_set_func(vid_fmt,
421  0,
422  NULL,
423  NULL,
424  &ps_count,
425  NULL);
426  if (status) {
427  is_count_bad = 1;
428  ps_count = 0;
429  status = 0;
430  }
431 
432  for (i = 0; i < ps_count || is_count_bad; i++) {
433  const uint8_t *ps;
434  size_t ps_size;
435  status = vtctx->get_param_set_func(vid_fmt,
436  i,
437  &ps,
438  &ps_size,
439  NULL,
440  NULL);
441  if (status) {
442  /*
443  * When ps_count is invalid, status != 0 ends the loop normally
444  * unless we didn't get any parameter sets.
445  */
446  if (i > 0 && is_count_bad) status = 0;
447 
448  break;
449  }
450 
451  total_size += ps_size + sizeof(start_code);
452  }
453 
454  if (status) {
455  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
456  return AVERROR_EXTERNAL;
457  }
458 
459  *size = total_size;
460  return 0;
461 }
462 
463 static int copy_param_sets(
464  AVCodecContext *avctx,
465  CMVideoFormatDescriptionRef vid_fmt,
466  uint8_t *dst,
467  size_t dst_size)
468 {
469  VTEncContext *vtctx = avctx->priv_data;
470  size_t ps_count;
471  int is_count_bad = 0;
472  int status;
473  size_t offset = 0;
474  size_t i;
475 
476  status = vtctx->get_param_set_func(vid_fmt,
477  0,
478  NULL,
479  NULL,
480  &ps_count,
481  NULL);
482  if (status) {
483  is_count_bad = 1;
484  ps_count = 0;
485  status = 0;
486  }
487 
488 
489  for (i = 0; i < ps_count || is_count_bad; i++) {
490  const uint8_t *ps;
491  size_t ps_size;
492  size_t next_offset;
493 
494  status = vtctx->get_param_set_func(vid_fmt,
495  i,
496  &ps,
497  &ps_size,
498  NULL,
499  NULL);
500  if (status) {
501  if (i > 0 && is_count_bad) status = 0;
502 
503  break;
504  }
505 
506  next_offset = offset + sizeof(start_code) + ps_size;
507  if (dst_size < next_offset) {
508  av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
509  return AVERROR_BUFFER_TOO_SMALL;
510  }
511 
512  memcpy(dst + offset, start_code, sizeof(start_code));
513  offset += sizeof(start_code);
514 
515  memcpy(dst + offset, ps, ps_size);
516  offset = next_offset;
517  }
518 
519  if (status) {
520  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
521  return AVERROR_EXTERNAL;
522  }
523 
524  return 0;
525 }
526 
527 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
528 {
529  CMVideoFormatDescriptionRef vid_fmt;
530  size_t total_size;
531  int status;
532 
533  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
534  if (!vid_fmt) {
535  av_log(avctx, AV_LOG_ERROR, "No video format.\n");
536  return AVERROR_EXTERNAL;
537  }
538 
539  status = get_params_size(avctx, vid_fmt, &total_size);
540  if (status) {
541  av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
542  return status;
543  }
544 
545  avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
546  if (!avctx->extradata) {
547  return AVERROR(ENOMEM);
548  }
549  avctx->extradata_size = total_size;
550 
551  status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
552 
553  if (status) {
554  av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
555  return status;
556  }
557 
558  return 0;
559 }
560 
561 static void vtenc_output_callback(
562  void *ctx,
563  void *sourceFrameCtx,
564  OSStatus status,
565  VTEncodeInfoFlags flags,
566  CMSampleBufferRef sample_buffer)
567 {
568  AVCodecContext *avctx = ctx;
569  VTEncContext *vtctx = avctx->priv_data;
570  ExtraSEI *sei = sourceFrameCtx;
571 
572  if (vtctx->async_error) {
573  if(sample_buffer) CFRelease(sample_buffer);
574  return;
575  }
576 
577  if (status) {
578  av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
579  set_async_error(vtctx, AVERROR_EXTERNAL);
580  return;
581  }
582 
583  if (!sample_buffer) {
584  return;
585  }
586 
587  if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
588  int set_status = set_extradata(avctx, sample_buffer);
589  if (set_status) {
590  set_async_error(vtctx, set_status);
591  return;
592  }
593  }
594 
595  vtenc_q_push(vtctx, sample_buffer, sei);
596 }
597 
598 static int get_length_code_size(
599  AVCodecContext *avctx,
600  CMSampleBufferRef sample_buffer,
601  size_t *size)
602 {
603  VTEncContext *vtctx = avctx->priv_data;
604  CMVideoFormatDescriptionRef vid_fmt;
605  int isize;
606  int status;
607 
608  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
609  if (!vid_fmt) {
610  av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
611  return AVERROR_EXTERNAL;
612  }
613 
614  status = vtctx->get_param_set_func(vid_fmt,
615  0,
616  NULL,
617  NULL,
618  NULL,
619  &isize);
620  if (status) {
621  av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
622  return AVERROR_EXTERNAL;
623  }
624 
625  *size = isize;
626  return 0;
627 }
628 
629 /*
630  * Returns true on success.
631  *
632  * If profile_level_val is NULL and this method returns true, don't specify the
633  * profile/level to the encoder.
634  */
635 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
636  CFStringRef *profile_level_val)
637 {
638  VTEncContext *vtctx = avctx->priv_data;
639  int64_t profile = vtctx->profile;
640 
641  if (profile == H264_PROF_AUTO && vtctx->level) {
642  //Need to pick a profile if level is not auto-selected.
643  profile = H264_PROF_MAIN;
644  }
645 
646  *profile_level_val = NULL;
647 
648  switch (profile) {
649  case H264_PROF_AUTO:
650  return true;
651 
652  case H264_PROF_BASELINE:
653  switch (vtctx->level) {
654  case 0: *profile_level_val =
655  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
656  case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
657  case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
658  case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
659  case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
660  case 40: *profile_level_val =
661  compat_keys.kVTProfileLevel_H264_Baseline_4_0; break;
662  case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
663  case 42: *profile_level_val =
664  compat_keys.kVTProfileLevel_H264_Baseline_4_2; break;
665  case 50: *profile_level_val =
666  compat_keys.kVTProfileLevel_H264_Baseline_5_0; break;
667  case 51: *profile_level_val =
668  compat_keys.kVTProfileLevel_H264_Baseline_5_1; break;
669  case 52: *profile_level_val =
670  compat_keys.kVTProfileLevel_H264_Baseline_5_2; break;
671  }
672  break;
673 
674  case H264_PROF_MAIN:
675  switch (vtctx->level) {
676  case 0: *profile_level_val =
677  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
678  case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
679  case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
680  case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
681  case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
682  case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
683  case 42: *profile_level_val =
684  compat_keys.kVTProfileLevel_H264_Main_4_2; break;
685  case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
686  case 51: *profile_level_val =
687  compat_keys.kVTProfileLevel_H264_Main_5_1; break;
688  case 52: *profile_level_val =
689  compat_keys.kVTProfileLevel_H264_Main_5_2; break;
690  }
691  break;
692 
693  case H264_PROF_HIGH:
694  switch (vtctx->level) {
695  case 0: *profile_level_val =
696  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
697  case 30: *profile_level_val =
698  compat_keys.kVTProfileLevel_H264_High_3_0; break;
699  case 31: *profile_level_val =
700  compat_keys.kVTProfileLevel_H264_High_3_1; break;
701  case 32: *profile_level_val =
702  compat_keys.kVTProfileLevel_H264_High_3_2; break;
703  case 40: *profile_level_val =
704  compat_keys.kVTProfileLevel_H264_High_4_0; break;
705  case 41: *profile_level_val =
706  compat_keys.kVTProfileLevel_H264_High_4_1; break;
707  case 42: *profile_level_val =
708  compat_keys.kVTProfileLevel_H264_High_4_2; break;
709  case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
710  case 51: *profile_level_val =
711  compat_keys.kVTProfileLevel_H264_High_5_1; break;
712  case 52: *profile_level_val =
713  compat_keys.kVTProfileLevel_H264_High_5_2; break;
714  }
715  break;
716  case H264_PROF_EXTENDED:
717  switch (vtctx->level) {
718  case 0: *profile_level_val =
719  compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
720  case 50: *profile_level_val =
721  compat_keys.kVTProfileLevel_H264_Extended_5_0; break;
722  }
723  break;
724  }
725 
726  if (!*profile_level_val) {
727  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
728  return false;
729  }
730 
731  return true;
732 }
733 
734 /*
735  * Returns true on success.
736  *
737  * If profile_level_val is NULL and this method returns true, don't specify the
738  * profile/level to the encoder.
739  */
740 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
741  CFStringRef *profile_level_val)
742 {
743  VTEncContext *vtctx = avctx->priv_data;
744  int64_t profile = vtctx->profile;
745 
746  *profile_level_val = NULL;
747 
748  switch (profile) {
749  case HEVC_PROF_AUTO:
750  return true;
751  case HEVC_PROF_MAIN:
752  *profile_level_val =
753  compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
754  break;
755  case HEVC_PROF_MAIN10:
756  *profile_level_val =
757  compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
758  break;
759  }
760 
761  if (!*profile_level_val) {
762  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
763  return false;
764  }
765 
766  return true;
767 }
768 
769 static int get_cv_pixel_format(AVCodecContext* avctx,
770  enum AVPixelFormat fmt,
771  enum AVColorRange range,
772  int* av_pixel_format,
773  int* range_guessed)
774 {
775  if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
776  range != AVCOL_RANGE_JPEG;
777 
778  //MPEG range is used when no range is set
779  if (fmt == AV_PIX_FMT_NV12) {
780  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
781  kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
782  kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
783  } else if (fmt == AV_PIX_FMT_YUV420P) {
784  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
785  kCVPixelFormatType_420YpCbCr8PlanarFullRange :
786  kCVPixelFormatType_420YpCbCr8Planar;
787  } else if (fmt == AV_PIX_FMT_P010LE) {
788  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
789  kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
790  kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
792  } else {
793  return AVERROR(EINVAL);
794  }
795 
796  return 0;
797 }
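/*
 * Editorial note (example, not part of the upstream file): an NV12 frame
 * tagged AVCOL_RANGE_JPEG maps to
 * kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, while an untagged NV12
 * frame falls back to the video-range variant and *range_guessed is set so
 * the caller can warn once about the guess.
 */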
798 
799 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
800  VTEncContext *vtctx = avctx->priv_data;
801 
802  if (vtctx->color_primaries) {
803  CFDictionarySetValue(dict,
804  kCVImageBufferColorPrimariesKey,
805  vtctx->color_primaries);
806  }
807 
808  if (vtctx->transfer_function) {
809  CFDictionarySetValue(dict,
810  kCVImageBufferTransferFunctionKey,
811  vtctx->transfer_function);
812  }
813 
814  if (vtctx->ycbcr_matrix) {
815  CFDictionarySetValue(dict,
816  kCVImageBufferYCbCrMatrixKey,
817  vtctx->ycbcr_matrix);
818  }
819 }
820 
821 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
822  CFMutableDictionaryRef* dict)
823 {
824  CFNumberRef cv_color_format_num = NULL;
825  CFNumberRef width_num = NULL;
826  CFNumberRef height_num = NULL;
827  CFMutableDictionaryRef pixel_buffer_info = NULL;
828  int cv_color_format;
829  int status = get_cv_pixel_format(avctx,
830  avctx->pix_fmt,
831  avctx->color_range,
832  &cv_color_format,
833  NULL);
834  if (status) return status;
835 
836  pixel_buffer_info = CFDictionaryCreateMutable(
837  kCFAllocatorDefault,
838  20,
839  &kCFCopyStringDictionaryKeyCallBacks,
840  &kCFTypeDictionaryValueCallBacks);
841 
842  if (!pixel_buffer_info) goto pbinfo_nomem;
843 
844  cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
845  kCFNumberSInt32Type,
846  &cv_color_format);
847  if (!cv_color_format_num) goto pbinfo_nomem;
848 
849  CFDictionarySetValue(pixel_buffer_info,
850  kCVPixelBufferPixelFormatTypeKey,
851  cv_color_format_num);
852  vt_release_num(&cv_color_format_num);
853 
854  width_num = CFNumberCreate(kCFAllocatorDefault,
855  kCFNumberSInt32Type,
856  &avctx->width);
857  if (!width_num) return AVERROR(ENOMEM);
858 
859  CFDictionarySetValue(pixel_buffer_info,
860  kCVPixelBufferWidthKey,
861  width_num);
862  vt_release_num(&width_num);
863 
864  height_num = CFNumberCreate(kCFAllocatorDefault,
865  kCFNumberSInt32Type,
866  &avctx->height);
867  if (!height_num) goto pbinfo_nomem;
868 
869  CFDictionarySetValue(pixel_buffer_info,
870  kCVPixelBufferHeightKey,
871  height_num);
872  vt_release_num(&height_num);
873 
874  add_color_attr(avctx, pixel_buffer_info);
875 
876  *dict = pixel_buffer_info;
877  return 0;
878 
879 pbinfo_nomem:
880  vt_release_num(&cv_color_format_num);
881  vt_release_num(&width_num);
882  vt_release_num(&height_num);
883  if (pixel_buffer_info) CFRelease(pixel_buffer_info);
884 
885  return AVERROR(ENOMEM);
886 }
887 
888 static int get_cv_color_primaries(AVCodecContext *avctx,
889  CFStringRef *primaries)
890 {
891  enum AVColorPrimaries pri = avctx->color_primaries;
892  switch (pri) {
893  case AVCOL_PRI_UNSPECIFIED:
894  *primaries = NULL;
895  break;
896 
897  case AVCOL_PRI_BT470BG:
898  *primaries = kCVImageBufferColorPrimaries_EBU_3213;
899  break;
900 
901  case AVCOL_PRI_SMPTE170M:
902  *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
903  break;
904 
905  case AVCOL_PRI_BT709:
906  *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
907  break;
908 
909  case AVCOL_PRI_BT2020:
910  *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
911  break;
912 
913  default:
914  av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
915  *primaries = NULL;
916  return -1;
917  }
918 
919  return 0;
920 }
921 
922 static int get_cv_transfer_function(AVCodecContext *avctx,
923  CFStringRef *transfer_fnc,
924  CFNumberRef *gamma_level)
925 {
926  enum AVColorTransferCharacteristic trc = avctx->color_trc;
927  Float32 gamma;
928  *gamma_level = NULL;
929 
930  switch (trc) {
931  case AVCOL_TRC_UNSPECIFIED:
932  *transfer_fnc = NULL;
933  break;
934 
935  case AVCOL_TRC_BT709:
936  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
937  break;
938 
939  case AVCOL_TRC_SMPTE240M:
940  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
941  break;
942 
943 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
944  case AVCOL_TRC_SMPTE2084:
945  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
946  break;
947 #endif
948 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
949  case AVCOL_TRC_LINEAR:
950  *transfer_fnc = kCVImageBufferTransferFunction_Linear;
951  break;
952 #endif
953 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
954  case AVCOL_TRC_ARIB_STD_B67:
955  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
956  break;
957 #endif
958 
959  case AVCOL_TRC_GAMMA22:
960  gamma = 2.2;
961  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
962  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
963  break;
964 
965  case AVCOL_TRC_GAMMA28:
966  gamma = 2.8;
967  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
968  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
969  break;
970 
971  case AVCOL_TRC_BT2020_10:
972  case AVCOL_TRC_BT2020_12:
973  *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
974  break;
975 
976  default:
977  *transfer_fnc = NULL;
978  av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
979  return -1;
980  }
981 
982  return 0;
983 }
984 
985 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
986  switch(avctx->colorspace) {
987  case AVCOL_SPC_BT709:
988  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
989  break;
990 
991  case AVCOL_SPC_UNSPECIFIED:
992  *matrix = NULL;
993  break;
994 
995  case AVCOL_SPC_BT470BG:
996  case AVCOL_SPC_SMPTE170M:
997  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
998  break;
999 
1000  case AVCOL_SPC_SMPTE240M:
1001  *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
1002  break;
1003 
1004  case AVCOL_SPC_BT2020_NCL:
1005  *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
1006  break;
1007 
1008  default:
1009  av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
1010  return -1;
1011  }
1012 
1013  return 0;
1014 }
1015 
1016 static int vtenc_create_encoder(AVCodecContext *avctx,
1017  CMVideoCodecType codec_type,
1018  CFStringRef profile_level,
1019  CFNumberRef gamma_level,
1020  CFDictionaryRef enc_info,
1021  CFDictionaryRef pixel_buffer_info,
1022  VTCompressionSessionRef *session)
1023 {
1024  VTEncContext *vtctx = avctx->priv_data;
1025  SInt32 bit_rate = avctx->bit_rate;
1026  SInt32 max_rate = avctx->rc_max_rate;
1027  CFNumberRef bit_rate_num;
1028  CFNumberRef bytes_per_second;
1029  CFNumberRef one_second;
1030  CFArrayRef data_rate_limits;
1031  int64_t bytes_per_second_value = 0;
1032  int64_t one_second_value = 0;
1033  void *nums[2];
1034 
1035  int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1036  avctx->width,
1037  avctx->height,
1038  codec_type,
1039  enc_info,
1040  pixel_buffer_info,
1041  kCFAllocatorDefault,
1042  vtenc_output_callback,
1043  avctx,
1044  session);
1045 
1046  if (status || !vtctx->session) {
1047  av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1048 
1049 #if !TARGET_OS_IPHONE
1050  if (!vtctx->allow_sw) {
1051  av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1052  }
1053 #endif
1054 
1055  return AVERROR_EXTERNAL;
1056  }
1057 
1058  bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1059  kCFNumberSInt32Type,
1060  &bit_rate);
1061  if (!bit_rate_num) return AVERROR(ENOMEM);
1062 
1063  status = VTSessionSetProperty(vtctx->session,
1064  kVTCompressionPropertyKey_AverageBitRate,
1065  bit_rate_num);
1066  CFRelease(bit_rate_num);
1067 
1068  if (status) {
1069  av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1070  return AVERROR_EXTERNAL;
1071  }
1072 
1073  if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1074  // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1075  bytes_per_second_value = max_rate >> 3;
1076  bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1077  kCFNumberSInt64Type,
1078  &bytes_per_second_value);
1079  if (!bytes_per_second) {
1080  return AVERROR(ENOMEM);
1081  }
1082  one_second_value = 1;
1083  one_second = CFNumberCreate(kCFAllocatorDefault,
1084  kCFNumberSInt64Type,
1085  &one_second_value);
1086  if (!one_second) {
1087  CFRelease(bytes_per_second);
1088  return AVERROR(ENOMEM);
1089  }
1090  nums[0] = (void *)bytes_per_second;
1091  nums[1] = (void *)one_second;
1092  data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1093  (const void **)nums,
1094  2,
1095  &kCFTypeArrayCallBacks);
1096 
1097  if (!data_rate_limits) {
1098  CFRelease(bytes_per_second);
1099  CFRelease(one_second);
1100  return AVERROR(ENOMEM);
1101  }
1102  status = VTSessionSetProperty(vtctx->session,
1103  kVTCompressionPropertyKey_DataRateLimits,
1104  data_rate_limits);
1105 
1106  CFRelease(bytes_per_second);
1107  CFRelease(one_second);
1108  CFRelease(data_rate_limits);
1109 
1110  if (status) {
1111  av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1112  return AVERROR_EXTERNAL;
1113  }
1114  }
1115 
1116  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1117  // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1118  if (profile_level) {
1119  status = VTSessionSetProperty(vtctx->session,
1120  kVTCompressionPropertyKey_ProfileLevel,
1121  profile_level);
1122  if (status) {
1123  av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
1124  }
1125  }
1126  }
1127 
1128  if (avctx->gop_size > 0) {
1129  CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1130  kCFNumberIntType,
1131  &avctx->gop_size);
1132  if (!interval) {
1133  return AVERROR(ENOMEM);
1134  }
1135 
1136  status = VTSessionSetProperty(vtctx->session,
1137  kVTCompressionPropertyKey_MaxKeyFrameInterval,
1138  interval);
1139  CFRelease(interval);
1140 
1141  if (status) {
1142  av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1143  return AVERROR_EXTERNAL;
1144  }
1145  }
1146 
1147  if (vtctx->frames_before) {
1148  status = VTSessionSetProperty(vtctx->session,
1149  kVTCompressionPropertyKey_MoreFramesBeforeStart,
1150  kCFBooleanTrue);
1151 
1152  if (status == kVTPropertyNotSupportedErr) {
1153  av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1154  } else if (status) {
1155  av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1156  }
1157  }
1158 
1159  if (vtctx->frames_after) {
1160  status = VTSessionSetProperty(vtctx->session,
1161  kVTCompressionPropertyKey_MoreFramesAfterEnd,
1162  kCFBooleanTrue);
1163 
1164  if (status == kVTPropertyNotSupportedErr) {
1165  av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1166  } else if (status) {
1167  av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1168  }
1169  }
1170 
1171  if (avctx->sample_aspect_ratio.num != 0) {
1172  CFNumberRef num;
1173  CFNumberRef den;
1174  CFMutableDictionaryRef par;
1175  AVRational *avpar = &avctx->sample_aspect_ratio;
1176 
1177  av_reduce(&avpar->num, &avpar->den,
1178  avpar->num, avpar->den,
1179  0xFFFFFFFF);
1180 
1181  num = CFNumberCreate(kCFAllocatorDefault,
1182  kCFNumberIntType,
1183  &avpar->num);
1184 
1185  den = CFNumberCreate(kCFAllocatorDefault,
1186  kCFNumberIntType,
1187  &avpar->den);
1188 
1189 
1190 
1191  par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1192  2,
1193  &kCFCopyStringDictionaryKeyCallBacks,
1194  &kCFTypeDictionaryValueCallBacks);
1195 
1196  if (!par || !num || !den) {
1197  if (par) CFRelease(par);
1198  if (num) CFRelease(num);
1199  if (den) CFRelease(den);
1200 
1201  return AVERROR(ENOMEM);
1202  }
1203 
1204  CFDictionarySetValue(
1205  par,
1206  kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1207  num);
1208 
1209  CFDictionarySetValue(
1210  par,
1211  kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1212  den);
1213 
1214  status = VTSessionSetProperty(vtctx->session,
1215  kVTCompressionPropertyKey_PixelAspectRatio,
1216  par);
1217 
1218  CFRelease(par);
1219  CFRelease(num);
1220  CFRelease(den);
1221 
1222  if (status) {
1223  av_log(avctx,
1224  AV_LOG_ERROR,
1225  "Error setting pixel aspect ratio to %d:%d: %d.\n",
1226  avctx->sample_aspect_ratio.num,
1227  avctx->sample_aspect_ratio.den,
1228  status);
1229 
1230  return AVERROR_EXTERNAL;
1231  }
1232  }
1233 
1234 
1235  if (vtctx->transfer_function) {
1236  status = VTSessionSetProperty(vtctx->session,
1237  kVTCompressionPropertyKey_TransferFunction,
1238  vtctx->transfer_function);
1239 
1240  if (status) {
1241  av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1242  }
1243  }
1244 
1245 
1246  if (vtctx->ycbcr_matrix) {
1247  status = VTSessionSetProperty(vtctx->session,
1248  kVTCompressionPropertyKey_YCbCrMatrix,
1249  vtctx->ycbcr_matrix);
1250 
1251  if (status) {
1252  av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1253  }
1254  }
1255 
1256 
1257  if (vtctx->color_primaries) {
1258  status = VTSessionSetProperty(vtctx->session,
1259  kVTCompressionPropertyKey_ColorPrimaries,
1260  vtctx->color_primaries);
1261 
1262  if (status) {
1263  av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1264  }
1265  }
1266 
1267  if (gamma_level) {
1268  status = VTSessionSetProperty(vtctx->session,
1269  kCVImageBufferGammaLevelKey,
1270  gamma_level);
1271 
1272  if (status) {
1273  av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1274  }
1275  }
1276 
1277  if (!vtctx->has_b_frames) {
1278  status = VTSessionSetProperty(vtctx->session,
1279  kVTCompressionPropertyKey_AllowFrameReordering,
1280  kCFBooleanFalse);
1281 
1282  if (status) {
1283  av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1284  return AVERROR_EXTERNAL;
1285  }
1286  }
1287 
1288  if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1289  CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1290  compat_keys.kVTH264EntropyMode_CABAC:
1291  compat_keys.kVTH264EntropyMode_CAVLC;
1292 
1293  status = VTSessionSetProperty(vtctx->session,
1294  compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1295  entropy);
1296 
1297  if (status) {
1298  av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1299  }
1300  }
1301 
1302  if (vtctx->realtime) {
1303  status = VTSessionSetProperty(vtctx->session,
1304  compat_keys.kVTCompressionPropertyKey_RealTime,
1305  kCFBooleanTrue);
1306 
1307  if (status) {
1308  av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1309  }
1310  }
1311 
1312  status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1313  if (status) {
1314  av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1315  return AVERROR_EXTERNAL;
1316  }
1317 
1318  return 0;
1319 }
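/*
 * Editorial note (example, not part of the upstream file): the H.264 rate cap
 * is passed through kVTCompressionPropertyKey_DataRateLimits as a CFArray of
 * { bytes, seconds } pairs. A caller setting rc_max_rate to 4000000 bit/s
 * therefore produces the pair { 500000, 1 }, matching the max_rate >> 3
 * computation above.
 */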
1320 
1321 static int vtenc_configure_encoder(AVCodecContext *avctx)
1322 {
1323  CFMutableDictionaryRef enc_info;
1324  CFMutableDictionaryRef pixel_buffer_info;
1325  CMVideoCodecType codec_type;
1326  VTEncContext *vtctx = avctx->priv_data;
1327  CFStringRef profile_level;
1328  CFNumberRef gamma_level = NULL;
1329  int status;
1330 
1331  codec_type = get_cm_codec_type(avctx->codec_id);
1332  if (!codec_type) {
1333  av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1334  return AVERROR(EINVAL);
1335  }
1336 
1337  vtctx->codec_id = avctx->codec_id;
1338 
1339  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1340  vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1341 
1342  vtctx->has_b_frames = avctx->max_b_frames > 0;
1343  if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1344  av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1345  vtctx->has_b_frames = false;
1346  }
1347 
1348  if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1349  av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1350  vtctx->entropy = VT_ENTROPY_NOT_SET;
1351  }
1352 
1353  if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1354  } else {
1355  vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1356  if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1357  if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1358  }
1359 
1360  enc_info = CFDictionaryCreateMutable(
1361  kCFAllocatorDefault,
1362  20,
1363  &kCFCopyStringDictionaryKeyCallBacks,
1364  &kCFTypeDictionaryValueCallBacks
1365  );
1366 
1367  if (!enc_info) return AVERROR(ENOMEM);
1368 
1369 #if !TARGET_OS_IPHONE
1370  if(vtctx->require_sw) {
1371  CFDictionarySetValue(enc_info,
1372  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1373  kCFBooleanFalse);
1374  } else if (!vtctx->allow_sw) {
1375  CFDictionarySetValue(enc_info,
1376  compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1377  kCFBooleanTrue);
1378  } else {
1379  CFDictionarySetValue(enc_info,
1380  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1381  kCFBooleanTrue);
1382  }
1383 #endif
1384 
1385  if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1386  status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1387  if (status)
1388  goto init_cleanup;
1389  } else {
1390  pixel_buffer_info = NULL;
1391  }
1392 
1393  vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1394 
1395  get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1396  get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1397  get_cv_color_primaries(avctx, &vtctx->color_primaries);
1398 
1399 
1400  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1401  status = vtenc_populate_extradata(avctx,
1402  codec_type,
1403  profile_level,
1404  gamma_level,
1405  enc_info,
1406  pixel_buffer_info);
1407  if (status)
1408  goto init_cleanup;
1409  }
1410 
1411  status = vtenc_create_encoder(avctx,
1412  codec_type,
1413  profile_level,
1414  gamma_level,
1415  enc_info,
1416  pixel_buffer_info,
1417  &vtctx->session);
1418 
1419 init_cleanup:
1420  if (gamma_level)
1421  CFRelease(gamma_level);
1422 
1423  if (pixel_buffer_info)
1424  CFRelease(pixel_buffer_info);
1425 
1426  CFRelease(enc_info);
1427 
1428  return status;
1429 }
1430 
1431 static av_cold int vtenc_init(AVCodecContext *avctx)
1432 {
1433  VTEncContext *vtctx = avctx->priv_data;
1434  CFBooleanRef has_b_frames_cfbool;
1435  int status;
1436 
1437  pthread_once(&once_ctrl, loadVTEncSymbols);
1438 
1439  pthread_mutex_init(&vtctx->lock, NULL);
1440  pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1441 
1442  vtctx->session = NULL;
1443  status = vtenc_configure_encoder(avctx);
1444  if (status) return status;
1445 
1446  status = VTSessionCopyProperty(vtctx->session,
1447  kVTCompressionPropertyKey_AllowFrameReordering,
1448  kCFAllocatorDefault,
1449  &has_b_frames_cfbool);
1450 
1451  if (!status && has_b_frames_cfbool) {
1452  //Some devices don't output B-frames for main profile, even if requested.
1453  vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1454  CFRelease(has_b_frames_cfbool);
1455  }
1456  avctx->has_b_frames = vtctx->has_b_frames;
1457 
1458  return 0;
1459 }
1460 
1461 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1462 {
1463  CFArrayRef attachments;
1464  CFDictionaryRef attachment;
1465  CFBooleanRef not_sync;
1466  CFIndex len;
1467 
1468  attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1469  len = !attachments ? 0 : CFArrayGetCount(attachments);
1470 
1471  if (!len) {
1472  *is_key_frame = true;
1473  return;
1474  }
1475 
1476  attachment = CFArrayGetValueAtIndex(attachments, 0);
1477 
1478  if (CFDictionaryGetValueIfPresent(attachment,
1479  kCMSampleAttachmentKey_NotSync,
1480  (const void **)&not_sync))
1481  {
1482  *is_key_frame = !CFBooleanGetValue(not_sync);
1483  } else {
1484  *is_key_frame = true;
1485  }
1486 }
1487 
1488 static int is_post_sei_nal_type(int nal_type){
1489  return nal_type != H264_NAL_SEI &&
1490  nal_type != H264_NAL_SPS &&
1491  nal_type != H264_NAL_PPS &&
1492  nal_type != H264_NAL_AUD;
1493 }
1494 
1495 /*
1496  * Finds the sei message start/size of type find_sei_type.
1497  * If more than one of that type exists, the last one is returned.
1498  */
1499 static int find_sei_end(AVCodecContext *avctx,
1500  uint8_t *nal_data,
1501  size_t nal_size,
1502  uint8_t **sei_end)
1503 {
1504  int nal_type;
1505  size_t sei_payload_size = 0;
1506  int sei_payload_type = 0;
1507  *sei_end = NULL;
1508  uint8_t *nal_start = nal_data;
1509 
1510  if (!nal_size)
1511  return 0;
1512 
1513  nal_type = *nal_data & 0x1F;
1514  if (nal_type != H264_NAL_SEI)
1515  return 0;
1516 
1517  nal_data++;
1518  nal_size--;
1519 
1520  if (nal_data[nal_size - 1] == 0x80)
1521  nal_size--;
1522 
1523  while (nal_size > 0 && *nal_data > 0) {
1524  do{
1525  sei_payload_type += *nal_data;
1526  nal_data++;
1527  nal_size--;
1528  } while (nal_size > 0 && *nal_data == 0xFF);
1529 
1530  if (!nal_size) {
1531  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1532  return AVERROR_INVALIDDATA;
1533  }
1534 
1535  do{
1536  sei_payload_size += *nal_data;
1537  nal_data++;
1538  nal_size--;
1539  } while (nal_size > 0 && *nal_data == 0xFF);
1540 
1541  if (nal_size < sei_payload_size) {
1542  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1543  return AVERROR_INVALIDDATA;
1544  }
1545 
1546  nal_data += sei_payload_size;
1547  nal_size -= sei_payload_size;
1548  }
1549 
1550  *sei_end = nal_data;
1551 
1552  return nal_data - nal_start + 1;
1553 }
1554 
1555 /**
1556  * Copies the data inserting emulation prevention bytes as needed.
1557  * Existing data in the destination can be taken into account by providing
1558  * dst with a dst_offset > 0.
1559  *
1560  * @return The number of bytes copied on success. On failure, the negative of
1561  * the number of bytes needed to copy src is returned.
1562  */
1563 static int copy_emulation_prev(const uint8_t *src,
1564  size_t src_size,
1565  uint8_t *dst,
1566  ssize_t dst_offset,
1567  size_t dst_size)
1568 {
1569  int zeros = 0;
1570  int wrote_bytes;
1571  uint8_t* dst_start;
1572  uint8_t* dst_end = dst + dst_size;
1573  const uint8_t* src_end = src + src_size;
1574  int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1575  int i;
1576  for (i = start_at; i < dst_offset && i < dst_size; i++) {
1577  if (!dst[i])
1578  zeros++;
1579  else
1580  zeros = 0;
1581  }
1582 
1583  dst += dst_offset;
1584  dst_start = dst;
1585  for (; src < src_end; src++, dst++) {
1586  if (zeros == 2) {
1587  int insert_ep3_byte = *src <= 3;
1588  if (insert_ep3_byte) {
1589  if (dst < dst_end)
1590  *dst = 3;
1591  dst++;
1592  }
1593 
1594  zeros = 0;
1595  }
1596 
1597  if (dst < dst_end)
1598  *dst = *src;
1599 
1600  if (!*src)
1601  zeros++;
1602  else
1603  zeros = 0;
1604  }
1605 
1606  wrote_bytes = dst - dst_start;
1607 
1608  if (dst > dst_end)
1609  return -wrote_bytes;
1610 
1611  return wrote_bytes;
1612 }
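/*
 * Editorial note (worked example, not part of the upstream file): emulation
 * prevention inserts 0x03 after any two consecutive zero bytes so the payload
 * cannot imitate a start code. The SEI payload bytes
 *     00 00 01 25
 * are therefore written as
 *     00 00 03 01 25
 * The scan of up to two bytes before dst_offset exists so that zeros already
 * written to the destination are counted toward the same rule.
 */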
1613 
1614 static int write_sei(const ExtraSEI *sei,
1615  int sei_type,
1616  uint8_t *dst,
1617  size_t dst_size)
1618 {
1619  uint8_t *sei_start = dst;
1620  size_t remaining_sei_size = sei->size;
1621  size_t remaining_dst_size = dst_size;
1622  int header_bytes;
1623  int bytes_written;
1624  ssize_t offset;
1625 
1626  if (!remaining_dst_size)
1627  return AVERROR_BUFFER_TOO_SMALL;
1628 
1629  while (sei_type && remaining_dst_size != 0) {
1630  int sei_byte = sei_type > 255 ? 255 : sei_type;
1631  *dst = sei_byte;
1632 
1633  sei_type -= sei_byte;
1634  dst++;
1635  remaining_dst_size--;
1636  }
1637 
1638  if (!dst_size)
1639  return AVERROR_BUFFER_TOO_SMALL;
1640 
1641  while (remaining_sei_size && remaining_dst_size != 0) {
1642  int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1643  *dst = size_byte;
1644 
1645  remaining_sei_size -= size_byte;
1646  dst++;
1647  remaining_dst_size--;
1648  }
1649 
1650  if (remaining_dst_size < sei->size)
1651  return AVERROR_BUFFER_TOO_SMALL;
1652 
1653  header_bytes = dst - sei_start;
1654 
1655  offset = header_bytes;
1656  bytes_written = copy_emulation_prev(sei->data,
1657  sei->size,
1658  sei_start,
1659  offset,
1660  dst_size);
1661  if (bytes_written < 0)
1662  return AVERROR_BUFFER_TOO_SMALL;
1663 
1664  bytes_written += header_bytes;
1665  return bytes_written;
1666 }
1667 
1668 /**
1669  * Copies NAL units and replaces length codes with
1670  * H.264 Annex B start codes. On failure, the contents of
1671  * dst_data may have been modified.
1672  *
1673  * @param length_code_size Byte length of each length code
1674  * @param sample_buffer NAL units prefixed with length codes.
1675  * @param sei Optional A53 closed captions SEI data.
1676  * @param dst_data Must be zeroed before calling this function.
1677  * Contains the copied NAL units prefixed with
1678  * start codes when the function returns
1679  * successfully.
1680  * @param dst_size Length of dst_data
1681  * @return 0 on success
1682  * AVERROR_INVALIDDATA if length_code_size is invalid
1683  * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1684  * or if a length_code in src_data specifies data beyond
1685  * the end of its buffer.
1686  */
1687 static int copy_replace_length_codes(
1688  AVCodecContext *avctx,
1689  size_t length_code_size,
1690  CMSampleBufferRef sample_buffer,
1691  ExtraSEI *sei,
1692  uint8_t *dst_data,
1693  size_t dst_size)
1694 {
1695  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1696  size_t remaining_src_size = src_size;
1697  size_t remaining_dst_size = dst_size;
1698  size_t src_offset = 0;
1699  int wrote_sei = 0;
1700  int status;
1701  uint8_t size_buf[4];
1702  uint8_t nal_type;
1703  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1704 
1705  if (length_code_size > 4) {
1706  return AVERROR_INVALIDDATA;
1707  }
1708 
1709  while (remaining_src_size > 0) {
1710  size_t curr_src_len;
1711  size_t curr_dst_len;
1712  size_t box_len = 0;
1713  size_t i;
1714 
1715  uint8_t *dst_box;
1716 
1717  status = CMBlockBufferCopyDataBytes(block,
1718  src_offset,
1719  length_code_size,
1720  size_buf);
1721  if (status) {
1722  av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1723  return AVERROR_EXTERNAL;
1724  }
1725 
1726  status = CMBlockBufferCopyDataBytes(block,
1727  src_offset + length_code_size,
1728  1,
1729  &nal_type);
1730 
1731  if (status) {
1732  av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1733  return AVERROR_EXTERNAL;
1734  }
1735 
1736  nal_type &= 0x1F;
1737 
1738  for (i = 0; i < length_code_size; i++) {
1739  box_len <<= 8;
1740  box_len |= size_buf[i];
1741  }
1742 
1743  if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1744  //No SEI NAL unit - insert.
1745  int wrote_bytes;
1746 
1747  memcpy(dst_data, start_code, sizeof(start_code));
1748  dst_data += sizeof(start_code);
1749  remaining_dst_size -= sizeof(start_code);
1750 
1751  *dst_data = H264_NAL_SEI;
1752  dst_data++;
1753  remaining_dst_size--;
1754 
1755  wrote_bytes = write_sei(sei,
1756  H264_SEI_TYPE_USER_DATA_REGISTERED,
1757  dst_data,
1758  remaining_dst_size);
1759 
1760  if (wrote_bytes < 0)
1761  return wrote_bytes;
1762 
1763  remaining_dst_size -= wrote_bytes;
1764  dst_data += wrote_bytes;
1765 
1766  if (remaining_dst_size <= 0)
1767  return AVERROR_BUFFER_TOO_SMALL;
1768 
1769  *dst_data = 0x80;
1770 
1771  dst_data++;
1772  remaining_dst_size--;
1773 
1774  wrote_sei = 1;
1775  }
1776 
1777  curr_src_len = box_len + length_code_size;
1778  curr_dst_len = box_len + sizeof(start_code);
1779 
1780  if (remaining_src_size < curr_src_len) {
1781  return AVERROR_BUFFER_TOO_SMALL;
1782  }
1783 
1784  if (remaining_dst_size < curr_dst_len) {
1785  return AVERROR_BUFFER_TOO_SMALL;
1786  }
1787 
1788  dst_box = dst_data + sizeof(start_code);
1789 
1790  memcpy(dst_data, start_code, sizeof(start_code));
1791  status = CMBlockBufferCopyDataBytes(block,
1792  src_offset + length_code_size,
1793  box_len,
1794  dst_box);
1795 
1796  if (status) {
1797  av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1798  return AVERROR_EXTERNAL;
1799  }
1800 
1801  if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1802  //Found SEI NAL unit - append.
1803  int wrote_bytes;
1804  int old_sei_length;
1805  int extra_bytes;
1806  uint8_t *new_sei;
1807  old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1808  if (old_sei_length < 0)
1809  return status;
1810 
1811  wrote_bytes = write_sei(sei,
1812  H264_SEI_TYPE_USER_DATA_REGISTERED,
1813  new_sei,
1814  remaining_dst_size - old_sei_length);
1815  if (wrote_bytes < 0)
1816  return wrote_bytes;
1817 
1818  if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1819  return AVERROR_BUFFER_TOO_SMALL;
1820 
1821  new_sei[wrote_bytes++] = 0x80;
1822  extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1823 
1824  dst_data += extra_bytes;
1825  remaining_dst_size -= extra_bytes;
1826 
1827  wrote_sei = 1;
1828  }
1829 
1830  src_offset += curr_src_len;
1831  dst_data += curr_dst_len;
1832 
1833  remaining_src_size -= curr_src_len;
1834  remaining_dst_size -= curr_dst_len;
1835  }
1836 
1837  return 0;
1838 }
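/*
 * Editorial note (worked example, not part of the upstream file): with a
 * 4-byte length code, the sample data
 *     00 00 00 05 | 65 88 84 00 20
 * (one 5-byte IDR slice NAL unit) is rewritten as
 *     00 00 00 01 | 65 88 84 00 20
 * i.e. each length prefix becomes the Annex B start code. When the length
 * code is shorter than sizeof(start_code), the output grows by the
 * difference per NAL unit, which is exactly the slack vtenc_cm_to_avpacket()
 * adds when sizing the packet. A53 caption SEI, if present, is appended to an
 * existing SEI NAL unit or emitted as a new one before the first slice.
 */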
1839 
1840 /**
1841  * Returns a sufficient number of bytes to contain the sei data.
1842  * It may be greater than the minimum required.
1843  */
1844 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1845  int copied_size;
1846  if (sei->size == 0)
1847  return 0;
1848 
1849  copied_size = -copy_emulation_prev(sei->data,
1850  sei->size,
1851  NULL,
1852  0,
1853  0);
1854 
1855  if ((sei->size % 255) == 0) //may result in an extra byte
1856  copied_size++;
1857 
1858  return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1859 }
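/*
 * Editorial note (example, not part of the upstream file): for a 300-byte
 * caption payload of SEI type 4 (registered user data), the estimate is the
 * emulation-prevented payload size plus 300/255 + 1 = 2 size bytes and
 * 4/255 + 1 = 1 type byte, i.e. a safe upper bound on what write_sei() will
 * actually emit.
 */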
1860 
1861 static int vtenc_cm_to_avpacket(
1862  AVCodecContext *avctx,
1863  CMSampleBufferRef sample_buffer,
1864  AVPacket *pkt,
1865  ExtraSEI *sei)
1866 {
1867  VTEncContext *vtctx = avctx->priv_data;
1868 
1869  int status;
1870  bool is_key_frame;
1871  bool add_header;
1872  size_t length_code_size;
1873  size_t header_size = 0;
1874  size_t in_buf_size;
1875  size_t out_buf_size;
1876  size_t sei_nalu_size = 0;
1877  int64_t dts_delta;
1878  int64_t time_base_num;
1879  int nalu_count;
1880  CMTime pts;
1881  CMTime dts;
1882  CMVideoFormatDescriptionRef vid_fmt;
1883 
1884 
1885  vtenc_get_frame_info(sample_buffer, &is_key_frame);
1886  status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1887  if (status) return status;
1888 
1889  add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1890 
1891  if (add_header) {
1892  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1893  if (!vid_fmt) {
1894  av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1895  return AVERROR_EXTERNAL;
1896  }
1897 
1898  int status = get_params_size(avctx, vid_fmt, &header_size);
1899  if (status) return status;
1900  }
1901 
1902  status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1903  if(status)
1904  return status;
1905 
1906  if (sei) {
1907  size_t msg_size = get_sei_msg_bytes(sei,
1908  H264_SEI_TYPE_USER_DATA_REGISTERED);
1909 
1910  sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1911  }
1912 
1913  in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1914  out_buf_size = header_size +
1915  in_buf_size +
1916  sei_nalu_size +
1917  nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1918 
1919  status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1920  if (status < 0)
1921  return status;
1922 
1923  if (add_header) {
1924  status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1925  if(status) return status;
1926  }
1927 
1928  status = copy_replace_length_codes(
1929  avctx,
1930  length_code_size,
1931  sample_buffer,
1932  sei,
1933  pkt->data + header_size,
1934  pkt->size - header_size
1935  );
1936 
1937  if (status) {
1938  av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1939  return status;
1940  }
1941 
1942  if (is_key_frame) {
1943  pkt->flags |= AV_PKT_FLAG_KEY;
1944  }
1945 
1946  pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1947  dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
1948 
1949  if (CMTIME_IS_INVALID(dts)) {
1950  if (!vtctx->has_b_frames) {
1951  dts = pts;
1952  } else {
1953  av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1954  return AVERROR_EXTERNAL;
1955  }
1956  }
1957 
1958  dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1959  time_base_num = avctx->time_base.num;
1960  pkt->pts = pts.value / time_base_num;
1961  pkt->dts = dts.value / time_base_num - dts_delta;
1962  pkt->size = out_buf_size;
1963 
1964  return 0;
1965 }
1966 
1967 /*
1968  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1969  * containing all planes if so.
1970  */
1971 static int get_cv_pixel_info(
1972  AVCodecContext *avctx,
1973  const AVFrame *frame,
1974  int *color,
1975  int *plane_count,
1976  size_t *widths,
1977  size_t *heights,
1978  size_t *strides,
1979  size_t *contiguous_buf_size)
1980 {
1981  VTEncContext *vtctx = avctx->priv_data;
1982  int av_format = frame->format;
1983  int av_color_range = frame->color_range;
1984  int i;
1985  int range_guessed;
1986  int status;
1987 
1988  status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1989  if (status) {
1990  av_log(avctx,
1991  AV_LOG_ERROR,
1992  "Could not get pixel format for color format '%s' range '%s'.\n",
1993  av_get_pix_fmt_name(av_format),
1994  av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1995  av_color_range < AVCOL_RANGE_NB ?
1996  av_color_range_name(av_color_range) :
1997  "Unknown");
1998 
1999  return AVERROR(EINVAL);
2000  }
2001 
2002  if (range_guessed) {
2003  if (!vtctx->warned_color_range) {
2004  vtctx->warned_color_range = true;
2005  av_log(avctx,
2006  AV_LOG_WARNING,
2007  "Color range not set for %s. Using MPEG range.\n",
2008  av_get_pix_fmt_name(av_format));
2009  }
2010  }
2011 
2012  switch (av_format) {
2013  case AV_PIX_FMT_NV12:
2014  *plane_count = 2;
2015 
2016  widths [0] = avctx->width;
2017  heights[0] = avctx->height;
2018  strides[0] = frame ? frame->linesize[0] : avctx->width;
2019 
2020  widths [1] = (avctx->width + 1) / 2;
2021  heights[1] = (avctx->height + 1) / 2;
2022  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2023  break;
2024 
2025  case AV_PIX_FMT_YUV420P:
2026  *plane_count = 3;
2027 
2028  widths [0] = avctx->width;
2029  heights[0] = avctx->height;
2030  strides[0] = frame ? frame->linesize[0] : avctx->width;
2031 
2032  widths [1] = (avctx->width + 1) / 2;
2033  heights[1] = (avctx->height + 1) / 2;
2034  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2035 
2036  widths [2] = (avctx->width + 1) / 2;
2037  heights[2] = (avctx->height + 1) / 2;
2038  strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2039  break;
2040 
2041  case AV_PIX_FMT_P010LE:
2042  *plane_count = 2;
2043  widths[0] = avctx->width;
2044  heights[0] = avctx->height;
2045  strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2046 
2047  widths[1] = (avctx->width + 1) / 2;
2048  heights[1] = (avctx->height + 1) / 2;
2049  strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2050  break;
2051 
2052  default:
2053  av_log(
2054  avctx,
2055  AV_LOG_ERROR,
2056  "Could not get frame format info for color %d range %d.\n",
2057  av_format,
2058  av_color_range);
2059 
2060  return AVERROR(EINVAL);
2061  }
2062 
2063  *contiguous_buf_size = 0;
2064  for (i = 0; i < *plane_count; i++) {
2065  if (i < *plane_count - 1 &&
2066  frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2067  *contiguous_buf_size = 0;
2068  break;
2069  }
2070 
2071  *contiguous_buf_size += strides[i] * heights[i];
2072  }
2073 
2074  return 0;
2075 }
2076 
2077 //Not used on OSX - frame is never copied.
2078 static int copy_avframe_to_pixel_buffer(AVCodecContext* avctx,
2079  const AVFrame *frame,
2080  CVPixelBufferRef cv_img,
2081  const size_t *plane_strides,
2082  const size_t *plane_rows)
2083 {
2084  int i, j;
2085  size_t plane_count;
2086  int status;
2087  int rows;
2088  int src_stride;
2089  int dst_stride;
2090  uint8_t *src_addr;
2091  uint8_t *dst_addr;
2092  size_t copy_bytes;
2093 
2094  status = CVPixelBufferLockBaseAddress(cv_img, 0);
2095  if (status) {
2096  av_log(
2097  avctx,
2098  AV_LOG_ERROR,
2099  "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2100  status
2101  );
2102  }
2103 
2104  if (CVPixelBufferIsPlanar(cv_img)) {
2105  plane_count = CVPixelBufferGetPlaneCount(cv_img);
2106  for (i = 0; frame->data[i]; i++) {
2107  if (i == plane_count) {
2108  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2109  av_log(avctx,
2110  AV_LOG_ERROR,
2111  "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2112  );
2113 
2114  return AVERROR_EXTERNAL;
2115  }
2116 
2117  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2118  src_addr = (uint8_t*)frame->data[i];
2119  dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2120  src_stride = plane_strides[i];
2121  rows = plane_rows[i];
2122 
2123  if (dst_stride == src_stride) {
2124  memcpy(dst_addr, src_addr, src_stride * rows);
2125  } else {
2126  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2127 
2128  for (j = 0; j < rows; j++) {
2129  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2130  }
2131  }
2132  }
2133  } else {
2134  if (frame->data[1]) {
2135  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2136  av_log(avctx,
2137  AV_LOG_ERROR,
2138  "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2139  );
2140 
2141  return AVERROR_EXTERNAL;
2142  }
2143 
2144  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2145  src_addr = (uint8_t*)frame->data[0];
2146  dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2147  src_stride = plane_strides[0];
2148  rows = plane_rows[0];
2149 
2150  if (dst_stride == src_stride) {
2151  memcpy(dst_addr, src_addr, src_stride * rows);
2152  } else {
2153  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2154 
2155  for (j = 0; j < rows; j++) {
2156  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2157  }
2158  }
2159  }
2160 
2161  status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2162  if (status) {
2163  av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2164  return AVERROR_EXTERNAL;
2165  }
2166 
2167  return 0;
2168 }
2169 
2170 static int create_cv_pixel_buffer(AVCodecContext *avctx,
2171  const AVFrame *frame,
2172  CVPixelBufferRef *cv_img)
2173 {
2174  int plane_count;
2175  int color;
2176  size_t widths [AV_NUM_DATA_POINTERS];
2177  size_t heights[AV_NUM_DATA_POINTERS];
2178  size_t strides[AV_NUM_DATA_POINTERS];
2179  int status;
2180  size_t contiguous_buf_size;
2181  CVPixelBufferPoolRef pix_buf_pool;
2182  VTEncContext* vtctx = avctx->priv_data;
2183 
2184  if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2185  av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2186 
2187  *cv_img = (CVPixelBufferRef)frame->data[3];
2188  av_assert0(*cv_img);
2189 
2190  CFRetain(*cv_img);
2191  return 0;
2192  }
2193 
2194  memset(widths, 0, sizeof(widths));
2195  memset(heights, 0, sizeof(heights));
2196  memset(strides, 0, sizeof(strides));
2197 
2198  status = get_cv_pixel_info(
2199  avctx,
2200  frame,
2201  &color,
2202  &plane_count,
2203  widths,
2204  heights,
2205  strides,
2206  &contiguous_buf_size
2207  );
2208 
2209  if (status) {
2210  av_log(
2211  avctx,
2212  AV_LOG_ERROR,
2213  "Error: Cannot convert format %d color_range %d: %d\n",
2214  frame->format,
2215  frame->color_range,
2216  status
2217  );
2218 
2219  return AVERROR_EXTERNAL;
2220  }
2221 
2222  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2223  if (!pix_buf_pool) {
2224  /* On iOS, the VT session is invalidated when the APP switches from
2225  * foreground to background and vice versa. Fetch the actual error code
2226  * of the VT session to detect that case and restart the VT session
2227  * accordingly. */
2228  OSStatus vtstatus;
2229 
2230  vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2231  if (vtstatus == kVTInvalidSessionErr) {
2232  CFRelease(vtctx->session);
2233  vtctx->session = NULL;
2234  status = vtenc_configure_encoder(avctx);
2235  if (status == 0)
2236  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2237  }
2238  if (!pix_buf_pool) {
2239  av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2240  return AVERROR_EXTERNAL;
2241  }
2242  else
2243  av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2244  "kVTInvalidSessionErr error.\n");
2245  }
2246 
2247  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2248  pix_buf_pool,
2249  cv_img);
2250 
2251 
2252  if (status) {
2253  av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2254  return AVERROR_EXTERNAL;
2255  }
2256 
2257  status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2258  if (status) {
2259  CFRelease(*cv_img);
2260  *cv_img = NULL;
2261  return status;
2262  }
2263 
2264  return 0;
2265 }
2266 
2267 static int create_encoder_dict_h264(const AVFrame *frame,
2268  CFDictionaryRef* dict_out)
2269 {
2270  CFDictionaryRef dict = NULL;
2271  if (frame->pict_type == AV_PICTURE_TYPE_I) {
2272  const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2273  const void *vals[] = { kCFBooleanTrue };
2274 
2275  dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2276  if(!dict) return AVERROR(ENOMEM);
2277  }
2278 
2279  *dict_out = dict;
2280  return 0;
2281 }
2282 
2283 static int vtenc_send_frame(AVCodecContext *avctx,
2284  VTEncContext *vtctx,
2285  const AVFrame *frame)
2286 {
2287  CMTime time;
2288  CFDictionaryRef frame_dict;
2289  CVPixelBufferRef cv_img = NULL;
2290  AVFrameSideData *side_data = NULL;
2291  ExtraSEI *sei = NULL;
2292  int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2293 
2294  if (status) return status;
2295 
2296  status = create_encoder_dict_h264(frame, &frame_dict);
2297  if (status) {
2298  CFRelease(cv_img);
2299  return status;
2300  }
2301 
2302  side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2303  if (vtctx->a53_cc && side_data && side_data->size) {
2304  sei = av_mallocz(sizeof(*sei));
2305  if (!sei) {
2306  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2307  } else {
2308  int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2309  if (ret < 0) {
2310  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2311  av_free(sei);
2312  sei = NULL;
2313  }
2314  }
2315  }
2316 
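  /* frame->pts is in avctx->time_base units, so the CMTime below represents
   * pts * time_base.num / time_base.den seconds; e.g. with a 1/30000 time base
   * and pts == 1001 this yields CMTimeMake(1001, 30000), about 33.4 ms. */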
2317  time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2318  status = VTCompressionSessionEncodeFrame(
2319  vtctx->session,
2320  cv_img,
2321  time,
2322  kCMTimeInvalid,
2323  frame_dict,
2324  sei,
2325  NULL
2326  );
2327 
2328  if (frame_dict) CFRelease(frame_dict);
2329  CFRelease(cv_img);
2330 
2331  if (status) {
2332  av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2333  return AVERROR_EXTERNAL;
2334  }
2335 
2336  return 0;
2337 }
2338 
2339 static av_cold int vtenc_frame(
2340  AVCodecContext *avctx,
2341  AVPacket *pkt,
2342  const AVFrame *frame,
2343  int *got_packet)
2344 {
2345  VTEncContext *vtctx = avctx->priv_data;
2346  bool get_frame;
2347  int status;
2348  CMSampleBufferRef buf = NULL;
2349  ExtraSEI *sei = NULL;
2350 
2351  if (frame) {
2352  status = vtenc_send_frame(avctx, vtctx, frame);
2353 
2354  if (status) {
2355  status = AVERROR_EXTERNAL;
2356  goto end_nopkt;
2357  }
2358 
2359  if (vtctx->frame_ct_in == 0) {
2360  vtctx->first_pts = frame->pts;
2361  } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2362  vtctx->dts_delta = frame->pts - vtctx->first_pts;
2363  }
2364 
2365  vtctx->frame_ct_in++;
2366  } else if(!vtctx->flushing) {
2367  vtctx->flushing = true;
2368 
2369  status = VTCompressionSessionCompleteFrames(vtctx->session,
2370  kCMTimeIndefinite);
2371 
2372  if (status) {
2373  av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2374  status = AVERROR_EXTERNAL;
2375  goto end_nopkt;
2376  }
2377  }
2378 
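  /* No packet is returned until the pts->dts shift is known: with B-frames
   * enabled, dts_delta stays negative until the second input frame has been
   * seen, unless we are flushing. */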
2379  *got_packet = 0;
2380  get_frame = vtctx->dts_delta >= 0 || !frame;
2381  if (!get_frame) {
2382  status = 0;
2383  goto end_nopkt;
2384  }
2385 
2386  status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2387  if (status) goto end_nopkt;
2388  if (!buf) goto end_nopkt;
2389 
2390  status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2391  if (sei) {
2392  if (sei->data) av_free(sei->data);
2393  av_free(sei);
2394  }
2395  CFRelease(buf);
2396  if (status) goto end_nopkt;
2397 
2398  *got_packet = 1;
2399  return 0;
2400 
2401 end_nopkt:
2402  av_packet_unref(pkt);
2403  return status;
2404 }
2405 
2406 static int vtenc_populate_extradata(AVCodecContext *avctx,
2407  CMVideoCodecType codec_type,
2408  CFStringRef profile_level,
2409  CFNumberRef gamma_level,
2410  CFDictionaryRef enc_info,
2411  CFDictionaryRef pixel_buffer_info)
2412 {
2413  VTEncContext *vtctx = avctx->priv_data;
2414  int status;
2415  CVPixelBufferPoolRef pool = NULL;
2416  CVPixelBufferRef pix_buf = NULL;
2417  CMTime time;
2418  CMSampleBufferRef buf = NULL;
2419 
2420  status = vtenc_create_encoder(avctx,
2421  codec_type,
2422  profile_level,
2423  gamma_level,
2424  enc_info,
2425  pixel_buffer_info,
2426  &vtctx->session);
2427  if (status)
2428  goto pe_cleanup;
2429 
2430  pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2431  if(!pool){
2432  av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2433  goto pe_cleanup;
2434  }
2435 
2436  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2437  pool,
2438  &pix_buf);
2439 
2440  if(status != kCVReturnSuccess){
2441  av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2442  goto pe_cleanup;
2443  }
2444 
2445  time = CMTimeMake(0, avctx->time_base.den);
2446  status = VTCompressionSessionEncodeFrame(vtctx->session,
2447  pix_buf,
2448  time,
2449  kCMTimeInvalid,
2450  NULL,
2451  NULL,
2452  NULL);
2453 
2454  if (status) {
2455  av_log(avctx,
2456  AV_LOG_ERROR,
2457  "Error sending frame for extradata: %d\n",
2458  status);
2459 
2460  goto pe_cleanup;
2461  }
2462 
2463  //Populates extradata - output frames are flushed and param sets are available.
2464  status = VTCompressionSessionCompleteFrames(vtctx->session,
2465  kCMTimeIndefinite);
2466 
2467  if (status)
2468  goto pe_cleanup;
2469 
2470  status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2471  if (status) {
2472  av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2473  goto pe_cleanup;
2474  }
2475 
2476  CFRelease(buf);
2477 
2478 
2479 
2480 pe_cleanup:
2481  if(vtctx->session)
2482  CFRelease(vtctx->session);
2483 
2484  vtctx->session = NULL;
2485  vtctx->frame_ct_out = 0;
2486 
2487  av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2488 
2489  return status;
2490 }
2491 
2492 static av_cold int vtenc_close(AVCodecContext *avctx)
2493 {
2494  VTEncContext *vtctx = avctx->priv_data;
2495 
2496  pthread_cond_destroy(&vtctx->cv_sample_sent);
2497  pthread_mutex_destroy(&vtctx->lock);
2498 
2499  if(!vtctx->session) return 0;
2500 
2501  VTCompressionSessionCompleteFrames(vtctx->session,
2502  kCMTimeIndefinite);
2503  clear_frame_queue(vtctx);
2504  CFRelease(vtctx->session);
2505  vtctx->session = NULL;
2506 
2507  if (vtctx->color_primaries) {
2508  CFRelease(vtctx->color_primaries);
2509  vtctx->color_primaries = NULL;
2510  }
2511 
2512  if (vtctx->transfer_function) {
2513  CFRelease(vtctx->transfer_function);
2514  vtctx->transfer_function = NULL;
2515  }
2516 
2517  if (vtctx->ycbcr_matrix) {
2518  CFRelease(vtctx->ycbcr_matrix);
2519  vtctx->ycbcr_matrix = NULL;
2520  }
2521 
2522  return 0;
2523 }
2524 
2525 static const enum AVPixelFormat avc_pix_fmts[] = {
2526  AV_PIX_FMT_VIDEOTOOLBOX,
2527  AV_PIX_FMT_NV12,
2528  AV_PIX_FMT_YUV420P,
2529  AV_PIX_FMT_NONE
2530 };
2531 
2532 static const enum AVPixelFormat hevc_pix_fmts[] = {
2533  AV_PIX_FMT_VIDEOTOOLBOX,
2534  AV_PIX_FMT_NV12,
2535  AV_PIX_FMT_YUV420P,
2536  AV_PIX_FMT_P010LE,
2537  AV_PIX_FMT_NONE
2538 };
2539 
2540 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2541 #define COMMON_OPTIONS \
2542  { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2543  { .i64 = 0 }, 0, 1, VE }, \
2544  { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2545  { .i64 = 0 }, 0, 1, VE }, \
2546  { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2547  OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2548  { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2549  OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2550  { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2551  OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2552 
2553 #define OFFSET(x) offsetof(VTEncContext, x)
2554 static const AVOption h264_options[] = {
2555  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2556  { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2557  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2558  { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },
2559  { "extended", "Extended Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2560 
2561  { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2562  { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2563  { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2564  { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2565  { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2566  { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2567  { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2568  { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2569  { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2570  { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2571  { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2572 
2573  { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2574  { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2575  { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2576  { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2577  { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2578 
2579  { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2580 
2581  COMMON_OPTIONS
2582  { NULL },
2583 };
2584 
2585 static const AVClass h264_videotoolbox_class = {
2586  .class_name = "h264_videotoolbox",
2587  .item_name = av_default_item_name,
2588  .option = h264_options,
2589  .version = LIBAVUTIL_VERSION_INT,
2590 };
2591 
2592 AVCodec ff_h264_videotoolbox_encoder = {
2593  .name = "h264_videotoolbox",
2594  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2595  .type = AVMEDIA_TYPE_VIDEO,
2596  .id = AV_CODEC_ID_H264,
2597  .priv_data_size = sizeof(VTEncContext),
2598  .pix_fmts = avc_pix_fmts,
2599  .init = vtenc_init,
2600  .encode2 = vtenc_frame,
2601  .close = vtenc_close,
2602  .capabilities = AV_CODEC_CAP_DELAY,
2603  .priv_class = &h264_videotoolbox_class,
2604  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2605  FF_CODEC_CAP_INIT_CLEANUP,
2606 };
2607 
2608 static const AVOption hevc_options[] = {
2609  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2610  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2611  { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2612 
2613  COMMON_OPTIONS
2614  { NULL },
2615 };
2616 
2617 static const AVClass hevc_videotoolbox_class = {
2618  .class_name = "hevc_videotoolbox",
2619  .item_name = av_default_item_name,
2620  .option = hevc_options,
2621  .version = LIBAVUTIL_VERSION_INT,
2622 };
2623 
2624 AVCodec ff_hevc_videotoolbox_encoder = {
2625  .name = "hevc_videotoolbox",
2626  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2627  .type = AVMEDIA_TYPE_VIDEO,
2628  .id = AV_CODEC_ID_HEVC,
2629  .priv_data_size = sizeof(VTEncContext),
2630  .pix_fmts = hevc_pix_fmts,
2631  .init = vtenc_init,
2632  .encode2 = vtenc_frame,
2633  .close = vtenc_close,
2634  .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2635  .priv_class = &hevc_videotoolbox_class,
2636  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2637  FF_CODEC_CAP_INIT_CLEANUP,
2638  .wrapper_name = "videotoolbox",
2639 };
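
A minimal usage sketch (not part of this file): opening the h264_videotoolbox encoder registered above from a libavcodec client and setting a few of the private options declared in h264_options/COMMON_OPTIONS. The calls used (avcodec_find_encoder_by_name, avcodec_alloc_context3, av_opt_set, av_opt_set_int, avcodec_open2, avcodec_send_frame, avcodec_receive_packet) are standard libavcodec/libavutil API; error handling is trimmed for brevity.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Sketch only: encode a single NV12 AVFrame with h264_videotoolbox.
 * Assumes `frame` is filled and `pkt` was allocated with av_packet_alloc(). */
static int encode_one_frame(AVFrame *frame, AVPacket *pkt)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("h264_videotoolbox");
    AVCodecContext *enc  = avcodec_alloc_context3(codec);
    int ret;

    enc->width     = frame->width;
    enc->height    = frame->height;
    enc->time_base = (AVRational){ 1, 30 };
    enc->pix_fmt   = AV_PIX_FMT_NV12;
    enc->bit_rate  = 2000000;                 /* 2 Mb/s, arbitrary for the sketch */

    /* Private options from h264_options/COMMON_OPTIONS above. */
    av_opt_set    (enc->priv_data, "profile",  "high", 0);
    av_opt_set_int(enc->priv_data, "allow_sw", 1,      0);
    av_opt_set_int(enc->priv_data, "realtime", 1,      0);

    avcodec_open2(enc, codec, NULL);

    avcodec_send_frame(enc, frame);           /* feed the raw picture */
    avcodec_send_frame(enc, NULL);            /* flush (AV_CODEC_CAP_DELAY) */
    ret = avcodec_receive_packet(enc, pkt);   /* pull the encoded access unit */

    avcodec_free_context(&enc);
    return ret;
}

On the command line the same options map to, for example: ffmpeg -i in.mov -c:v h264_videotoolbox -allow_sw 1 -profile:v high out.mp4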