FFmpeg
videotoolboxenc.c
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "h264.h"
35 #include "h264_sei.h"
36 #include <dlfcn.h>
37 
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
39 enum { kCMVideoCodecType_HEVC = 'hvc1' };
40 #endif
41 
42 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
43  size_t parameterSetIndex,
44  const uint8_t **parameterSetPointerOut,
45  size_t *parameterSetSizeOut,
46  size_t *parameterSetCountOut,
47  int *NALUnitHeaderLengthOut);
48 
49 //These symbols may not be present
50 static struct{
51  CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
52  CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
53  CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
54 
55  CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
56  CFStringRef kVTH264EntropyMode_CAVLC;
57  CFStringRef kVTH264EntropyMode_CABAC;
58 
59  CFStringRef kVTProfileLevel_H264_Baseline_4_0;
60  CFStringRef kVTProfileLevel_H264_Baseline_4_2;
61  CFStringRef kVTProfileLevel_H264_Baseline_5_0;
62  CFStringRef kVTProfileLevel_H264_Baseline_5_1;
63  CFStringRef kVTProfileLevel_H264_Baseline_5_2;
64  CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
65  CFStringRef kVTProfileLevel_H264_Main_4_2;
66  CFStringRef kVTProfileLevel_H264_Main_5_1;
67  CFStringRef kVTProfileLevel_H264_Main_5_2;
68  CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
69  CFStringRef kVTProfileLevel_H264_High_3_0;
70  CFStringRef kVTProfileLevel_H264_High_3_1;
71  CFStringRef kVTProfileLevel_H264_High_3_2;
72  CFStringRef kVTProfileLevel_H264_High_4_0;
73  CFStringRef kVTProfileLevel_H264_High_4_1;
74  CFStringRef kVTProfileLevel_H264_High_4_2;
75  CFStringRef kVTProfileLevel_H264_High_5_1;
76  CFStringRef kVTProfileLevel_H264_High_5_2;
77  CFStringRef kVTProfileLevel_H264_High_AutoLevel;
78 
79  CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
80  CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
81 
82  CFStringRef kVTCompressionPropertyKey_RealTime;
83 
84  CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
85  CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
86 
87  getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
88 } compat_keys;
89 
90 #define GET_SYM(symbol, defaultVal) \
91 do{ \
92  CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol); \
93  if(!handle) \
94  compat_keys.symbol = CFSTR(defaultVal); \
95  else \
96  compat_keys.symbol = *handle; \
97 }while(0)
98 
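/*
 * For illustration: GET_SYM resolves the named CoreFoundation constant at
 * runtime so the code still loads on OS versions where the symbol is absent.
 * GET_SYM(kVTH264EntropyMode_CABAC, "CABAC"), for example, expands roughly to
 *
 *     CFStringRef *handle = (CFStringRef*)dlsym(RTLD_DEFAULT,
 *                                               "kVTH264EntropyMode_CABAC");
 *     compat_keys.kVTH264EntropyMode_CABAC = handle ? *handle : CFSTR("CABAC");
 *
 * i.e. the fallback is a plain CFSTR() literal holding the default value.
 */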
99 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
100 
101 static void loadVTEncSymbols(){
102  compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
103  (getParameterSetAtIndex)dlsym(
104  RTLD_DEFAULT,
105  "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
106  );
107 
108  GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
109  GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
110  GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
111 
112  GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
113  GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
114  GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
115 
116  GET_SYM(kVTProfileLevel_H264_Baseline_4_0, "H264_Baseline_4_0");
117  GET_SYM(kVTProfileLevel_H264_Baseline_4_2, "H264_Baseline_4_2");
118  GET_SYM(kVTProfileLevel_H264_Baseline_5_0, "H264_Baseline_5_0");
119  GET_SYM(kVTProfileLevel_H264_Baseline_5_1, "H264_Baseline_5_1");
120  GET_SYM(kVTProfileLevel_H264_Baseline_5_2, "H264_Baseline_5_2");
121  GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
122  GET_SYM(kVTProfileLevel_H264_Main_4_2, "H264_Main_4_2");
123  GET_SYM(kVTProfileLevel_H264_Main_5_1, "H264_Main_5_1");
124  GET_SYM(kVTProfileLevel_H264_Main_5_2, "H264_Main_5_2");
125  GET_SYM(kVTProfileLevel_H264_Main_AutoLevel, "H264_Main_AutoLevel");
126  GET_SYM(kVTProfileLevel_H264_High_3_0, "H264_High_3_0");
127  GET_SYM(kVTProfileLevel_H264_High_3_1, "H264_High_3_1");
128  GET_SYM(kVTProfileLevel_H264_High_3_2, "H264_High_3_2");
129  GET_SYM(kVTProfileLevel_H264_High_4_0, "H264_High_4_0");
130  GET_SYM(kVTProfileLevel_H264_High_4_1, "H264_High_4_1");
131  GET_SYM(kVTProfileLevel_H264_High_4_2, "H264_High_4_2");
132  GET_SYM(kVTProfileLevel_H264_High_5_1, "H264_High_5_1");
133  GET_SYM(kVTProfileLevel_H264_High_5_2, "H264_High_5_2");
134  GET_SYM(kVTProfileLevel_H264_High_AutoLevel, "H264_High_AutoLevel");
135 
136  GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel, "HEVC_Main_AutoLevel");
137  GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel, "HEVC_Main10_AutoLevel");
138 
139  GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
140 
141  GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
142  "EnableHardwareAcceleratedVideoEncoder");
143  GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
144  "RequireHardwareAcceleratedVideoEncoder");
145 }
146 
147 typedef enum VT_H264Profile {
148  H264_PROF_AUTO,
149  H264_PROF_BASELINE,
150  H264_PROF_MAIN,
151  H264_PROF_HIGH,
152  H264_PROF_COUNT
153 } VT_H264Profile;
154 
155 typedef enum VTH264Entropy{
156  VT_ENTROPY_NOT_SET,
157  VT_CAVLC,
158  VT_CABAC
159 } VTH264Entropy;
160 
161 typedef enum VT_HEVCProfile {
162  HEVC_PROF_AUTO,
163  HEVC_PROF_MAIN,
164  HEVC_PROF_MAIN10,
165  HEVC_PROF_COUNT
166 } VT_HEVCProfile;
167 
168 static const uint8_t start_code[] = { 0, 0, 0, 1 };
169 
170 typedef struct ExtraSEI {
171  void *data;
172  size_t size;
173 } ExtraSEI;
174 
175 typedef struct BufNode {
176  CMSampleBufferRef cm_buffer;
177  ExtraSEI *sei;
178  struct BufNode* next;
179  int error;
180 } BufNode;
181 
182 typedef struct VTEncContext {
183  AVClass *class;
184  enum AVCodecID codec_id;
185  VTCompressionSessionRef session;
186  CFStringRef ycbcr_matrix;
187  CFStringRef color_primaries;
188  CFStringRef transfer_function;
189  getParameterSetAtIndex get_param_set_func;
190 
191  pthread_mutex_t lock;
192  pthread_cond_t cv_sample_sent;
193 
194  int async_error;
195 
196  BufNode *q_head;
197  BufNode *q_tail;
198 
199  int64_t frame_ct_out;
200  int64_t frame_ct_in;
201 
202  int64_t first_pts;
203  int64_t dts_delta;
204 
205  int64_t profile;
206  int64_t level;
207  int64_t entropy;
208  int64_t realtime;
209  int64_t frames_before;
210  int64_t frames_after;
211 
212  int64_t allow_sw;
213 
214  bool flushing;
215  bool has_b_frames;
216  bool warned_color_range;
217  bool a53_cc;
218 } VTEncContext;
219 
220 static int vtenc_populate_extradata(AVCodecContext *avctx,
221  CMVideoCodecType codec_type,
222  CFStringRef profile_level,
223  CFNumberRef gamma_level,
224  CFDictionaryRef enc_info,
225  CFDictionaryRef pixel_buffer_info);
226 
227 /**
228  * NULL-safe release of *refPtr; sets *refPtr to NULL afterwards.
229  */
230 static void vt_release_num(CFNumberRef* refPtr){
231  if (!*refPtr) {
232  return;
233  }
234 
235  CFRelease(*refPtr);
236  *refPtr = NULL;
237 }
238 
239 static void set_async_error(VTEncContext *vtctx, int err)
240 {
241  BufNode *info;
242 
243  pthread_mutex_lock(&vtctx->lock);
244 
245  vtctx->async_error = err;
246 
247  info = vtctx->q_head;
248  vtctx->q_head = vtctx->q_tail = NULL;
249 
250  while (info) {
251  BufNode *next = info->next;
252  CFRelease(info->cm_buffer);
253  av_free(info);
254  info = next;
255  }
256 
257  pthread_mutex_unlock(&vtctx->lock);
258 }
259 
260 static void clear_frame_queue(VTEncContext *vtctx)
261 {
262  set_async_error(vtctx, 0);
263 }
264 
265 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
266 {
267  BufNode *info;
268 
269  pthread_mutex_lock(&vtctx->lock);
270 
271  if (vtctx->async_error) {
272  pthread_mutex_unlock(&vtctx->lock);
273  return vtctx->async_error;
274  }
275 
276  if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
277  *buf = NULL;
278 
279  pthread_mutex_unlock(&vtctx->lock);
280  return 0;
281  }
282 
283  while (!vtctx->q_head && !vtctx->async_error && wait) {
284  pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
285  }
286 
287  if (!vtctx->q_head) {
288  pthread_mutex_unlock(&vtctx->lock);
289  *buf = NULL;
290  return 0;
291  }
292 
293  info = vtctx->q_head;
294  vtctx->q_head = vtctx->q_head->next;
295  if (!vtctx->q_head) {
296  vtctx->q_tail = NULL;
297  }
298 
299  pthread_mutex_unlock(&vtctx->lock);
300 
301  *buf = info->cm_buffer;
302  if (sei && *buf) {
303  *sei = info->sei;
304  } else if (info->sei) {
305  if (info->sei->data) av_free(info->sei->data);
306  av_free(info->sei);
307  }
308  av_free(info);
309 
310  vtctx->frame_ct_out++;
311 
312  return 0;
313 }
314 
315 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
316 {
317  BufNode *info = av_malloc(sizeof(BufNode));
318  if (!info) {
319  set_async_error(vtctx, AVERROR(ENOMEM));
320  return;
321  }
322 
323  CFRetain(buffer);
324  info->cm_buffer = buffer;
325  info->sei = sei;
326  info->next = NULL;
327 
328  pthread_mutex_lock(&vtctx->lock);
329  pthread_cond_signal(&vtctx->cv_sample_sent);
330 
331  if (!vtctx->q_head) {
332  vtctx->q_head = info;
333  } else {
334  vtctx->q_tail->next = info;
335  }
336 
337  vtctx->q_tail = info;
338 
339  pthread_mutex_unlock(&vtctx->lock);
340 }
341 
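/*
 * For context: this queue is the hand-off point between VideoToolbox's
 * callback thread and the thread driving the encoder. vtenc_output_callback()
 * pushes each finished CMSampleBuffer with vtenc_q_push(), which signals
 * cv_sample_sent while holding the lock; the frame-level encode path (outside
 * this excerpt) blocks in vtenc_q_pop() until a sample, end-of-stream, or an
 * async error is available.
 */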
342 static int count_nalus(size_t length_code_size,
343  CMSampleBufferRef sample_buffer,
344  int *count)
345 {
346  size_t offset = 0;
347  int status;
348  int nalu_ct = 0;
349  uint8_t size_buf[4];
350  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
351  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
352 
353  if (length_code_size > 4)
354  return AVERROR_INVALIDDATA;
355 
356  while (offset < src_size) {
357  size_t curr_src_len;
358  size_t box_len = 0;
359  size_t i;
360 
361  status = CMBlockBufferCopyDataBytes(block,
362  offset,
363  length_code_size,
364  size_buf);
365 
366  for (i = 0; i < length_code_size; i++) {
367  box_len <<= 8;
368  box_len |= size_buf[i];
369  }
370 
371  curr_src_len = box_len + length_code_size;
372  offset += curr_src_len;
373 
374  nalu_ct++;
375  }
376 
377  *count = nalu_ct;
378  return 0;
379 }
380 
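/*
 * Worked example: VideoToolbox returns AVCC-style buffers in which every NAL
 * unit is prefixed by a big-endian length field of length_code_size bytes
 * (typically 4). A buffer such as
 *
 *     00 00 00 02 | 09 F0 | 00 00 00 19 | 25 bytes of slice data
 *
 * therefore counts as two NAL units: a 2-byte AUD and a 0x19 = 25-byte slice.
 */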
381 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
382 {
383  switch (id) {
384  case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
385  case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
386  default: return 0;
387  }
388 }
389 
390 /**
391  * Get the parameter sets from a CMVideoFormatDescriptionRef.
392  * get_params_size() computes the number of bytes needed to store every
393  * parameter set, each prefixed with an Annex B start code, and writes
394  * that count to *size.
395  * copy_param_sets() below performs the actual copy into dst; dst_size
396  * must be at least the value reported by get_params_size().
397  */
398 static int get_params_size(
399  AVCodecContext *avctx,
400  CMVideoFormatDescriptionRef vid_fmt,
401  size_t *size)
402 {
403  VTEncContext *vtctx = avctx->priv_data;
404  size_t total_size = 0;
405  size_t ps_count;
406  int is_count_bad = 0;
407  size_t i;
408  int status;
409  status = vtctx->get_param_set_func(vid_fmt,
410  0,
411  NULL,
412  NULL,
413  &ps_count,
414  NULL);
415  if (status) {
416  is_count_bad = 1;
417  ps_count = 0;
418  status = 0;
419  }
420 
421  for (i = 0; i < ps_count || is_count_bad; i++) {
422  const uint8_t *ps;
423  size_t ps_size;
424  status = vtctx->get_param_set_func(vid_fmt,
425  i,
426  &ps,
427  &ps_size,
428  NULL,
429  NULL);
430  if (status) {
431  /*
432  * When ps_count is invalid, status != 0 ends the loop normally
433  * unless we didn't get any parameter sets.
434  */
435  if (i > 0 && is_count_bad) status = 0;
436 
437  break;
438  }
439 
440  total_size += ps_size + sizeof(start_code);
441  }
442 
443  if (status) {
444  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
445  return AVERROR_EXTERNAL;
446  }
447 
448  *size = total_size;
449  return 0;
450 }
451 
452 static int copy_param_sets(
453  AVCodecContext *avctx,
454  CMVideoFormatDescriptionRef vid_fmt,
455  uint8_t *dst,
456  size_t dst_size)
457 {
458  VTEncContext *vtctx = avctx->priv_data;
459  size_t ps_count;
460  int is_count_bad = 0;
461  int status;
462  size_t offset = 0;
463  size_t i;
464 
465  status = vtctx->get_param_set_func(vid_fmt,
466  0,
467  NULL,
468  NULL,
469  &ps_count,
470  NULL);
471  if (status) {
472  is_count_bad = 1;
473  ps_count = 0;
474  status = 0;
475  }
476 
477 
478  for (i = 0; i < ps_count || is_count_bad; i++) {
479  const uint8_t *ps;
480  size_t ps_size;
481  size_t next_offset;
482 
483  status = vtctx->get_param_set_func(vid_fmt,
484  i,
485  &ps,
486  &ps_size,
487  NULL,
488  NULL);
489  if (status) {
490  if (i > 0 && is_count_bad) status = 0;
491 
492  break;
493  }
494 
495  next_offset = offset + sizeof(start_code) + ps_size;
496  if (dst_size < next_offset) {
497  av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
498  return AVERROR_BUFFER_TOO_SMALL;
499  }
500 
501  memcpy(dst + offset, start_code, sizeof(start_code));
502  offset += sizeof(start_code);
503 
504  memcpy(dst + offset, ps, ps_size);
505  offset = next_offset;
506  }
507 
508  if (status) {
509  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
510  return AVERROR_EXTERNAL;
511  }
512 
513  return 0;
514 }
515 
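/*
 * For reference, the extradata built from these two helpers is plain Annex B:
 * each parameter set from the format description is written as start code
 * plus payload, e.g. for H.264
 *
 *     00 00 00 01 <SPS> 00 00 00 01 <PPS>
 *
 * (for HEVC the same loop emits VPS/SPS/PPS). get_params_size() reports the
 * exact number of bytes this layout needs, including the start codes.
 */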
516 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
517 {
518  CMVideoFormatDescriptionRef vid_fmt;
519  size_t total_size;
520  int status;
521 
522  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
523  if (!vid_fmt) {
524  av_log(avctx, AV_LOG_ERROR, "No video format.\n");
525  return AVERROR_EXTERNAL;
526  }
527 
528  status = get_params_size(avctx, vid_fmt, &total_size);
529  if (status) {
530  av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
531  return status;
532  }
533 
534  avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
535  if (!avctx->extradata) {
536  return AVERROR(ENOMEM);
537  }
538  avctx->extradata_size = total_size;
539 
540  status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
541 
542  if (status) {
543  av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
544  return status;
545  }
546 
547  return 0;
548 }
549 
550 static void vtenc_output_callback(
551  void *ctx,
552  void *sourceFrameCtx,
553  OSStatus status,
554  VTEncodeInfoFlags flags,
555  CMSampleBufferRef sample_buffer)
556 {
557  AVCodecContext *avctx = ctx;
558  VTEncContext *vtctx = avctx->priv_data;
559  ExtraSEI *sei = sourceFrameCtx;
560 
561  if (vtctx->async_error) {
562  if(sample_buffer) CFRelease(sample_buffer);
563  return;
564  }
565 
566  if (status || !sample_buffer) {
567  av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
568  set_async_error(vtctx, AVERROR_EXTERNAL);
569  return;
570  }
571 
572  if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
573  int set_status = set_extradata(avctx, sample_buffer);
574  if (set_status) {
575  set_async_error(vtctx, set_status);
576  return;
577  }
578  }
579 
580  vtenc_q_push(vtctx, sample_buffer, sei);
581 }
582 
583 static int get_length_code_size(
584  AVCodecContext *avctx,
585  CMSampleBufferRef sample_buffer,
586  size_t *size)
587 {
588  VTEncContext *vtctx = avctx->priv_data;
589  CMVideoFormatDescriptionRef vid_fmt;
590  int isize;
591  int status;
592 
593  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
594  if (!vid_fmt) {
595  av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
596  return AVERROR_EXTERNAL;
597  }
598 
599  status = vtctx->get_param_set_func(vid_fmt,
600  0,
601  NULL,
602  NULL,
603  NULL,
604  &isize);
605  if (status) {
606  av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
607  return AVERROR_EXTERNAL;
608  }
609 
610  *size = isize;
611  return 0;
612 }
613 
614 /*
615  * Returns true on success.
616  *
617  * If profile_level_val is NULL and this method returns true, don't specify the
618  * profile/level to the encoder.
619  */
620 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
621  CFStringRef *profile_level_val)
622 {
623  VTEncContext *vtctx = avctx->priv_data;
624  int64_t profile = vtctx->profile;
625 
626  if (profile == H264_PROF_AUTO && vtctx->level) {
627  //Need to pick a profile if level is not auto-selected.
628  profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
629  }
630 
631  *profile_level_val = NULL;
632 
633  switch (profile) {
634  case H264_PROF_AUTO:
635  return true;
636 
637  case H264_PROF_BASELINE:
638  switch (vtctx->level) {
639  case 0: *profile_level_val =
640  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
641  case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
642  case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
643  case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
644  case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
645  case 40: *profile_level_val =
646  compat_keys.kVTProfileLevel_H264_Baseline_4_0; break;
647  case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
648  case 42: *profile_level_val =
649  compat_keys.kVTProfileLevel_H264_Baseline_4_2; break;
650  case 50: *profile_level_val =
651  compat_keys.kVTProfileLevel_H264_Baseline_5_0; break;
652  case 51: *profile_level_val =
653  compat_keys.kVTProfileLevel_H264_Baseline_5_1; break;
654  case 52: *profile_level_val =
655  compat_keys.kVTProfileLevel_H264_Baseline_5_2; break;
656  }
657  break;
658 
659  case H264_PROF_MAIN:
660  switch (vtctx->level) {
661  case 0: *profile_level_val =
662  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
663  case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
664  case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
665  case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
666  case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
667  case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
668  case 42: *profile_level_val =
669  compat_keys.kVTProfileLevel_H264_Main_4_2; break;
670  case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
671  case 51: *profile_level_val =
672  compat_keys.kVTProfileLevel_H264_Main_5_1; break;
673  case 52: *profile_level_val =
674  compat_keys.kVTProfileLevel_H264_Main_5_2; break;
675  }
676  break;
677 
678  case H264_PROF_HIGH:
679  switch (vtctx->level) {
680  case 0: *profile_level_val =
681  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
682  case 30: *profile_level_val =
683  compat_keys.kVTProfileLevel_H264_High_3_0; break;
684  case 31: *profile_level_val =
685  compat_keys.kVTProfileLevel_H264_High_3_1; break;
686  case 32: *profile_level_val =
687  compat_keys.kVTProfileLevel_H264_High_3_2; break;
688  case 40: *profile_level_val =
689  compat_keys.kVTProfileLevel_H264_High_4_0; break;
690  case 41: *profile_level_val =
691  compat_keys.kVTProfileLevel_H264_High_4_1; break;
692  case 42: *profile_level_val =
693  compat_keys.kVTProfileLevel_H264_High_4_2; break;
694  case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
695  case 51: *profile_level_val =
696  compat_keys.kVTProfileLevel_H264_High_5_1; break;
697  case 52: *profile_level_val =
698  compat_keys.kVTProfileLevel_H264_High_5_2; break;
699  }
700  break;
701  }
702 
703  if (!*profile_level_val) {
704  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
705  return false;
706  }
707 
708  return true;
709 }
710 
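/*
 * Example of the mapping above (the AVOption table that fills vtctx->profile
 * and vtctx->level sits outside this excerpt): profile "high" with level 4.1
 * is stored as profile = H264_PROF_HIGH, level = 41 and selects
 * kVTProfileLevel_H264_High_4_1; level 0 means "AutoLevel". Levels that only
 * exist in newer SDKs are read through compat_keys, so on older systems the
 * lookup degrades to the CFSTR fallback installed by GET_SYM.
 */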
711 /*
712  * Returns true on success.
713  *
714  * If profile_level_val is NULL and this method returns true, don't specify the
715  * profile/level to the encoder.
716  */
717 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
718  CFStringRef *profile_level_val)
719 {
720  VTEncContext *vtctx = avctx->priv_data;
721  int64_t profile = vtctx->profile;
722 
723  *profile_level_val = NULL;
724 
725  switch (profile) {
726  case HEVC_PROF_AUTO:
727  return true;
728  case HEVC_PROF_MAIN:
729  *profile_level_val =
730  compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
731  break;
732  case HEVC_PROF_MAIN10:
733  *profile_level_val =
734  compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
735  break;
736  }
737 
738  if (!*profile_level_val) {
739  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
740  return false;
741  }
742 
743  return true;
744 }
745 
746 static int get_cv_pixel_format(AVCodecContext* avctx,
747  enum AVPixelFormat fmt,
748  enum AVColorRange range,
749  int* av_pixel_format,
750  int* range_guessed)
751 {
752  if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
753  range != AVCOL_RANGE_JPEG;
754 
755  //MPEG range is used when no range is set
756  if (fmt == AV_PIX_FMT_NV12) {
757  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
758  kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
759  kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
760  } else if (fmt == AV_PIX_FMT_YUV420P) {
761  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
762  kCVPixelFormatType_420YpCbCr8PlanarFullRange :
763  kCVPixelFormatType_420YpCbCr8Planar;
764  } else {
765  return AVERROR(EINVAL);
766  }
767 
768  return 0;
769 }
770 
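/*
 * Concretely: AV_PIX_FMT_NV12 maps to the bi-planar 4:2:0 CoreVideo formats
 * ('420v' for video/MPEG range, '420f' for full/JPEG range) and
 * AV_PIX_FMT_YUV420P to the corresponding tri-planar ones. Any other input
 * format is rejected here with EINVAL, so callers fail before a compression
 * session or pixel buffer is created.
 */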
771 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
772  VTEncContext *vtctx = avctx->priv_data;
773 
774  if (vtctx->color_primaries) {
775  CFDictionarySetValue(dict,
776  kCVImageBufferColorPrimariesKey,
777  vtctx->color_primaries);
778  }
779 
780  if (vtctx->transfer_function) {
781  CFDictionarySetValue(dict,
782  kCVImageBufferTransferFunctionKey,
783  vtctx->transfer_function);
784  }
785 
786  if (vtctx->ycbcr_matrix) {
787  CFDictionarySetValue(dict,
788  kCVImageBufferYCbCrMatrixKey,
789  vtctx->ycbcr_matrix);
790  }
791 }
792 
793 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
794  CFMutableDictionaryRef* dict)
795 {
796  CFNumberRef cv_color_format_num = NULL;
797  CFNumberRef width_num = NULL;
798  CFNumberRef height_num = NULL;
799  CFMutableDictionaryRef pixel_buffer_info = NULL;
800  int cv_color_format;
801  int status = get_cv_pixel_format(avctx,
802  avctx->pix_fmt,
803  avctx->color_range,
804  &cv_color_format,
805  NULL);
806  if (status) return status;
807 
808  pixel_buffer_info = CFDictionaryCreateMutable(
809  kCFAllocatorDefault,
810  20,
811  &kCFCopyStringDictionaryKeyCallBacks,
812  &kCFTypeDictionaryValueCallBacks);
813 
814  if (!pixel_buffer_info) goto pbinfo_nomem;
815 
816  cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
817  kCFNumberSInt32Type,
818  &cv_color_format);
819  if (!cv_color_format_num) goto pbinfo_nomem;
820 
821  CFDictionarySetValue(pixel_buffer_info,
822  kCVPixelBufferPixelFormatTypeKey,
823  cv_color_format_num);
824  vt_release_num(&cv_color_format_num);
825 
826  width_num = CFNumberCreate(kCFAllocatorDefault,
827  kCFNumberSInt32Type,
828  &avctx->width);
829  if (!width_num) goto pbinfo_nomem;
830 
831  CFDictionarySetValue(pixel_buffer_info,
832  kCVPixelBufferWidthKey,
833  width_num);
834  vt_release_num(&width_num);
835 
836  height_num = CFNumberCreate(kCFAllocatorDefault,
837  kCFNumberSInt32Type,
838  &avctx->height);
839  if (!height_num) goto pbinfo_nomem;
840 
841  CFDictionarySetValue(pixel_buffer_info,
842  kCVPixelBufferHeightKey,
843  height_num);
844  vt_release_num(&height_num);
845 
846  add_color_attr(avctx, pixel_buffer_info);
847 
848  *dict = pixel_buffer_info;
849  return 0;
850 
851 pbinfo_nomem:
852  vt_release_num(&cv_color_format_num);
853  vt_release_num(&width_num);
854  vt_release_num(&height_num);
855  if (pixel_buffer_info) CFRelease(pixel_buffer_info);
856 
857  return AVERROR(ENOMEM);
858 }
859 
860 static int get_cv_color_primaries(AVCodecContext *avctx,
861  CFStringRef *primaries)
862 {
863  enum AVColorPrimaries pri = avctx->color_primaries;
864  switch (pri) {
865  case AVCOL_PRI_UNSPECIFIED:
866  *primaries = NULL;
867  break;
868 
869  case AVCOL_PRI_BT709:
870  *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
871  break;
872 
873  case AVCOL_PRI_BT2020:
874  *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
875  break;
876 
877  default:
878  av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
879  *primaries = NULL;
880  return -1;
881  }
882 
883  return 0;
884 }
885 
886 static int get_cv_transfer_function(AVCodecContext *avctx,
887  CFStringRef *transfer_fnc,
888  CFNumberRef *gamma_level)
889 {
890  enum AVColorTransferCharacteristic trc = avctx->color_trc;
891  Float32 gamma;
892  *gamma_level = NULL;
893 
894  switch (trc) {
895  case AVCOL_TRC_UNSPECIFIED:
896  *transfer_fnc = NULL;
897  break;
898 
899  case AVCOL_TRC_BT709:
900  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
901  break;
902 
903  case AVCOL_TRC_SMPTE240M:
904  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
905  break;
906 
907  case AVCOL_TRC_GAMMA22:
908  gamma = 2.2;
909  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
910  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
911  break;
912 
913  case AVCOL_TRC_GAMMA28:
914  gamma = 2.8;
915  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
916  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
917  break;
918 
919  case AVCOL_TRC_BT2020_10:
920  case AVCOL_TRC_BT2020_12:
921  *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
922  break;
923 
924  default:
925  av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
926  return -1;
927  }
928 
929  return 0;
930 }
931 
932 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
933  switch(avctx->colorspace) {
934  case AVCOL_SPC_BT709:
935  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
936  break;
937 
938  case AVCOL_SPC_UNSPECIFIED:
939  *matrix = NULL;
940  break;
941 
942  case AVCOL_SPC_BT470BG:
943  case AVCOL_SPC_SMPTE170M:
944  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
945  break;
946 
947  case AVCOL_SPC_SMPTE240M:
948  *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
949  break;
950 
951  case AVCOL_SPC_BT2020_NCL:
952  *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
953  break;
954 
955  default:
956  av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
957  return -1;
958  }
959 
960  return 0;
961 }
962 
963 static int vtenc_create_encoder(AVCodecContext *avctx,
964  CMVideoCodecType codec_type,
965  CFStringRef profile_level,
966  CFNumberRef gamma_level,
967  CFDictionaryRef enc_info,
968  CFDictionaryRef pixel_buffer_info,
969  VTCompressionSessionRef *session)
970 {
971  VTEncContext *vtctx = avctx->priv_data;
972  SInt32 bit_rate = avctx->bit_rate;
973  SInt32 max_rate = avctx->rc_max_rate;
974  CFNumberRef bit_rate_num;
975  CFNumberRef bytes_per_second;
976  CFNumberRef one_second;
977  CFArrayRef data_rate_limits;
978  int64_t bytes_per_second_value = 0;
979  int64_t one_second_value = 0;
980  void *nums[2];
981 
982  int status = VTCompressionSessionCreate(kCFAllocatorDefault,
983  avctx->width,
984  avctx->height,
985  codec_type,
986  enc_info,
987  pixel_buffer_info,
988  kCFAllocatorDefault,
989  vtenc_output_callback,
990  avctx,
991  session);
992 
993  if (status || !vtctx->session) {
994  av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
995 
996 #if !TARGET_OS_IPHONE
997  if (!vtctx->allow_sw) {
998  av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
999  }
1000 #endif
1001 
1002  return AVERROR_EXTERNAL;
1003  }
1004 
1005  bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1006  kCFNumberSInt32Type,
1007  &bit_rate);
1008  if (!bit_rate_num) return AVERROR(ENOMEM);
1009 
1010  status = VTSessionSetProperty(vtctx->session,
1011  kVTCompressionPropertyKey_AverageBitRate,
1012  bit_rate_num);
1013  CFRelease(bit_rate_num);
1014 
1015  if (status) {
1016  av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1017  return AVERROR_EXTERNAL;
1018  }
1019 
1020  if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1021  // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1022  bytes_per_second_value = max_rate >> 3;
1023  bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1024  kCFNumberSInt64Type,
1025  &bytes_per_second_value);
1026  if (!bytes_per_second) {
1027  return AVERROR(ENOMEM);
1028  }
1029  one_second_value = 1;
1030  one_second = CFNumberCreate(kCFAllocatorDefault,
1031  kCFNumberSInt64Type,
1032  &one_second_value);
1033  if (!one_second) {
1034  CFRelease(bytes_per_second);
1035  return AVERROR(ENOMEM);
1036  }
1037  nums[0] = (void *)bytes_per_second;
1038  nums[1] = (void *)one_second;
1039  data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1040  (const void **)nums,
1041  2,
1042  &kCFTypeArrayCallBacks);
1043 
1044  if (!data_rate_limits) {
1045  CFRelease(bytes_per_second);
1046  CFRelease(one_second);
1047  return AVERROR(ENOMEM);
1048  }
1049  status = VTSessionSetProperty(vtctx->session,
1050  kVTCompressionPropertyKey_DataRateLimits,
1051  data_rate_limits);
1052 
1053  CFRelease(bytes_per_second);
1054  CFRelease(one_second);
1055  CFRelease(data_rate_limits);
1056 
1057  if (status) {
1058  av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1059  return AVERROR_EXTERNAL;
1060  }
1061  }
1062 
1063  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1064  // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1065  if (profile_level) {
1066  status = VTSessionSetProperty(vtctx->session,
1067  kVTCompressionPropertyKey_ProfileLevel,
1068  profile_level);
1069  if (status) {
1070  av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
1071  }
1072  }
1073  }
1074 
1075  if (avctx->gop_size > 0) {
1076  CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1077  kCFNumberIntType,
1078  &avctx->gop_size);
1079  if (!interval) {
1080  return AVERROR(ENOMEM);
1081  }
1082 
1083  status = VTSessionSetProperty(vtctx->session,
1084  kVTCompressionPropertyKey_MaxKeyFrameInterval,
1085  interval);
1086  CFRelease(interval);
1087 
1088  if (status) {
1089  av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1090  return AVERROR_EXTERNAL;
1091  }
1092  }
1093 
1094  if (vtctx->frames_before) {
1095  status = VTSessionSetProperty(vtctx->session,
1096  kVTCompressionPropertyKey_MoreFramesBeforeStart,
1097  kCFBooleanTrue);
1098 
1099  if (status == kVTPropertyNotSupportedErr) {
1100  av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1101  } else if (status) {
1102  av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1103  }
1104  }
1105 
1106  if (vtctx->frames_after) {
1107  status = VTSessionSetProperty(vtctx->session,
1108  kVTCompressionPropertyKey_MoreFramesAfterEnd,
1109  kCFBooleanTrue);
1110 
1111  if (status == kVTPropertyNotSupportedErr) {
1112  av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1113  } else if (status) {
1114  av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1115  }
1116  }
1117 
1118  if (avctx->sample_aspect_ratio.num != 0) {
1119  CFNumberRef num;
1120  CFNumberRef den;
1121  CFMutableDictionaryRef par;
1122  AVRational *avpar = &avctx->sample_aspect_ratio;
1123 
1124  av_reduce(&avpar->num, &avpar->den,
1125  avpar->num, avpar->den,
1126  0xFFFFFFFF);
1127 
1128  num = CFNumberCreate(kCFAllocatorDefault,
1129  kCFNumberIntType,
1130  &avpar->num);
1131 
1132  den = CFNumberCreate(kCFAllocatorDefault,
1133  kCFNumberIntType,
1134  &avpar->den);
1135 
1136 
1137 
1138  par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1139  2,
1140  &kCFCopyStringDictionaryKeyCallBacks,
1141  &kCFTypeDictionaryValueCallBacks);
1142 
1143  if (!par || !num || !den) {
1144  if (par) CFRelease(par);
1145  if (num) CFRelease(num);
1146  if (den) CFRelease(den);
1147 
1148  return AVERROR(ENOMEM);
1149  }
1150 
1151  CFDictionarySetValue(
1152  par,
1153  kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1154  num);
1155 
1156  CFDictionarySetValue(
1157  par,
1158  kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1159  den);
1160 
1161  status = VTSessionSetProperty(vtctx->session,
1162  kVTCompressionPropertyKey_PixelAspectRatio,
1163  par);
1164 
1165  CFRelease(par);
1166  CFRelease(num);
1167  CFRelease(den);
1168 
1169  if (status) {
1170  av_log(avctx,
1171  AV_LOG_ERROR,
1172  "Error setting pixel aspect ratio to %d:%d: %d.\n",
1173  avctx->sample_aspect_ratio.num,
1174  avctx->sample_aspect_ratio.den,
1175  status);
1176 
1177  return AVERROR_EXTERNAL;
1178  }
1179  }
1180 
1181 
1182  if (vtctx->transfer_function) {
1183  status = VTSessionSetProperty(vtctx->session,
1184  kVTCompressionPropertyKey_TransferFunction,
1185  vtctx->transfer_function);
1186 
1187  if (status) {
1188  av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1189  }
1190  }
1191 
1192 
1193  if (vtctx->ycbcr_matrix) {
1194  status = VTSessionSetProperty(vtctx->session,
1195  kVTCompressionPropertyKey_YCbCrMatrix,
1196  vtctx->ycbcr_matrix);
1197 
1198  if (status) {
1199  av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1200  }
1201  }
1202 
1203 
1204  if (vtctx->color_primaries) {
1205  status = VTSessionSetProperty(vtctx->session,
1206  kVTCompressionPropertyKey_ColorPrimaries,
1207  vtctx->color_primaries);
1208 
1209  if (status) {
1210  av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1211  }
1212  }
1213 
1214  if (gamma_level) {
1215  status = VTSessionSetProperty(vtctx->session,
1216  kCVImageBufferGammaLevelKey,
1217  gamma_level);
1218 
1219  if (status) {
1220  av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1221  }
1222  }
1223 
1224  if (!vtctx->has_b_frames) {
1225  status = VTSessionSetProperty(vtctx->session,
1226  kVTCompressionPropertyKey_AllowFrameReordering,
1227  kCFBooleanFalse);
1228 
1229  if (status) {
1230  av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1231  return AVERROR_EXTERNAL;
1232  }
1233  }
1234 
1235  if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1236  CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1237  compat_keys.kVTH264EntropyMode_CABAC:
1238  compat_keys.kVTH264EntropyMode_CAVLC;
1239 
1240  status = VTSessionSetProperty(vtctx->session,
1241  compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1242  entropy);
1243 
1244  if (status) {
1245  av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1246  }
1247  }
1248 
1249  if (vtctx->realtime) {
1250  status = VTSessionSetProperty(vtctx->session,
1251  compat_keys.kVTCompressionPropertyKey_RealTime,
1252  kCFBooleanTrue);
1253 
1254  if (status) {
1255  av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1256  }
1257  }
1258 
1259  status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1260  if (status) {
1261  av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1262  return AVERROR_EXTERNAL;
1263  }
1264 
1265  return 0;
1266 }
1267 
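/*
 * Note on the rate-control block in vtenc_create_encoder() above:
 * kVTCompressionPropertyKey_DataRateLimits takes an array of
 * [byte count, seconds] pairs, so max_rate >> 3 converts the user's bit-rate
 * cap to bytes over a one-second window; e.g. "-maxrate 2M" becomes
 * { 250000 bytes, 1 second }. AverageBitRate, by contrast, is given to
 * VideoToolbox directly in bits per second.
 */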
1268 static int vtenc_configure_encoder(AVCodecContext *avctx)
1269 {
1270  CFMutableDictionaryRef enc_info;
1271  CFMutableDictionaryRef pixel_buffer_info;
1272  CMVideoCodecType codec_type;
1273  VTEncContext *vtctx = avctx->priv_data;
1274  CFStringRef profile_level;
1275  CFNumberRef gamma_level = NULL;
1276  int status;
1277 
1278  codec_type = get_cm_codec_type(avctx->codec_id);
1279  if (!codec_type) {
1280  av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1281  return AVERROR(EINVAL);
1282  }
1283 
1284  vtctx->codec_id = avctx->codec_id;
1285 
1286  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1287  vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1288 
1289  vtctx->has_b_frames = avctx->max_b_frames > 0;
1290  if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1291  av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1292  vtctx->has_b_frames = false;
1293  }
1294 
1295  if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1296  av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1297  vtctx->entropy = VT_ENTROPY_NOT_SET;
1298  }
1299 
1300  if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1301  } else {
1302  vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1303  if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1304  if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1305  }
1306 
1307  enc_info = CFDictionaryCreateMutable(
1308  kCFAllocatorDefault,
1309  20,
1310  &kCFCopyStringDictionaryKeyCallBacks,
1311  &kCFTypeDictionaryValueCallBacks
1312  );
1313 
1314  if (!enc_info) return AVERROR(ENOMEM);
1315 
1316 #if !TARGET_OS_IPHONE
1317  if (!vtctx->allow_sw) {
1318  CFDictionarySetValue(enc_info,
1319  compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1320  kCFBooleanTrue);
1321  } else {
1322  CFDictionarySetValue(enc_info,
1323  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1324  kCFBooleanTrue);
1325  }
1326 #endif
1327 
1328  if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1329  status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1330  if (status)
1331  goto init_cleanup;
1332  } else {
1333  pixel_buffer_info = NULL;
1334  }
1335 
1336  vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1337 
1338  get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1339  get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1340  get_cv_color_primaries(avctx, &vtctx->color_primaries);
1341 
1342 
1343  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1344  status = vtenc_populate_extradata(avctx,
1345  codec_type,
1346  profile_level,
1347  gamma_level,
1348  enc_info,
1349  pixel_buffer_info);
1350  if (status)
1351  goto init_cleanup;
1352  }
1353 
1354  status = vtenc_create_encoder(avctx,
1355  codec_type,
1356  profile_level,
1357  gamma_level,
1358  enc_info,
1359  pixel_buffer_info,
1360  &vtctx->session);
1361 
1362 init_cleanup:
1363  if (gamma_level)
1364  CFRelease(gamma_level);
1365 
1366  if (pixel_buffer_info)
1367  CFRelease(pixel_buffer_info);
1368 
1369  CFRelease(enc_info);
1370 
1371  return status;
1372 }
1373 
1374 static av_cold int vtenc_init(AVCodecContext *avctx)
1375 {
1376  VTEncContext *vtctx = avctx->priv_data;
1377  CFBooleanRef has_b_frames_cfbool;
1378  int status;
1379 
1380  pthread_once(&once_ctrl, loadVTEncSymbols);
1381 
1382  pthread_mutex_init(&vtctx->lock, NULL);
1383  pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1384 
1385  vtctx->session = NULL;
1386  status = vtenc_configure_encoder(avctx);
1387  if (status) return status;
1388 
1389  status = VTSessionCopyProperty(vtctx->session,
1390  kVTCompressionPropertyKey_AllowFrameReordering,
1391  kCFAllocatorDefault,
1392  &has_b_frames_cfbool);
1393 
1394  if (!status && has_b_frames_cfbool) {
1395  //Some devices don't output B-frames for main profile, even if requested.
1396  vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1397  CFRelease(has_b_frames_cfbool);
1398  }
1399  avctx->has_b_frames = vtctx->has_b_frames;
1400 
1401  return 0;
1402 }
1403 
1404 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1405 {
1406  CFArrayRef attachments;
1407  CFDictionaryRef attachment;
1408  CFBooleanRef not_sync;
1409  CFIndex len;
1410 
1411  attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1412  len = !attachments ? 0 : CFArrayGetCount(attachments);
1413 
1414  if (!len) {
1415  *is_key_frame = true;
1416  return;
1417  }
1418 
1419  attachment = CFArrayGetValueAtIndex(attachments, 0);
1420 
1421  if (CFDictionaryGetValueIfPresent(attachment,
1422  kCMSampleAttachmentKey_NotSync,
1423  (const void **)&not_sync))
1424  {
1425  *is_key_frame = !CFBooleanGetValue(not_sync);
1426  } else {
1427  *is_key_frame = true;
1428  }
1429 }
1430 
1431 static int is_post_sei_nal_type(int nal_type){
1432  return nal_type != H264_NAL_SEI &&
1433  nal_type != H264_NAL_SPS &&
1434  nal_type != H264_NAL_PPS &&
1435  nal_type != H264_NAL_AUD;
1436 }
1437 
1438 /*
1439  * Finds the end of the SEI messages inside an SEI NAL unit, so that more
1440  * SEI payloads can be appended there; *sei_end is set to that position.
1441  */
1442 static int find_sei_end(AVCodecContext *avctx,
1443  uint8_t *nal_data,
1444  size_t nal_size,
1445  uint8_t **sei_end)
1446 {
1447  int nal_type;
1448  size_t sei_payload_size = 0;
1449  int sei_payload_type = 0;
1450  *sei_end = NULL;
1451  uint8_t *nal_start = nal_data;
1452 
1453  if (!nal_size)
1454  return 0;
1455 
1456  nal_type = *nal_data & 0x1F;
1457  if (nal_type != H264_NAL_SEI)
1458  return 0;
1459 
1460  nal_data++;
1461  nal_size--;
1462 
1463  if (nal_data[nal_size - 1] == 0x80)
1464  nal_size--;
1465 
1466  while (nal_size > 0 && *nal_data > 0) {
1467  do{
1468  sei_payload_type += *nal_data;
1469  nal_data++;
1470  nal_size--;
1471  } while (nal_size > 0 && *nal_data == 0xFF);
1472 
1473  if (!nal_size) {
1474  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1475  return AVERROR_INVALIDDATA;
1476  }
1477 
1478  do{
1479  sei_payload_size += *nal_data;
1480  nal_data++;
1481  nal_size--;
1482  } while (nal_size > 0 && *nal_data == 0xFF);
1483 
1484  if (nal_size < sei_payload_size) {
1485  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1486  return AVERROR_INVALIDDATA;
1487  }
1488 
1489  nal_data += sei_payload_size;
1490  nal_size -= sei_payload_size;
1491  }
1492 
1493  *sei_end = nal_data;
1494 
1495  return nal_data - nal_start + 1;
1496 }
1497 
1498 /**
1499  * Copies the data inserting emulation prevention bytes as needed.
1500  * Existing data in the destination can be taken into account by providing
1501  * dst with a dst_offset > 0.
1502  *
1503  * @return The number of bytes copied on success. On failure, the negative of
1504  * the number of bytes needed to copy src is returned.
1505  */
1506 static int copy_emulation_prev(const uint8_t *src,
1507  size_t src_size,
1508  uint8_t *dst,
1509  ssize_t dst_offset,
1510  size_t dst_size)
1511 {
1512  int zeros = 0;
1513  int wrote_bytes;
1514  uint8_t* dst_start;
1515  uint8_t* dst_end = dst + dst_size;
1516  const uint8_t* src_end = src + src_size;
1517  int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1518  int i;
1519  for (i = start_at; i < dst_offset && i < dst_size; i++) {
1520  if (!dst[i])
1521  zeros++;
1522  else
1523  zeros = 0;
1524  }
1525 
1526  dst += dst_offset;
1527  dst_start = dst;
1528  for (; src < src_end; src++, dst++) {
1529  if (zeros == 2) {
1530  int insert_ep3_byte = *src <= 3;
1531  if (insert_ep3_byte) {
1532  if (dst < dst_end)
1533  *dst = 3;
1534  dst++;
1535  }
1536 
1537  zeros = 0;
1538  }
1539 
1540  if (dst < dst_end)
1541  *dst = *src;
1542 
1543  if (!*src)
1544  zeros++;
1545  else
1546  zeros = 0;
1547  }
1548 
1549  wrote_bytes = dst - dst_start;
1550 
1551  if (dst > dst_end)
1552  return -wrote_bytes;
1553 
1554  return wrote_bytes;
1555 }
1556 
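/*
 * Worked example of the emulation-prevention logic above: whenever two zero
 * bytes have been written and the next source byte is 0x00..0x03, a 0x03
 * byte is inserted first, so the raw payload
 *
 *     00 00 01 A9   becomes   00 00 03 01 A9
 *
 * which keeps SEI payload bytes from forming an accidental start code inside
 * the NAL unit. When called with dst_size 0 (as get_sei_msg_bytes() does),
 * nothing is written and the negated required size is returned.
 */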
1557 static int write_sei(const ExtraSEI *sei,
1558  int sei_type,
1559  uint8_t *dst,
1560  size_t dst_size)
1561 {
1562  uint8_t *sei_start = dst;
1563  size_t remaining_sei_size = sei->size;
1564  size_t remaining_dst_size = dst_size;
1565  int header_bytes;
1566  int bytes_written;
1567  ssize_t offset;
1568 
1569  if (!remaining_dst_size)
1570  return AVERROR_BUFFER_TOO_SMALL;
1571 
1572  while (sei_type && remaining_dst_size != 0) {
1573  int sei_byte = sei_type > 255 ? 255 : sei_type;
1574  *dst = sei_byte;
1575 
1576  sei_type -= sei_byte;
1577  dst++;
1578  remaining_dst_size--;
1579  }
1580 
1581  if (!dst_size)
1582  return AVERROR_BUFFER_TOO_SMALL;
1583 
1584  while (remaining_sei_size && remaining_dst_size != 0) {
1585  int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1586  *dst = size_byte;
1587 
1588  remaining_sei_size -= size_byte;
1589  dst++;
1590  remaining_dst_size--;
1591  }
1592 
1593  if (remaining_dst_size < sei->size)
1594  return AVERROR_BUFFER_TOO_SMALL;
1595 
1596  header_bytes = dst - sei_start;
1597 
1598  offset = header_bytes;
1599  bytes_written = copy_emulation_prev(sei->data,
1600  sei->size,
1601  sei_start,
1602  offset,
1603  dst_size);
1604  if (bytes_written < 0)
1605  return AVERROR_BUFFER_TOO_SMALL;
1606 
1607  bytes_written += header_bytes;
1608  return bytes_written;
1609 }
1610 
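/*
 * For illustration, the two loops above implement the H.264 SEI ff_byte
 * coding: payload type and payload size are each written as a run of 0xFF
 * bytes followed by a final byte < 0xFF. An A53 caption payload of 300 bytes
 * with payload type 4 (user_data_registered_itu_t_t35) is therefore prefixed
 * with
 *
 *     04 FF 2D        (type 4, size 255 + 45 = 300)
 *
 * before the emulation-prevention-escaped payload itself.
 */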
1611 /**
1612  * Copies NAL units and replaces length codes with
1613  * H.264 Annex B start codes. On failure, the contents of
1614  * dst_data may have been modified.
1615  *
1616  * @param length_code_size Byte length of each length code
1617  * @param sample_buffer NAL units prefixed with length codes.
1618  * @param sei Optional A53 closed captions SEI data.
1619  * @param dst_data Must be zeroed before calling this function.
1620  * Contains the copied NAL units prefixed with
1621  * start codes when the function returns
1622  * successfully.
1623  * @param dst_size Length of dst_data
1624  * @return 0 on success
1625  * AVERROR_INVALIDDATA if length_code_size is invalid
1626  * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1627  * or if a length_code in src_data specifies data beyond
1628  * the end of its buffer.
1629  */
1631  AVCodecContext *avctx,
1632  size_t length_code_size,
1633  CMSampleBufferRef sample_buffer,
1634  ExtraSEI *sei,
1635  uint8_t *dst_data,
1636  size_t dst_size)
1637 {
1638  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1639  size_t remaining_src_size = src_size;
1640  size_t remaining_dst_size = dst_size;
1641  size_t src_offset = 0;
1642  int wrote_sei = 0;
1643  int status;
1644  uint8_t size_buf[4];
1645  uint8_t nal_type;
1646  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1647 
1648  if (length_code_size > 4) {
1649  return AVERROR_INVALIDDATA;
1650  }
1651 
1652  while (remaining_src_size > 0) {
1653  size_t curr_src_len;
1654  size_t curr_dst_len;
1655  size_t box_len = 0;
1656  size_t i;
1657 
1658  uint8_t *dst_box;
1659 
1660  status = CMBlockBufferCopyDataBytes(block,
1661  src_offset,
1662  length_code_size,
1663  size_buf);
1664  if (status) {
1665  av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1666  return AVERROR_EXTERNAL;
1667  }
1668 
1669  status = CMBlockBufferCopyDataBytes(block,
1670  src_offset + length_code_size,
1671  1,
1672  &nal_type);
1673 
1674  if (status) {
1675  av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1676  return AVERROR_EXTERNAL;
1677  }
1678 
1679  nal_type &= 0x1F;
1680 
1681  for (i = 0; i < length_code_size; i++) {
1682  box_len <<= 8;
1683  box_len |= size_buf[i];
1684  }
1685 
1686  if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1687  //No SEI NAL unit - insert.
1688  int wrote_bytes;
1689 
1690  memcpy(dst_data, start_code, sizeof(start_code));
1691  dst_data += sizeof(start_code);
1692  remaining_dst_size -= sizeof(start_code);
1693 
1694  *dst_data = H264_NAL_SEI;
1695  dst_data++;
1696  remaining_dst_size--;
1697 
1698  wrote_bytes = write_sei(sei,
1699  H264_SEI_TYPE_USER_DATA_REGISTERED,
1700  dst_data,
1701  remaining_dst_size);
1702 
1703  if (wrote_bytes < 0)
1704  return wrote_bytes;
1705 
1706  remaining_dst_size -= wrote_bytes;
1707  dst_data += wrote_bytes;
1708 
1709  if (remaining_dst_size <= 0)
1710  return AVERROR_BUFFER_TOO_SMALL;
1711 
1712  *dst_data = 0x80;
1713 
1714  dst_data++;
1715  remaining_dst_size--;
1716 
1717  wrote_sei = 1;
1718  }
1719 
1720  curr_src_len = box_len + length_code_size;
1721  curr_dst_len = box_len + sizeof(start_code);
1722 
1723  if (remaining_src_size < curr_src_len) {
1724  return AVERROR_BUFFER_TOO_SMALL;
1725  }
1726 
1727  if (remaining_dst_size < curr_dst_len) {
1728  return AVERROR_BUFFER_TOO_SMALL;
1729  }
1730 
1731  dst_box = dst_data + sizeof(start_code);
1732 
1733  memcpy(dst_data, start_code, sizeof(start_code));
1734  status = CMBlockBufferCopyDataBytes(block,
1735  src_offset + length_code_size,
1736  box_len,
1737  dst_box);
1738 
1739  if (status) {
1740  av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1741  return AVERROR_EXTERNAL;
1742  }
1743 
1744  if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1745  //Found SEI NAL unit - append.
1746  int wrote_bytes;
1747  int old_sei_length;
1748  int extra_bytes;
1749  uint8_t *new_sei;
1750  old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1751  if (old_sei_length < 0)
1752  return status;
1753 
1754  wrote_bytes = write_sei(sei,
1755  H264_SEI_TYPE_USER_DATA_REGISTERED,
1756  new_sei,
1757  remaining_dst_size - old_sei_length);
1758  if (wrote_bytes < 0)
1759  return wrote_bytes;
1760 
1761  if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1762  return AVERROR_BUFFER_TOO_SMALL;
1763 
1764  new_sei[wrote_bytes++] = 0x80;
1765  extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1766 
1767  dst_data += extra_bytes;
1768  remaining_dst_size -= extra_bytes;
1769 
1770  wrote_sei = 1;
1771  }
1772 
1773  src_offset += curr_src_len;
1774  dst_data += curr_dst_len;
1775 
1776  remaining_src_size -= curr_src_len;
1777  remaining_dst_size -= curr_dst_len;
1778  }
1779 
1780  return 0;
1781 }
1782 
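/*
 * Summary example for copy_replace_length_codes(): each AVCC length prefix is
 * replaced by a 4-byte Annex B start code, so
 *
 *     00 00 00 19 <25-byte slice>   becomes   00 00 00 01 <25-byte slice>
 *
 * and, when A53 closed-caption SEI is present, a new SEI NAL unit is either
 * inserted before the first VCL NAL unit or appended to an existing SEI NAL
 * unit. That is why out_buf_size in vtenc_cm_to_avpacket() below adds
 * sizeof(start_code) - length_code_size per NAL plus the SEI bytes.
 */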
1783 /**
1784  * Returns a sufficient number of bytes to contain the sei data.
1785  * It may be greater than the minimum required.
1786  */
1787 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1788  int copied_size;
1789  if (sei->size == 0)
1790  return 0;
1791 
1792  copied_size = -copy_emulation_prev(sei->data,
1793  sei->size,
1794  NULL,
1795  0,
1796  0);
1797 
1798  if ((sei->size % 255) == 0) //may result in an extra byte
1799  copied_size++;
1800 
1801  return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1802 }
1803 
1804 static int vtenc_cm_to_avpacket(
1805  AVCodecContext *avctx,
1806  CMSampleBufferRef sample_buffer,
1807  AVPacket *pkt,
1808  ExtraSEI *sei)
1809 {
1810  VTEncContext *vtctx = avctx->priv_data;
1811 
1812  int status;
1813  bool is_key_frame;
1814  bool add_header;
1815  size_t length_code_size;
1816  size_t header_size = 0;
1817  size_t in_buf_size;
1818  size_t out_buf_size;
1819  size_t sei_nalu_size = 0;
1820  int64_t dts_delta;
1821  int64_t time_base_num;
1822  int nalu_count;
1823  CMTime pts;
1824  CMTime dts;
1825  CMVideoFormatDescriptionRef vid_fmt;
1826 
1827 
1828  vtenc_get_frame_info(sample_buffer, &is_key_frame);
1829  status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1830  if (status) return status;
1831 
1832  add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1833 
1834  if (add_header) {
1835  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1836  if (!vid_fmt) {
1837  av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1838  return AVERROR_EXTERNAL;
1839  }
1840 
1841  int status = get_params_size(avctx, vid_fmt, &header_size);
1842  if (status) return status;
1843  }
1844 
1845  status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1846  if(status)
1847  return status;
1848 
1849  if (sei) {
1850  size_t msg_size = get_sei_msg_bytes(sei,
1851  H264_SEI_TYPE_USER_DATA_REGISTERED);
1852 
1853  sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1854  }
1855 
1856  in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1857  out_buf_size = header_size +
1858  in_buf_size +
1859  sei_nalu_size +
1860  nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1861 
1862  status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1863  if (status < 0)
1864  return status;
1865 
1866  if (add_header) {
1867  status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1868  if(status) return status;
1869  }
1870 
1871  status = copy_replace_length_codes(
1872  avctx,
1873  length_code_size,
1874  sample_buffer,
1875  sei,
1876  pkt->data + header_size,
1877  pkt->size - header_size
1878  );
1879 
1880  if (status) {
1881  av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1882  return status;
1883  }
1884 
1885  if (is_key_frame) {
1886  pkt->flags |= AV_PKT_FLAG_KEY;
1887  }
1888 
1889  pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1890  dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
1891 
1892  if (CMTIME_IS_INVALID(dts)) {
1893  if (!vtctx->has_b_frames) {
1894  dts = pts;
1895  } else {
1896  av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1897  return AVERROR_EXTERNAL;
1898  }
1899  }
1900 
1901  dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1902  time_base_num = avctx->time_base.num;
1903  pkt->pts = pts.value / time_base_num;
1904  pkt->dts = dts.value / time_base_num - dts_delta;
1905  pkt->size = out_buf_size;
1906 
1907  return 0;
1908 }
1909 
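/*
 * Timestamp handling above, in short: VideoToolbox hands back the CMTime
 * values that were attached when the frame was submitted for encoding (in
 * vtenc_send_frame(), outside this excerpt), already scaled by
 * avctx->time_base, so dividing by time_base.num converts them back into
 * packet pts/dts. An invalid decode timestamp is only tolerated when the
 * stream has no B-frames, in which case dts is simply set equal to pts.
 */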
1910 /*
1911  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1912  * containing all planes if so.
1913  */
1914 static int get_cv_pixel_info(
1915  AVCodecContext *avctx,
1916  const AVFrame *frame,
1917  int *color,
1918  int *plane_count,
1919  size_t *widths,
1920  size_t *heights,
1921  size_t *strides,
1922  size_t *contiguous_buf_size)
1923 {
1924  VTEncContext *vtctx = avctx->priv_data;
1925  int av_format = frame->format;
1926  int av_color_range = frame->color_range;
1927  int i;
1928  int range_guessed;
1929  int status;
1930 
1931  status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1932  if (status) {
1933  av_log(avctx,
1934  AV_LOG_ERROR,
1935  "Could not get pixel format for color format '%s' range '%s'.\n",
1936  av_get_pix_fmt_name(av_format),
1937  av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1938  av_color_range < AVCOL_RANGE_NB ?
1939  av_color_range_name(av_color_range) :
1940  "Unknown");
1941 
1942  return AVERROR(EINVAL);
1943  }
1944 
1945  if (range_guessed) {
1946  if (!vtctx->warned_color_range) {
1947  vtctx->warned_color_range = true;
1948  av_log(avctx,
1949  AV_LOG_WARNING,
1950  "Color range not set for %s. Using MPEG range.\n",
1951  av_get_pix_fmt_name(av_format));
1952  }
1953  }
1954 
1955  switch (av_format) {
1956  case AV_PIX_FMT_NV12:
1957  *plane_count = 2;
1958 
1959  widths [0] = avctx->width;
1960  heights[0] = avctx->height;
1961  strides[0] = frame ? frame->linesize[0] : avctx->width;
1962 
1963  widths [1] = (avctx->width + 1) / 2;
1964  heights[1] = (avctx->height + 1) / 2;
1965  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
1966  break;
1967 
1968  case AV_PIX_FMT_YUV420P:
1969  *plane_count = 3;
1970 
1971  widths [0] = avctx->width;
1972  heights[0] = avctx->height;
1973  strides[0] = frame ? frame->linesize[0] : avctx->width;
1974 
1975  widths [1] = (avctx->width + 1) / 2;
1976  heights[1] = (avctx->height + 1) / 2;
1977  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
1978 
1979  widths [2] = (avctx->width + 1) / 2;
1980  heights[2] = (avctx->height + 1) / 2;
1981  strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
1982  break;
1983 
1984  default:
1985  av_log(
1986  avctx,
1987  AV_LOG_ERROR,
1988  "Could not get frame format info for color %d range %d.\n",
1989  av_format,
1990  av_color_range);
1991 
1992  return AVERROR(EINVAL);
1993  }
1994 
1995  *contiguous_buf_size = 0;
1996  for (i = 0; i < *plane_count; i++) {
1997  if (i < *plane_count - 1 &&
1998  frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
1999  *contiguous_buf_size = 0;
2000  break;
2001  }
2002 
2003  *contiguous_buf_size += strides[i] * heights[i];
2004  }
2005 
2006  return 0;
2007 }
2008 
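/*
 * Example for the contiguous-buffer check above (relevant on macOS, where the
 * frame is wrapped rather than copied): for a 640x480 NV12 frame whose chroma
 * plane directly follows the luma plane with linesize 640,
 * contiguous_buf_size = 640*480 + 640*240 = 460800 bytes, the value later
 * handed to CVPixelBufferCreateWithPlanarBytes(). If any plane is not
 * adjacent to the previous one, the value stays 0.
 */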
2009 #if !TARGET_OS_IPHONE
2010 //Not used on iOS - frame is always copied.
2011 static void free_avframe(
2012  void *release_ctx,
2013  const void *data,
2014  size_t size,
2015  size_t plane_count,
2016  const void *plane_addresses[])
2017 {
2018  AVFrame *frame = release_ctx;
2019  av_frame_free(&frame);
2020 }
2021 #else
2022 //Not used on OSX - frame is never copied.
2023 static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
2024  const AVFrame *frame,
2025  CVPixelBufferRef cv_img,
2026  const size_t *plane_strides,
2027  const size_t *plane_rows)
2028 {
2029  int i, j;
2030  size_t plane_count;
2031  int status;
2032  int rows;
2033  int src_stride;
2034  int dst_stride;
2035  uint8_t *src_addr;
2036  uint8_t *dst_addr;
2037  size_t copy_bytes;
2038 
2039  status = CVPixelBufferLockBaseAddress(cv_img, 0);
2040  if (status) {
2041  av_log(
2042  avctx,
2043  AV_LOG_ERROR,
2044  "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2045  status
2046  );
2047  }
2048 
2049  if (CVPixelBufferIsPlanar(cv_img)) {
2050  plane_count = CVPixelBufferGetPlaneCount(cv_img);
2051  for (i = 0; frame->data[i]; i++) {
2052  if (i == plane_count) {
2053  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2054  av_log(avctx,
2055  AV_LOG_ERROR,
2056  "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2057  );
2058 
2059  return AVERROR_EXTERNAL;
2060  }
2061 
2062  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2063  src_addr = (uint8_t*)frame->data[i];
2064  dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2065  src_stride = plane_strides[i];
2066  rows = plane_rows[i];
2067 
2068  if (dst_stride == src_stride) {
2069  memcpy(dst_addr, src_addr, src_stride * rows);
2070  } else {
2071  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2072 
2073  for (j = 0; j < rows; j++) {
2074  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2075  }
2076  }
2077  }
2078  } else {
2079  if (frame->data[1]) {
2080  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2081  av_log(avctx,
2082  AV_LOG_ERROR,
2083  "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2084  );
2085 
2086  return AVERROR_EXTERNAL;
2087  }
2088 
2089  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2090  src_addr = (uint8_t*)frame->data[0];
2091  dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2092  src_stride = plane_strides[0];
2093  rows = plane_rows[0];
2094 
2095  if (dst_stride == src_stride) {
2096  memcpy(dst_addr, src_addr, src_stride * rows);
2097  } else {
2098  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2099 
2100  for (j = 0; j < rows; j++) {
2101  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2102  }
2103  }
2104  }
2105 
2106  status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2107  if (status) {
2108  av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2109  return AVERROR_EXTERNAL;
2110  }
2111 
2112  return 0;
2113 }
2114 #endif //!TARGET_OS_IPHONE
2115 
2116 static int create_cv_pixel_buffer(AVCodecContext *avctx,
2117  const AVFrame *frame,
2118  CVPixelBufferRef *cv_img)
2119 {
2120  int plane_count;
2121  int color;
2122  size_t widths [AV_NUM_DATA_POINTERS];
2123  size_t heights[AV_NUM_DATA_POINTERS];
2124  size_t strides[AV_NUM_DATA_POINTERS];
2125  int status;
2126  size_t contiguous_buf_size;
2127 #if TARGET_OS_IPHONE
2128  CVPixelBufferPoolRef pix_buf_pool;
2129  VTEncContext* vtctx = avctx->priv_data;
2130 #else
2131  CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
2132  kCFAllocatorDefault,
2133  10,
2134  &kCFCopyStringDictionaryKeyCallBacks,
2135  &kCFTypeDictionaryValueCallBacks);
2136 
2137  if (!pix_buf_attachments) return AVERROR(ENOMEM);
2138 #endif
2139 
2140  if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2141  av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2142 
2143  *cv_img = (CVPixelBufferRef)frame->data[3];
2144  av_assert0(*cv_img);
2145 
2146  CFRetain(*cv_img);
2147  return 0;
2148  }
2149 
2150  memset(widths, 0, sizeof(widths));
2151  memset(heights, 0, sizeof(heights));
2152  memset(strides, 0, sizeof(strides));
2153 
2154  status = get_cv_pixel_info(
2155  avctx,
2156  frame,
2157  &color,
2158  &plane_count,
2159  widths,
2160  heights,
2161  strides,
2162  &contiguous_buf_size
2163  );
2164 
2165  if (status) {
2166  av_log(
2167  avctx,
2168  AV_LOG_ERROR,
2169  "Error: Cannot convert format %d color_range %d: %d\n",
2170  frame->format,
2171  frame->color_range,
2172  status
2173  );
2174 
2175  return AVERROR_EXTERNAL;
2176  }
2177 
2178 #if TARGET_OS_IPHONE
2179  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2180  if (!pix_buf_pool) {
2181  /* On iOS, the VT session is invalidated when the app switches from
2182  * foreground to background and vice versa. Fetch the actual error code
2183  * of the VT session to detect that case and restart the VT session
2184  * accordingly. */
2185  OSStatus vtstatus;
2186 
2187  vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2188  if (vtstatus == kVTInvalidSessionErr) {
2189  CFRelease(vtctx->session);
2190  vtctx->session = NULL;
2191  status = vtenc_configure_encoder(avctx);
2192  if (status == 0)
2193  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2194  }
2195  if (!pix_buf_pool) {
2196  av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2197  return AVERROR_EXTERNAL;
2198  }
2199  else
2200  av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2201  "kVTInvalidSessionErr error.\n");
2202  }
2203 
2204  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2205  pix_buf_pool,
2206  cv_img);
2207 
2208 
2209  if (status) {
2210  av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2211  return AVERROR_EXTERNAL;
2212  }
2213 
2214  status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2215  if (status) {
2216  CFRelease(*cv_img);
2217  *cv_img = NULL;
2218  return status;
2219  }
2220 #else
2221  AVFrame *enc_frame = av_frame_alloc();
2222  if (!enc_frame) return AVERROR(ENOMEM);
2223 
2224  status = av_frame_ref(enc_frame, frame);
2225  if (status) {
2226  av_frame_free(&enc_frame);
2227  return status;
2228  }
2229 
2230  status = CVPixelBufferCreateWithPlanarBytes(
2231  kCFAllocatorDefault,
2232  enc_frame->width,
2233  enc_frame->height,
2234  color,
2235  NULL,
2236  contiguous_buf_size,
2237  plane_count,
2238  (void **)enc_frame->data,
2239  widths,
2240  heights,
2241  strides,
2242  free_avframe,
2243  enc_frame,
2244  NULL,
2245  cv_img
2246  );
2247 
2248  add_color_attr(avctx, pix_buf_attachments);
2249  CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
2250  CFRelease(pix_buf_attachments);
2251 
2252  if (status) {
2253  av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
2254  return AVERROR_EXTERNAL;
2255  }
2256 #endif
2257 
2258  return 0;
2259 }
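/*
 * Summary of the three paths above: AV_PIX_FMT_VIDEOTOOLBOX input already
 * wraps a CVPixelBufferRef in data[3], so it is retained and returned as-is.
 * On iOS the frame is copied into a buffer from the session's pixel buffer
 * pool (recreating the session first if it was invalidated while the app was
 * in the background). On macOS the AVFrame planes are wrapped without a copy
 * via CVPixelBufferCreateWithPlanarBytes(), with free_avframe() releasing the
 * reference once VideoToolbox is done with it.
 */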
2260 
2261 static int create_encoder_dict_h264(const AVFrame *frame,
2262  CFDictionaryRef* dict_out)
2263 {
2264  CFDictionaryRef dict = NULL;
2265  if (frame->pict_type == AV_PICTURE_TYPE_I) {
2266  const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2267  const void *vals[] = { kCFBooleanTrue };
2268 
2269  dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2270  if(!dict) return AVERROR(ENOMEM);
2271  }
2272 
2273  *dict_out = dict;
2274  return 0;
2275 }
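/*
 * The per-frame options dictionary above is only created when the caller
 * marked the frame as AV_PICTURE_TYPE_I; kVTEncodeFrameOptionKey_ForceKeyFrame
 * = kCFBooleanTrue asks VideoToolbox to emit a keyframe at that point. For
 * every other picture type *dict_out is NULL and the encoder's own GOP
 * decisions apply.
 */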
2276 
2277 static int vtenc_send_frame(AVCodecContext *avctx,
2278  VTEncContext *vtctx,
2279  const AVFrame *frame)
2280 {
2281  CMTime time;
2282  CFDictionaryRef frame_dict;
2283  CVPixelBufferRef cv_img = NULL;
2284  AVFrameSideData *side_data = NULL;
2285  ExtraSEI *sei = NULL;
2286  int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2287 
2288  if (status) return status;
2289 
2290  status = create_encoder_dict_h264(frame, &frame_dict);
2291  if (status) {
2292  CFRelease(cv_img);
2293  return status;
2294  }
2295 
2296  side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2297  if (vtctx->a53_cc && side_data && side_data->size) {
2298  sei = av_mallocz(sizeof(*sei));
2299  if (!sei) {
2300  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2301  } else {
2302  int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2303  if (ret < 0) {
2304  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2305  av_free(sei);
2306  sei = NULL;
2307  }
2308  }
2309  }
2310 
2311  time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2312  status = VTCompressionSessionEncodeFrame(
2313  vtctx->session,
2314  cv_img,
2315  time,
2316  kCMTimeInvalid,
2317  frame_dict,
2318  sei,
2319  NULL
2320  );
2321 
2322  if (frame_dict) CFRelease(frame_dict);
2323  CFRelease(cv_img);
2324 
2325  if (status) {
2326  av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2327  return AVERROR_EXTERNAL;
2328  }
2329 
2330  return 0;
2331 }
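/*
 * Timestamp conversion used above, with illustrative numbers: the frame pts
 * is mapped to CMTimeMake(pts * time_base.num, time_base.den), so with a
 * time_base of 1/30000 and pts = 1001 the frame is submitted at
 * CMTimeMake(1001, 30000), i.e. roughly 0.0334 seconds.
 */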
2332 
2333 static av_cold int vtenc_frame(
2334  AVCodecContext *avctx,
2335  AVPacket *pkt,
2336  const AVFrame *frame,
2337  int *got_packet)
2338 {
2339  VTEncContext *vtctx = avctx->priv_data;
2340  bool get_frame;
2341  int status;
2342  CMSampleBufferRef buf = NULL;
2343  ExtraSEI *sei = NULL;
2344 
2345  if (frame) {
2346  status = vtenc_send_frame(avctx, vtctx, frame);
2347 
2348  if (status) {
2349  status = AVERROR_EXTERNAL;
2350  goto end_nopkt;
2351  }
2352 
2353  if (vtctx->frame_ct_in == 0) {
2354  vtctx->first_pts = frame->pts;
2355  } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2356  vtctx->dts_delta = frame->pts - vtctx->first_pts;
2357  }
2358 
2359  vtctx->frame_ct_in++;
2360  } else if(!vtctx->flushing) {
2361  vtctx->flushing = true;
2362 
2363  status = VTCompressionSessionCompleteFrames(vtctx->session,
2364  kCMTimeIndefinite);
2365 
2366  if (status) {
2367  av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2368  status = AVERROR_EXTERNAL;
2369  goto end_nopkt;
2370  }
2371  }
2372 
2373  *got_packet = 0;
2374  get_frame = vtctx->dts_delta >= 0 || !frame;
2375  if (!get_frame) {
2376  status = 0;
2377  goto end_nopkt;
2378  }
2379 
2380  status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2381  if (status) goto end_nopkt;
2382  if (!buf) goto end_nopkt;
2383 
2384  status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2385  if (sei) {
2386  if (sei->data) av_free(sei->data);
2387  av_free(sei);
2388  }
2389  CFRelease(buf);
2390  if (status) goto end_nopkt;
2391 
2392  *got_packet = 1;
2393  return 0;
2394 
2395 end_nopkt:
2396  av_packet_unref(pkt);
2397  return status;
2398 }
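/*
 * Output pacing in the function above: a packet is only popped from the
 * sample queue once dts_delta is non-negative or the encoder is being
 * flushed with a NULL frame; otherwise *got_packet stays 0 and the call
 * returns without output. This delayed behaviour is why both encoders set
 * AV_CODEC_CAP_DELAY.
 */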
2399 
2400 static int vtenc_populate_extradata(AVCodecContext *avctx,
2401  CMVideoCodecType codec_type,
2402  CFStringRef profile_level,
2403  CFNumberRef gamma_level,
2404  CFDictionaryRef enc_info,
2405  CFDictionaryRef pixel_buffer_info)
2406 {
2407  VTEncContext *vtctx = avctx->priv_data;
2408  AVFrame *frame = av_frame_alloc();
2409  int y_size = avctx->width * avctx->height;
2410  int chroma_size = (avctx->width / 2) * (avctx->height / 2);
2411  CMSampleBufferRef buf = NULL;
2412  int status;
2413 
2414  if (!frame)
2415  return AVERROR(ENOMEM);
2416 
2417  frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);
2418 
2419  if(!frame->buf[0]){
2420  status = AVERROR(ENOMEM);
2421  goto pe_cleanup;
2422  }
2423 
2424  status = vtenc_create_encoder(avctx,
2425  codec_type,
2426  profile_level,
2427  gamma_level,
2428  enc_info,
2429  pixel_buffer_info,
2430  &vtctx->session);
2431  if (status)
2432  goto pe_cleanup;
2433 
2434  frame->data[0] = frame->buf[0]->data;
2435  memset(frame->data[0], 0, y_size);
2436 
2437  frame->data[1] = frame->buf[0]->data + y_size;
2438  memset(frame->data[1], 128, chroma_size);
2439 
2440 
2441  if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
2442  frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
2443  memset(frame->data[2], 128, chroma_size);
2444  }
2445 
2446  frame->linesize[0] = avctx->width;
2447 
2448  if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
2449  frame->linesize[1] =
2450  frame->linesize[2] = (avctx->width + 1) / 2;
2451  } else {
2452  frame->linesize[1] = (avctx->width + 1) / 2;
2453  }
2454 
2455  frame->format = avctx->pix_fmt;
2456  frame->width = avctx->width;
2457  frame->height = avctx->height;
2458  frame->colorspace = avctx->colorspace;
2459  frame->color_range = avctx->color_range;
2460  frame->color_trc = avctx->color_trc;
2461  frame->color_primaries = avctx->color_primaries;
2462 
2463  frame->pts = 0;
2464  status = vtenc_send_frame(avctx, vtctx, frame);
2465  if (status) {
2466  av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
2467  goto pe_cleanup;
2468  }
2469 
2470  //Populates extradata - output frames are flushed and param sets are available.
2471  status = VTCompressionSessionCompleteFrames(vtctx->session,
2472  kCMTimeIndefinite);
2473 
2474  if (status)
2475  goto pe_cleanup;
2476 
2477  status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2478  if (status) {
2479  av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2480  goto pe_cleanup;
2481  }
2482 
2483  CFRelease(buf);
2484 
2485 
2486 
2487 pe_cleanup:
2488  if(vtctx->session)
2489  CFRelease(vtctx->session);
2490 
2491  vtctx->session = NULL;
2492  vtctx->frame_ct_out = 0;
2493 
2494  av_frame_unref(frame);
2495  av_frame_free(&frame);
2496 
2497  av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2498 
2499  return status;
2500 }
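/*
 * The function above exists to fill avctx->extradata: it encodes one dummy
 * black frame (zeroed luma, chroma planes set to 128), flushes the session,
 * and pops the resulting sample, after which the parameter sets captured on
 * the output path are expected to be in avctx->extradata (the av_assert0
 * above checks this on success). The temporary session is released in
 * pe_cleanup.
 */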
2501 
2502 static av_cold int vtenc_close(AVCodecContext *avctx)
2503 {
2504  VTEncContext *vtctx = avctx->priv_data;
2505 
2506  pthread_cond_destroy(&vtctx->cv_sample_sent);
2507  pthread_mutex_destroy(&vtctx->lock);
2508 
2509  if(!vtctx->session) return 0;
2510 
2511  VTCompressionSessionCompleteFrames(vtctx->session,
2512  kCMTimeIndefinite);
2513  clear_frame_queue(vtctx);
2514  CFRelease(vtctx->session);
2515  vtctx->session = NULL;
2516 
2517  if (vtctx->color_primaries) {
2518  CFRelease(vtctx->color_primaries);
2519  vtctx->color_primaries = NULL;
2520  }
2521 
2522  if (vtctx->transfer_function) {
2523  CFRelease(vtctx->transfer_function);
2524  vtctx->transfer_function = NULL;
2525  }
2526 
2527  if (vtctx->ycbcr_matrix) {
2528  CFRelease(vtctx->ycbcr_matrix);
2529  vtctx->ycbcr_matrix = NULL;
2530  }
2531 
2532  return 0;
2533 }
2534 
2535 static const enum AVPixelFormat pix_fmts[] = {
2536  AV_PIX_FMT_VIDEOTOOLBOX,
2537  AV_PIX_FMT_NV12,
2538  AV_PIX_FMT_YUV420P,
2539  AV_PIX_FMT_NONE
2540 };
2541 
2542 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2543 #define COMMON_OPTIONS \
2544  { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2545  { .i64 = 0 }, 0, 1, VE }, \
2546  { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2547  OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2548  { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2549  OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2550  { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2551  OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2552 
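/*
 * COMMON_OPTIONS is spliced into both h264_options[] and hevc_options[]
 * below, so allow_sw, realtime, frames_before and frames_after are available
 * on both encoders.
 */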
2553 #define OFFSET(x) offsetof(VTEncContext, x)
2554 static const AVOption h264_options[] = {
2555  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2556  { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2557  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2558  { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },
2559 
2560  { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2561  { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2562  { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2563  { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2564  { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2565  { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2566  { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2567  { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2568  { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2569  { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2570  { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2571 
2572  { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2573  { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2574  { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2575  { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2576  { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2577 
2578  { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2579 
2580  COMMON_OPTIONS
2581  { NULL },
2582 };
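/*
 * Illustrative command line exercising the options above (file names are
 * placeholders):
 *
 *   ffmpeg -i input.mov -c:v h264_videotoolbox -profile:v high -level 4.1 \
 *          -coder cabac -b:v 5M -allow_sw 1 output.mp4
 */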
2583 
2584 static const AVClass h264_videotoolbox_class = {
2585  .class_name = "h264_videotoolbox",
2586  .item_name = av_default_item_name,
2587  .option = h264_options,
2588  .version = LIBAVUTIL_VERSION_INT,
2589 };
2590 
2591 AVCodec ff_h264_videotoolbox_encoder = {
2592  .name = "h264_videotoolbox",
2593  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2594  .type = AVMEDIA_TYPE_VIDEO,
2595  .id = AV_CODEC_ID_H264,
2596  .priv_data_size = sizeof(VTEncContext),
2597  .pix_fmts = pix_fmts,
2598  .init = vtenc_init,
2599  .encode2 = vtenc_frame,
2600  .close = vtenc_close,
2601  .capabilities = AV_CODEC_CAP_DELAY,
2602  .priv_class = &h264_videotoolbox_class,
2603  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2604  FF_CODEC_CAP_INIT_CLEANUP,
2605 };
2606 
2607 static const AVOption hevc_options[] = {
2608  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2609  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2610  { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2611 
2612  COMMON_OPTIONS
2613  { NULL },
2614 };
2615 
2616 static const AVClass hevc_videotoolbox_class = {
2617  .class_name = "hevc_videotoolbox",
2618  .item_name = av_default_item_name,
2619  .option = hevc_options,
2620  .version = LIBAVUTIL_VERSION_INT,
2621 };
2622 
2623 AVCodec ff_hevc_videotoolbox_encoder = {
2624  .name = "hevc_videotoolbox",
2625  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2626  .type = AVMEDIA_TYPE_VIDEO,
2627  .id = AV_CODEC_ID_HEVC,
2628  .priv_data_size = sizeof(VTEncContext),
2629  .pix_fmts = pix_fmts,
2630  .init = vtenc_init,
2631  .encode2 = vtenc_frame,
2632  .close = vtenc_close,
2633  .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2634  .priv_class = &hevc_videotoolbox_class,
2635  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2636  FF_CODEC_CAP_INIT_CLEANUP,
2637  .wrapper_name = "videotoolbox",
2638 };
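/*
 * Illustrative command line for the HEVC encoder (file names are
 * placeholders; the -tag:v hvc1 part is a common addition for Apple player
 * compatibility and is not specific to this file):
 *
 *   ffmpeg -i input.mov -c:v hevc_videotoolbox -profile:v main -b:v 6M \
 *          -tag:v hvc1 output.mp4
 */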